/*
 * (MPSAFE)
 *
 * Copyright (c) 1998-2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * New Swap System
 * Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 * @(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/thread2.h>

#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER

#include "opt_swap.h"
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>

#define SWM_FREE	0x02	/* free, period			*/
#define SWM_POP		0x04	/* pop out			*/
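
/*
 * Note (explanatory, inferred from usage below): these flags select the
 * side effects of swp_pager_meta_ctl().  SWM_FREE releases the located
 * swap block back to the bitmap, while SWM_POP removes the block from the
 * object's metadata and returns it to the caller without freeing it;
 * swap_pager_copy() uses SWM_POP to move blocks between objects.
 */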

#define SWBIO_READ	0x01
#define SWBIO_WRITE	0x02
#define SWBIO_SYNC	0x04

struct swfreeinfo {
	vm_object_t	object;
	vm_pindex_t	basei;
	vm_pindex_t	begi;
	vm_pindex_t	endi;	/* inclusive */
};

/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old system.
 */

int swap_pager_full;		/* swap space exhaustion (task killing) */
int vm_swap_cache_use;
int vm_swap_anon_use;

static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
static int nsw_rcount;		/* free read buffers			*/
static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/

struct blist *swapblist;
static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
static int swap_burst_read = 0;	/* allow burst reading			*/

/* from vm_swap.c */
extern struct vnode *swapdev_vp;
extern struct swdevt *swdevt;
extern int nswdev;

#define BLK2DEVIDX(blk) (nswdev > 1 ? blk / dmmax % nswdev : 0)
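
/*
 * Worked example: dmmax is SWB_NPAGES * 2 (32 with the default
 * MAX_PAGEOUT_CLUSTER of 16), so consecutive dmmax-page stripes
 * round-robin across the configured swap devices.  With nswdev == 2,
 * swap block 100 maps to device (100 / 32) % 2 == 1; blocks 0-31 live
 * on device 0, 32-63 on device 1, and so on.
 */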

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
SYSCTL_INT(_vm, OID_AUTO, swap_burst_read,
	CTLFLAG_RW, &swap_burst_read, 0, "Allow burst reads for pageins");

SYSCTL_INT(_vm, OID_AUTO, swap_cache_use,
	CTLFLAG_RD, &vm_swap_cache_use, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_anon_use,
	CTLFLAG_RD, &vm_swap_anon_use, 0, "");
SYSCTL_INT(_vm, OID_AUTO, swap_size,
	CTLFLAG_RD, &vm_swap_size, 0, "");

vm_zone_t swap_zone;

/*
 * Red-Black tree for swblock entries
 *
 * The caller must hold vm_token
 */
RB_GENERATE2(swblock_rb_tree, swblock, swb_entry, rb_swblock_compare,
	     vm_pindex_t, swb_index);
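
/*
 * Note (explanatory): RB_GENERATE2() emits the swblock_rb_tree operations
 * used throughout this file: RB_LOOKUP() keyed directly on swb_index (see
 * swp_pager_lookup()), RB_INSERT()/RB_REMOVE(), and
 * swblock_rb_tree_RB_SCAN(), which visits the subset of the tree selected
 * by a range-compare callback such as rb_swblock_condcmp() below.
 */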

static
int
rb_swblock_compare(struct swblock *swb1, struct swblock *swb2)
{
	if (swb1->swb_index < swb2->swb_index)
		return(-1);
	if (swb1->swb_index > swb2->swb_index)
		return(1);
	return(0);
}

static
int
rb_swblock_scancmp(struct swblock *swb, void *data)
{
	struct swfreeinfo *info = data;

	if (swb->swb_index < info->basei)
		return(-1);
	if (swb->swb_index > info->endi)
		return(1);
	return(0);
}

static
int
rb_swblock_condcmp(struct swblock *swb, void *data)
{
	struct swfreeinfo *info = data;

	if (swb->swb_index < info->basei)
		return(-1);
	return(0);
}

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */
static void	swap_pager_dealloc (vm_object_t object);
static int	swap_pager_getpage (vm_object_t, vm_page_t *, int);
static void	swap_chain_iodone(struct bio *biox);

struct pagerops swappagerops = {
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	swap_pager_getpage,	/* pagein				*/
	swap_pager_putpages,	/* pageout				*/
	swap_pager_haspage	/* get backing store status for page	*/
};

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */
int dmmax;
static int dmmax_mask;
int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */

static __inline void	swp_sizecheck (void);
static void	swp_pager_async_iodone (struct bio *bio);

/*
 * Swap bitmap functions
 */
static __inline void	swp_pager_freeswapspace(vm_object_t object,
						swblk_t blk, int npages);
static __inline swblk_t swp_pager_getswapspace(vm_object_t object, int npages);

/*
 * Metadata functions
 */
static void swp_pager_meta_convert(vm_object_t);
static void swp_pager_meta_build(vm_object_t, vm_pindex_t, swblk_t);
static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t);
static void swp_pager_meta_free_all(vm_object_t);
static swblk_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 * No restrictions on call
 * This routine may not block.
 * SMP races are ok.
 */
static __inline void
swp_sizecheck(void)
{
	if (vm_swap_size < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			kprintf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (vm_swap_size > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}

/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 *
 * Called from the low level boot code only.
 */
static void
swap_pager_init(void *arg __unused)
{
	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */
	dmmax = SWB_NPAGES * 2;
	dmmax_mask = ~(dmmax - 1);
}
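
/*
 * Sanity example: with MAX_PAGEOUT_CLUSTER 16, dmmax is 32 pages and
 * dmmax_mask is ~31, so (blk & dmmax_mask) yields the stripe base and
 * ((a ^ b) & dmmax_mask) is non-zero exactly when a and b fall in
 * different stripes.  On a 4K-page platform a stripe is 128KB of swap.
 */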

SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_THIRD, swap_pager_init, NULL)

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 *
 * Called from the low level boot code only.
 */
void
swap_pager_swap_init(void)
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the number of pages an XIO
	 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.  Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */
	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;

	/*
	 * The zone is dynamically allocated so generally size it to
	 * maxswzone (32MB to 512MB of KVM).  Set a minimum size based
	 * on physical memory of around 8x (each swblock can hold 16 pages).
	 *
	 * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio
	 * has increased dramatically.
	 */
	n = vmstats.v_page_count / 2;
	if (maxswzone && n < maxswzone / sizeof(struct swblock))
		n = maxswzone / sizeof(struct swblock);
	n2 = n;

	do {
		swap_zone = zinit(
			"SWAPMETA",
			sizeof(struct swblock),
			n,
			ZONE_INTERRUPT,
			1);
		if (swap_zone != NULL)
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);
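
	/*
	 * Back-off note: each failed zinit() attempt shrinks the entry
	 * count to roughly two thirds of the previous try
	 * (n -= (n + 2) / 3), e.g. 100000 -> 66666 -> 44444 -> ..., so
	 * the loop terminates after O(log n) attempts even under severe
	 * KVM pressure.
	 */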

	if (swap_zone == NULL)
		panic("swap_pager_swap_init: swap_zone == NULL");
	if (n2 != n)
		kprintf("Swap zone entries reduced from %d to %d.\n", n2, n);
}

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_convert().
 *
 *	We only support unnamed objects.
 *
 * No restrictions.
 */
vm_object_t
swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset)
{
	vm_object_t object;

	KKASSERT(handle == NULL);
	object = vm_object_allocate_hold(OBJT_DEFAULT,
					 OFF_TO_IDX(offset + PAGE_MASK + size));
	swp_pager_meta_convert(object);
	vm_object_drop(object);

	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 * The object must be locked or unreferenceable.
 * No other requirements.
 */
static void
swap_pager_dealloc(vm_object_t object)
{
	vm_object_hold(object);
	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	swp_pager_meta_free_all(object);
	vm_object_drop(object);
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 * The caller must hold the object.
 * This routine may not block.
 */
static __inline swblk_t
swp_pager_getswapspace(vm_object_t object, int npages)
{
	swblk_t blk;

	lwkt_gettoken(&vm_token);
	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
		if (swap_pager_full != 2) {
			kprintf("swap_pager_getswapspace: failed alloc=%d\n",
				npages);
			swap_pager_full = 2;
			swap_pager_almost_full = 1;
		}
	} else {
		swapacctspace(blk, -npages);
		if (object->type == OBJT_SWAP)
			vm_swap_anon_use += npages;
		else
			vm_swap_cache_use += npages;
		swp_sizecheck();
	}
	lwkt_reltoken(&vm_token);
	return(blk);
}

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 * This routine may not block.
 */
static __inline void
swp_pager_freeswapspace(vm_object_t object, swblk_t blk, int npages)
{
	struct swdevt *sp = &swdevt[BLK2DEVIDX(blk)];

	lwkt_gettoken(&vm_token);
	sp->sw_nused -= npages;
	if (object->type == OBJT_SWAP)
		vm_swap_anon_use -= npages;
	else
		vm_swap_cache_use -= npages;

	if (sp->sw_flags & SW_CLOSING) {
		lwkt_reltoken(&vm_token);
		return;
	}

	blist_free(swapblist, blk, npages);
	vm_swap_size += npages;
	swp_sizecheck();
	lwkt_reltoken(&vm_token);
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 * No requirements.
 */
void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_pindex_t size)
{
	vm_object_hold(object);
	swp_pager_meta_free(object, start, size);
	vm_object_drop(object);
}

/*
 * No requirements.
 */
void
swap_pager_freespace_all(vm_object_t object)
{
	vm_object_hold(object);
	swp_pager_meta_free_all(object);
	vm_object_drop(object);
}

/*
 * This function conditionally frees swap cache swap starting at
 * (*basei) in the object.  (count) swap blocks will be nominally freed.
 * The actual number of blocks freed can be more or less than the
 * requested number.
 *
 * This function nominally returns the number of blocks freed.  However,
 * the actual number of blocks freed may be less than the returned value.
 * If the function is unable to exhaust the object or if it is able to
 * free (approximately) the requested number of blocks it returns
 * a value n > count.
 *
 * If we exhaust the object we will return a value n <= count.
 *
 * The caller must hold the object.
 *
 * WARNING!  If count == 0 then -1 can be returned as a degenerate case,
 *	     callers should always pass a count value > 0.
 */
static int swap_pager_condfree_callback(struct swblock *swap, void *data);

int
swap_pager_condfree(vm_object_t object, vm_pindex_t *basei, int count)
{
	struct swfreeinfo info;
	int n;
	int t;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	info.object = object;
	info.basei = *basei;	/* skip up to this page index */
	info.begi = count;	/* max swap pages to destroy */
	info.endi = count * 8;	/* max swblocks to scan */

	swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_condcmp,
				swap_pager_condfree_callback, &info);
	*basei = info.basei;

	/*
	 * Take the higher difference swblocks vs pages
	 */
	n = count - (int)info.begi;
	t = count * 8 - (int)info.endi;
	if (n < t)
		n = t;
	if (n < 1)
		n = 1;
	return(n);
}
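
/*
 * Worked example: with count == 16 the scan starts with begi = 16 (swap
 * pages it may free) and endi = 128 (swblocks it may visit).  If the
 * callback frees 10 pages while visiting 40 swblocks, then
 * n = 16 - 6 = 10 and t = 128 - 88 = 40, so 40 is returned: progress is
 * reported as the larger of pages freed and swblocks scanned.
 */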

/*
 * The idea is to free whole meta-blocks to avoid fragmenting
 * the swap space or disk I/O.  We only do this if NO VM pages
 * are present.
 *
 * We do not have to deal with clearing PG_SWAPPED in related VM
 * pages because there are no related VM pages.
 *
 * The caller must hold the object.
 */
static int
swap_pager_condfree_callback(struct swblock *swap, void *data)
{
	struct swfreeinfo *info = data;
	vm_object_t object = info->object;
	int i;

	for (i = 0; i < SWAP_META_PAGES; ++i) {
		if (vm_page_lookup(object, swap->swb_index + i))
			break;
	}
	info->basei = swap->swb_index + SWAP_META_PAGES;
	if (i == SWAP_META_PAGES) {
		info->begi -= swap->swb_count;
		swap_pager_freespace(object, swap->swb_index, SWAP_META_PAGES);
	}
	--info->endi;
	if ((int)info->begi < 0 || (int)info->endi < 0)
		return(-1);
	lwkt_yield();
	return(0);
}

/*
 * Called by vm_page_alloc() when a new VM page is inserted
 * into a VM object.  Checks whether swap has been assigned to
 * the page and sets PG_SWAPPED as necessary.
 *
 * No requirements.
 */
void
swap_pager_page_inserted(vm_page_t m)
{
	if (m->object->swblock_count) {
		vm_object_hold(m->object);
		if (swp_pager_meta_ctl(m->object, m->pindex, 0) != SWAPBLK_NONE)
			vm_page_flag_set(m, PG_SWAPPED);
		vm_object_drop(m->object);
	}
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 *
 * The caller is responsible for avoiding races in the specified range.
 * No other requirements.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int n = 0;
	swblk_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	vm_object_hold(object);

	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(object, n)) ==
			       SWAPBLK_NONE)
			{
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg,
							    start - beg);
					vm_object_drop(object);
					return(-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	vm_object_drop(object);
	return(0);
}
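
/*
 * Allocation strategy note: the reservation loop grabs the largest chunk
 * the blist will give it (BLIST_MAX_ALLOC) and halves the request on each
 * failure, so a fragmented swap map degrades gracefully into smaller
 * contiguous runs instead of failing outright.  The trailing
 * swp_pager_meta_free(object, start, n) returns the unused remainder of
 * the final chunk.
 */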

/*
 * SWAP_PAGER_COPY() -	copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *	The source object is of type OBJT_SWAP.
 *
 * The source and destination objects must be held by the caller.
 */
void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
		vm_pindex_t base_index, int destroysource)
{
	vm_pindex_t i;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(srcobject));
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(dstobject));

	/*
	 * transfer source to destination.
	 */
	for (i = 0; i < dstobject->size; ++i) {
		swblk_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */
		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			swblk_t srcaddr;

			srcaddr = swp_pager_meta_ctl(srcobject,
						     base_index + i, SWM_POP);

			if (srcaddr != SWAPBLK_NONE)
				swp_pager_meta_build(dstobject, i, srcaddr);
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the sourceblock.
			 */
			swp_pager_meta_ctl(srcobject, base_index + i, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
	 * double-remove the object from the swap queues.
	 */
	if (destroysource) {
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		swp_pager_meta_free_all(srcobject);
		if (srcobject->type == OBJT_SWAP)
			srcobject->type = OBJT_DEFAULT;
	}
}

/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 *
 * No requirements.
 */
boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
	swblk_t blk0;

	/*
	 * do we have good backing store at the requested index ?
	 */
	vm_object_hold(object);
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		vm_object_drop(object);
		return (FALSE);
	}
	vm_object_drop(object);
	return (TRUE);
}

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.  This operates on any VM object, not just OBJT_SWAP
 *	objects.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation.
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 * The page must be busied or soft-busied.
 * The caller can hold the object to avoid blocking, else we might block.
 * No other requirements.
 */
void
swap_pager_unswapped(vm_page_t m)
{
	if (m->flags & PG_SWAPPED) {
		vm_object_hold(m->object);
		KKASSERT(m->flags & PG_SWAPPED);
		swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
		vm_page_flag_clear(m, PG_SWAPPED);
		vm_object_drop(m->object);
	}
}

/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *	This implements a VM OBJECT strategy function using swap backing store.
 *	This can operate on any VM OBJECT type, not necessarily just OBJT_SWAP
 *	types.
 *
 *	This is intended to be a cacheless interface (i.e. caching occurs at
 *	higher levels), and is also used as a swap-based SSD cache for vnode
 *	and device objects.
 *
 *	All I/O goes directly to and from the swap device.
 *
 *	We currently attempt to run I/O synchronously or asynchronously as
 *	the caller requests.  This isn't perfect because we lose error
 *	sequencing when we run multiple ops in parallel to satisfy a request.
 *	But this is swap, so we let it all hang out.
 *
 * No requirements.
 */
void
swap_pager_strategy(vm_object_t object, struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct bio *nbio;
	vm_pindex_t start;
	vm_pindex_t biox_blkno = 0;
	int count;
	char *data;
	struct bio *biox;
	struct buf *bufx;
	struct bio_track *track;

	/*
	 * tracking for swapdev vnode I/Os
	 */
	if (bp->b_cmd == BUF_CMD_READ)
		track = &swapdev_vp->v_track_read;
	else
		track = &swapdev_vp->v_track_write;

	if (bp->b_bcount & PAGE_MASK) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR | B_INVAL;
		biodone(bio);
		kprintf("swap_pager_strategy: bp %p offset %lld size %d, "
			"not page bounded\n",
			bp, (long long)bio->bio_offset, (int)bp->b_bcount);
		return;
	}

	/*
	 * Clear error indication, initialize page index, count, data pointer.
	 */
	bp->b_error = 0;
	bp->b_flags &= ~B_ERROR;
	bp->b_resid = bp->b_bcount;

	start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT);
	count = howmany(bp->b_bcount, PAGE_SIZE);
	data = bp->b_data;

	/*
	 * Deal with BUF_CMD_FREEBLKS
	 */
	if (bp->b_cmd == BUF_CMD_FREEBLKS) {
		/*
		 * FREE PAGE(s) - destroy underlying swap that is no longer
		 *		  needed.
		 */
		vm_object_hold(object);
		swp_pager_meta_free(object, start, count);
		vm_object_drop(object);
		bp->b_resid = 0;
		biodone(bio);
		return;
	}

	/*
	 * We need to be able to create a new cluster of I/O's.  We cannot
	 * use the caller fields of the passed bio so push a new one.
	 *
	 * Because nbio is just a placeholder for the cluster links,
	 * we can biodone() the original bio instead of nbio to make
	 * things a bit more efficient.
	 */
	nbio = push_bio(bio);
	nbio->bio_offset = bio->bio_offset;
	nbio->bio_caller_info1.cluster_head = NULL;
	nbio->bio_caller_info2.cluster_tail = NULL;

	biox = NULL;
	bufx = NULL;

	/*
	 * Execute read or write
	 */
	vm_object_hold(object);

	while (count > 0) {
		swblk_t blk;

		/*
		 * Obtain block.  If block not found and writing, allocate a
		 * new block and build it into the object.
		 */
		blk = swp_pager_meta_ctl(object, start, 0);
		if ((blk == SWAPBLK_NONE) && bp->b_cmd != BUF_CMD_READ) {
			blk = swp_pager_getswapspace(object, 1);
			if (blk == SWAPBLK_NONE) {
				bp->b_error = ENOMEM;
				bp->b_flags |= B_ERROR;
				break;
			}
			swp_pager_meta_build(object, start, blk);
		}

		/*
		 * Do we have to flush our current collection?  Yes if:
		 *
		 *	- no swap block at this index
		 *	- swap block is not contiguous
		 *	- we cross a physical disk boundary in the
		 *	  stripe.
		 */
		if (
		    biox && (biox_blkno + btoc(bufx->b_bcount) != blk ||
			     ((biox_blkno ^ blk) & dmmax_mask)
		    )
		) {
			if (bp->b_cmd == BUF_CMD_READ) {
				++mycpu->gd_cnt.v_swapin;
				mycpu->gd_cnt.v_swappgsin +=
					btoc(bufx->b_bcount);
			} else {
				++mycpu->gd_cnt.v_swapout;
				mycpu->gd_cnt.v_swappgsout +=
					btoc(bufx->b_bcount);
				bufx->b_dirtyend = bufx->b_bcount;
			}

			/*
			 * Finished with this buf.
			 */
			KKASSERT(bufx->b_bcount != 0);
			if (bufx->b_cmd != BUF_CMD_READ)
				bufx->b_dirtyend = bufx->b_bcount;
			biox = NULL;
			bufx = NULL;
		}
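
		/*
		 * Example of the flush rule above: with dmmax == 32, a
		 * cluster that has accumulated blocks 28-31 must be flushed
		 * before appending block 32 even though it is contiguous,
		 * because 31 and 32 live in different device stripes
		 * ((31 ^ 32) & ~31 != 0); a gap or a non-adjacent block
		 * likewise forces a new sub-buf.
		 */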

		/*
		 * Add new swapblk to biox, instantiating biox if necessary.
		 * Zero-fill reads are able to take a shortcut.
		 */
		if (blk == SWAPBLK_NONE) {
			/*
			 * We can only get here if we are reading.  Since
			 * we are at splvm() we can safely modify b_resid,
			 * even if chain ops are in progress.
			 */
			bzero(data, PAGE_SIZE);
			bp->b_resid -= PAGE_SIZE;
		} else {
			if (biox == NULL) {
				/* XXX chain count > 4, wait to <= 4 */

				bufx = getpbuf(NULL);
				biox = &bufx->b_bio1;
				cluster_append(nbio, bufx);
				bufx->b_flags |= (bp->b_flags & B_ORDERED);
				bufx->b_cmd = bp->b_cmd;
				biox->bio_done = swap_chain_iodone;
				biox->bio_offset = (off_t)blk << PAGE_SHIFT;
				biox->bio_caller_info1.cluster_parent = nbio;
				biox_blkno = blk;
				bufx->b_bcount = 0;
				bufx->b_data = data;
			}
			bufx->b_bcount += PAGE_SIZE;
		}
		--count;
		++start;
		data += PAGE_SIZE;
	}
	vm_object_drop(object);

	/*
	 * Flush out last buffer
	 */
	if (biox) {
		if (bufx->b_cmd == BUF_CMD_READ) {
			++mycpu->gd_cnt.v_swapin;
			mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
		} else {
			++mycpu->gd_cnt.v_swapout;
			mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
			bufx->b_dirtyend = bufx->b_bcount;
		}
		KKASSERT(bufx->b_bcount);
		if (bufx->b_cmd != BUF_CMD_READ)
			bufx->b_dirtyend = bufx->b_bcount;
		/* biox, bufx = NULL */
	}

	/*
	 * Now initiate all the I/O.  Be careful looping on our chain as
	 * I/O's may complete while we are still initiating them.
	 *
	 * If the request is a 100% sparse read no bios will be present
	 * and we just biodone() the buffer.
	 */
	nbio->bio_caller_info2.cluster_tail = NULL;
	bufx = nbio->bio_caller_info1.cluster_head;

	if (bufx) {
		while (bufx) {
			biox = &bufx->b_bio1;
			BUF_KERNPROC(bufx);
			bufx = bufx->b_cluster_next;
			vn_strategy(swapdev_vp, biox);
		}
	} else {
		biodone(bio);
	}

	/*
	 * Completion of the cluster will also call biodone_chain(nbio).
	 * We never call biodone(nbio) so we don't have to worry about
	 * setting up a bio_done callback.  It's handled in the sub-IO.
	 */
	/**/
}

/*
 * biodone callback
 *
 * No requirements.
 */
static void
swap_chain_iodone(struct bio *biox)
{
	struct buf **nextp;
	struct buf *bufx;	/* chained sub-buffer */
	struct bio *nbio;	/* parent nbio with chain glue */
	struct buf *bp;		/* original bp associated with nbio */
	int chain_empty;

	bufx = biox->bio_buf;
	nbio = biox->bio_caller_info1.cluster_parent;
	bp = nbio->bio_buf;

	/*
	 * Update the original buffer
	 */
	KKASSERT(bp != NULL);
	if (bufx->b_flags & B_ERROR) {
		atomic_set_int(&bp->b_flags, B_ERROR);
		bp->b_error = bufx->b_error;	/* race ok */
	} else if (bufx->b_resid != 0) {
		atomic_set_int(&bp->b_flags, B_ERROR);
		bp->b_error = EINVAL;		/* race ok */
	} else {
		atomic_subtract_int(&bp->b_resid, bufx->b_bcount);
	}

	/*
	 * Remove us from the chain.
	 */
	spin_lock(&bp->b_lock.lk_spinlock);
	nextp = &nbio->bio_caller_info1.cluster_head;
	while (*nextp != bufx) {
		KKASSERT(*nextp != NULL);
		nextp = &(*nextp)->b_cluster_next;
	}
	*nextp = bufx->b_cluster_next;
	chain_empty = (nbio->bio_caller_info1.cluster_head == NULL);
	spin_unlock(&bp->b_lock.lk_spinlock);

	/*
	 * Clean up bufx.  If the chain is now empty we finish out
	 * the parent.  Note that we may be racing other completions
	 * so we must use the chain_empty status from above.
	 */
	if (chain_empty) {
		if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
			atomic_set_int(&bp->b_flags, B_ERROR);
			bp->b_error = EINVAL;
		}
		biodone_chain(nbio);
	}
	relpbuf(bufx, NULL);
}

/*
 * SWAP_PAGER_GETPAGES() - bring page in from swap
 *
 *	The requested page may have to be brought in from swap.  Calculate the
 *	swap block and bring in additional pages if possible.  All pages must
 *	have contiguous swap block assignments and reside in the same object.
 *
 *	The caller has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The caller has BUSY'd the page.  We should return with (*mpp) left busy,
 *	and any additional pages unbusied.
 *
 *	If the caller encounters a PG_RAM page it will pass it to us even though
 *	it may be valid and dirty.  We cannot overwrite the page in this case!
 *	The case is used to allow us to issue pure read-aheads.
 *
 *	NOTE! XXX This code does not entirely pipeline yet due to the fact that
 *	      the PG_RAM page is validated at the same time as mreq.  What we
 *	      really need to do is issue a separate read-ahead pbuf.
 *
 * No requirements.
 */
static int
swap_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
{
	struct buf *bp;
	struct bio *bio;
	vm_page_t mreq;
	vm_page_t m;
	vm_offset_t kva;
	swblk_t blk;
	int i;
	int j;
	int raonly;
	int error;
	u_int32_t flags;
	vm_page_t marray[XIO_INTERNAL_PAGES];

	mreq = *mpp;

	vm_object_hold(object);
	if (mreq->object != object) {
		panic("swap_pager_getpages: object mismatch %p/%p",
		      object,
		      mreq->object
		);
	}

	/*
	 * We don't want to overwrite a fully valid page as it might be
	 * dirty.  This case can occur when e.g. vm_fault hits a perfectly
	 * valid page with PG_RAM set.
	 *
	 * In this case we see if the next page is a suitable page-in
	 * candidate and if it is we issue read-ahead.  PG_RAM will be
	 * set on the last page of the read-ahead to continue the pipeline.
	 */
	if (mreq->valid == VM_PAGE_BITS_ALL) {
		if (swap_burst_read == 0 || mreq->pindex + 1 >= object->size) {
			vm_object_drop(object);
			return(VM_PAGER_OK);
		}
		blk = swp_pager_meta_ctl(object, mreq->pindex + 1, 0);
		if (blk == SWAPBLK_NONE) {
			vm_object_drop(object);
			return(VM_PAGER_OK);
		}
		m = vm_page_lookup_busy_try(object, mreq->pindex + 1,
					    TRUE, &error);
		if (error) {
			vm_object_drop(object);
			return(VM_PAGER_OK);
		} else if (m == NULL) {
			/*
			 * Use VM_ALLOC_QUICK to avoid blocking on cache
			 * page reuse.
			 */
			m = vm_page_alloc(object, mreq->pindex + 1,
					  VM_ALLOC_QUICK);
			if (m == NULL) {
				vm_object_drop(object);
				return(VM_PAGER_OK);
			}
		} else {
			if (m->valid) {
				vm_page_wakeup(m);
				vm_object_drop(object);
				return(VM_PAGER_OK);
			}
			vm_page_unqueue_nowakeup(m);
		}
		/* page is busy */
		mreq = m;
		raonly = 1;
	} else {
		raonly = 0;
	}

	/*
	 * Try to block-read contiguous pages from swap if sequential,
	 * otherwise just read one page.  Contiguous pages from swap must
	 * reside within a single device stripe because the I/O cannot be
	 * broken up across multiple stripes.
	 *
	 * Note that blk and iblk can be SWAPBLK_NONE but the loop is
	 * set up such that the case(s) are handled implicitly.
	 */
	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
	marray[0] = mreq;

	for (i = 1; swap_burst_read &&
		    i < XIO_INTERNAL_PAGES &&
		    mreq->pindex + i < object->size; ++i) {
		swblk_t iblk;

		iblk = swp_pager_meta_ctl(object, mreq->pindex + i, 0);
		if (iblk != blk + i)
			break;
		if ((blk ^ iblk) & dmmax_mask)
			break;
		m = vm_page_lookup_busy_try(object, mreq->pindex + i,
					    TRUE, &error);
		if (error) {
			break;
		} else if (m == NULL) {
			/*
			 * Use VM_ALLOC_QUICK to avoid blocking on cache
			 * page reuse.
			 */
			m = vm_page_alloc(object, mreq->pindex + i,
					  VM_ALLOC_QUICK);
			if (m == NULL)
				break;
		} else {
			if (m->valid) {
				vm_page_wakeup(m);
				break;
			}
			vm_page_unqueue_nowakeup(m);
		}
		/* page is busy */
		marray[i] = m;
	}
	if (i > 1)
		vm_page_flag_set(marray[i - 1], PG_RAM);

	/*
	 * If mreq is the requested page and we have nothing to do return
	 * VM_PAGER_FAIL.  If raonly is set mreq is just another read-ahead
	 * page and must be cleaned up.
	 */
	if (blk == SWAPBLK_NONE) {
		KKASSERT(i == 1);
		if (raonly) {
			vnode_pager_freepage(mreq);
			vm_object_drop(object);
			return(VM_PAGER_OK);
		} else {
			vm_object_drop(object);
			return(VM_PAGER_FAIL);
		}
	}

	/*
	 * map our page(s) into kva for input
	 */
	bp = getpbuf_kva(&nsw_rcount);
	bio = &bp->b_bio1;
	kva = (vm_offset_t) bp->b_kvabase;
	bcopy(marray, bp->b_xio.xio_pages, i * sizeof(vm_page_t));
	pmap_qenter(kva, bp->b_xio.xio_pages, i);

	bp->b_data = (caddr_t)kva;
	bp->b_bcount = PAGE_SIZE * i;
	bp->b_xio.xio_npages = i;
	bio->bio_done = swp_pager_async_iodone;
	bio->bio_offset = (off_t)blk << PAGE_SHIFT;
	bio->bio_caller_info1.index = SWBIO_READ;

	/*
	 * Set index.  If raonly set the index beyond the array so all
	 * the pages are treated the same, otherwise the original mreq is
	 * at index 0.
	 */
	if (raonly)
		bio->bio_driver_info = (void *)(intptr_t)i;
	else
		bio->bio_driver_info = (void *)(intptr_t)0;

	for (j = 0; j < i; ++j)
		vm_page_flag_set(bp->b_xio.xio_pages[j], PG_SWAPINPROG);

	mycpu->gd_cnt.v_swapin++;
	mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;

	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */
	vm_object_pip_add(object, bp->b_xio.xio_npages);

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 */
	bp->b_cmd = BUF_CMD_READ;
	BUF_KERNPROC(bp);
	vn_strategy(swapdev_vp, bio);

	/*
	 * Wait for the page we want to complete.  PG_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 *
	 * If this is a read-ahead only we return immediately without
	 * waiting for I/O.
	 */
	if (raonly) {
		vm_object_drop(object);
		return(VM_PAGER_OK);
	}

	/*
	 * Read-ahead includes originally requested page case.
	 */
	for (;;) {
		flags = mreq->flags;
		cpu_ccfence();
		if ((flags & PG_SWAPINPROG) == 0)
			break;
		tsleep_interlock(mreq, 0);
		if (!atomic_cmpset_int(&mreq->flags, flags,
				       flags | PG_WANTED | PG_REFERENCED)) {
			continue;
		}
		mycpu->gd_cnt.v_intrans++;
		if (tsleep(mreq, PINTERLOCKED, "swread", hz*20)) {
			kprintf(
				"swap_pager: indefinite wait buffer: "
					" offset: %lld, size: %ld\n",
				(long long)bio->bio_offset,
				(long)bp->b_bcount
			);
		}
	}

	/*
	 * mreq is left busied after completion, but all the other pages
	 * are freed.  If we had an unrecoverable read error the page will
	 * not be valid.
	 */
	vm_object_drop(object);
	if (mreq->valid != VM_PAGE_BITS_ALL)
		return(VM_PAGER_ERROR);
	else
		return(VM_PAGER_OK);

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}

/*
 * swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in vn_strategy(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 *
 * No requirements.
 */
void
swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
		    boolean_t sync, int *rtvals)
{
	int i;
	int n = 0;

	vm_object_hold(object);

	if (count && m[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p",
		      object,
		      m[0]->object
		);
	}

	/*
	 * Step 1
	 *
	 * Turn object into OBJT_SWAP
	 * check for bogus sysops
	 * force sync if not pageout process
	 */
	if (object->type == OBJT_DEFAULT) {
		if (object->type == OBJT_DEFAULT)
			swp_pager_meta_convert(object);
	}

	if (curthread != pagethread)
		sync = TRUE;

	/*
	 * Step 2
	 *
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */
	if (swap_async_max != nsw_wcount_async_max) {
		int n;

		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf / 2)
			n = nswbuf / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;

		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 *
		 * vm_token needed for nsw_wcount sleep interlock
		 */
		lwkt_gettoken(&vm_token);
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async_max += n;
			pbuf_adjcount(&nsw_wcount_async, n);
		}
		lwkt_reltoken(&vm_token);
	}

	/*
	 * Step 3
	 *
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */

	for (i = 0; i < count; i += n) {
		struct buf *bp;
		struct bio *bio;
		swblk_t blk;
		int j;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */

		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		lwkt_gettoken(&vm_token);

		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while (
		    (blk = swp_pager_getswapspace(object, n)) == SWAPBLK_NONE &&
		    n > 4
		) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			lwkt_reltoken(&vm_token);
			continue;
		}

		/*
		 * The I/O we are constructing cannot cross a physical
		 * disk boundary in the swap stripe.  Note: we are still
		 * at splvm().
		 */
		if ((blk ^ (blk + n)) & dmmax_mask) {
			j = ((blk + dmmax) & dmmax_mask) - blk;
			swp_pager_freeswapspace(object, blk + j, n - j);
			n = j;
		}
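
		/*
		 * Worked example of the stripe clip above (dmmax == 32):
		 * blk == 30 with n == 8 would span blocks 30-37 across the
		 * stripe boundary at 32.  j = ((30 + 32) & ~31) - 30 = 2,
		 * so blocks 32-37 are returned to the bitmap and the I/O
		 * is clipped to the two in-stripe blocks 30-31.
		 */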
1579 * All I/O parameters have been satisfied, build the I/O
1580 * request and assign the swap space.
1582 if (sync == TRUE)
1583 bp = getpbuf_kva(&nsw_wcount_sync);
1584 else
1585 bp = getpbuf_kva(&nsw_wcount_async);
1586 bio = &bp->b_bio1;
1588 lwkt_reltoken(&vm_token);
1590 pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);
1592 bp->b_bcount = PAGE_SIZE * n;
1593 bio->bio_offset = (off_t)blk << PAGE_SHIFT;
1595 for (j = 0; j < n; ++j) {
1596 vm_page_t mreq = m[i+j];
1598 swp_pager_meta_build(mreq->object, mreq->pindex,
1599 blk + j);
1600 if (object->type == OBJT_SWAP)
1601 vm_page_dirty(mreq);
1602 rtvals[i+j] = VM_PAGER_OK;
1604 vm_page_flag_set(mreq, PG_SWAPINPROG);
1605 bp->b_xio.xio_pages[j] = mreq;
1607 bp->b_xio.xio_npages = n;
1609 mycpu->gd_cnt.v_swapout++;
1610 mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;
1612 bp->b_dirtyoff = 0; /* req'd for NFS */
1613 bp->b_dirtyend = bp->b_bcount; /* req'd for NFS */
1614 bp->b_cmd = BUF_CMD_WRITE;
1615 bio->bio_caller_info1.index = SWBIO_WRITE;
1618 * asynchronous
1620 if (sync == FALSE) {
1621 bio->bio_done = swp_pager_async_iodone;
1622 BUF_KERNPROC(bp);
1623 vn_strategy(swapdev_vp, bio);
1625 for (j = 0; j < n; ++j)
1626 rtvals[i+j] = VM_PAGER_PEND;
1627 continue;
1631 * Issue synchrnously.
1633 * Wait for the sync I/O to complete, then update rtvals.
1634 * We just set the rtvals[] to VM_PAGER_PEND so we can call
1635 * our async completion routine at the end, thus avoiding a
1636 * double-free.
1638 bio->bio_caller_info1.index |= SWBIO_SYNC;
1639 bio->bio_done = biodone_sync;
1640 bio->bio_flags |= BIO_SYNC;
1641 vn_strategy(swapdev_vp, bio);
1642 biowait(bio, "swwrt");
1644 for (j = 0; j < n; ++j)
1645 rtvals[i+j] = VM_PAGER_PEND;
1648 * Now that we are through with the bp, we can call the
1649 * normal async completion, which frees everything up.
1651 swp_pager_async_iodone(bio);
1653 vm_object_drop(object);
1657 * No requirements.
1659 void
1660 swap_pager_newswap(void)
1662 swp_sizecheck();
1666 * swp_pager_async_iodone:
1668 * Completion routine for asynchronous reads and writes from/to swap.
1669 * Also called manually by synchronous code to finish up a bp.
1671 * For READ operations, the pages are PG_BUSY'd. For WRITE operations,
1672 * the pages are vm_page_t->busy'd. For READ operations, we PG_BUSY
1673 * unbusy all pages except the 'main' request page. For WRITE
1674 * operations, we vm_page_t->busy'd unbusy all pages ( we can do this
1675 * because we marked them all VM_PAGER_PEND on return from putpages ).
1677 * This routine may not block.
1679 * No requirements.
1681 static void
1682 swp_pager_async_iodone(struct bio *bio)
1684 struct buf *bp = bio->bio_buf;
1685 vm_object_t object = NULL;
1686 int i;
1687 int *nswptr;
1690 * report error
1692 if (bp->b_flags & B_ERROR) {
1693 kprintf(
1694 "swap_pager: I/O error - %s failed; offset %lld,"
1695 "size %ld, error %d\n",
1696 ((bio->bio_caller_info1.index & SWBIO_READ) ?
1697 "pagein" : "pageout"),
1698 (long long)bio->bio_offset,
1699 (long)bp->b_bcount,
1700 bp->b_error
1705 * set object, raise to splvm().
1707 if (bp->b_xio.xio_npages)
1708 object = bp->b_xio.xio_pages[0]->object;
1711 * remove the mapping for kernel virtual
1713 pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);
1716 * cleanup pages. If an error occurs writing to swap, we are in
1717 * very serious trouble. If it happens to be a disk error, though,
1718 * we may be able to recover by reassigning the swap later on. So
1719 * in this case we remove the m->swapblk assignment for the page
1720 * but do not free it in the rlist. The errornous block(s) are thus
1721 * never reallocated as swap. Redirty the page and continue.
1723 for (i = 0; i < bp->b_xio.xio_npages; ++i) {
1724 vm_page_t m = bp->b_xio.xio_pages[i];
1726 if (bp->b_flags & B_ERROR) {
1728 * If an error occurs I'd love to throw the swapblk
1729 * away without freeing it back to swapspace, so it
1730 * can never be used again. But I can't from an
1731 * interrupt.
1734 if (bio->bio_caller_info1.index & SWBIO_READ) {
1736 * When reading, reqpage needs to stay
1737 * locked for the parent, but all other
1738 * pages can be freed. We still want to
1739 * wakeup the parent waiting on the page,
1740 * though. ( also: pg_reqpage can be -1 and
1741 * not match anything ).
1743 * We have to wake specifically requested pages
1744 * up too because we cleared PG_SWAPINPROG and
1745 * someone may be waiting for that.
1747 * NOTE: for reads, m->dirty will probably
1748 * be overridden by the original caller of
1749 * getpages so don't play cute tricks here.
1751 * NOTE: We can't actually free the page from
1752 * here, because this is an interrupt. It
1753 * is not legal to mess with object->memq
1754 * from an interrupt. Deactivate the page
1755 * instead.
1758 m->valid = 0;
1759 vm_page_flag_clear(m, PG_ZERO);
1760 vm_page_flag_clear(m, PG_SWAPINPROG);
1763 * bio_driver_info holds the requested page
1764 * index.
1766 if (i != (int)(intptr_t)bio->bio_driver_info) {
1767 vm_page_deactivate(m);
1768 vm_page_wakeup(m);
1769 } else {
1770 vm_page_flash(m);
1773 * If i == bp->b_pager.pg_reqpage, do not wake
1774 * the page up. The caller needs to.
1776 } else {
1778 * If a write error occurs remove the swap
1779 * assignment (note that PG_SWAPPED may or
1780 * may not be set depending on prior activity).
1782 * Re-dirty OBJT_SWAP pages as there is no
1783 * other backing store, we can't throw the
1784 * page away.
1786 * Non-OBJT_SWAP pages (aka swapcache) must
1787 * not be dirtied since they may not have
1788 * been dirty in the first place, and they
1789 * do have backing store (the vnode).
1791 vm_page_busy_wait(m, FALSE, "swadpg");
1792 swp_pager_meta_ctl(m->object, m->pindex,
1793 SWM_FREE);
1794 vm_page_flag_clear(m, PG_SWAPPED);
1795 if (m->object->type == OBJT_SWAP) {
1796 vm_page_dirty(m);
1797 vm_page_activate(m);
1799 vm_page_flag_clear(m, PG_SWAPINPROG);
1800 vm_page_io_finish(m);
1801 vm_page_wakeup(m);
1803 } else if (bio->bio_caller_info1.index & SWBIO_READ) {
1805 * NOTE: for reads, m->dirty will probably be
1806 * overridden by the original caller of getpages so
1807 * we cannot set them in order to free the underlying
1808 * swap in a low-swap situation. I don't think we'd
1809 * want to do that anyway, but it was an optimization
1810 * that existed in the old swapper for a time before
1811 * it got ripped out due to precisely this problem.
1813 * clear PG_ZERO in page.
1815 * If not the requested page then deactivate it.
1817 * Note that the requested page, reqpage, is left
1818 * busied, but we still have to wake it up. The
1819 * other pages are released (unbusied) by
1820 * vm_page_wakeup(). We do not set reqpage's
1821 * valid bits here, it is up to the caller.
1825 * NOTE: can't call pmap_clear_modify(m) from an
1826 * interrupt thread, the pmap code may have to map
1827 * non-kernel pmaps and currently asserts the case.
1829 /*pmap_clear_modify(m);*/
1830 m->valid = VM_PAGE_BITS_ALL;
1831 vm_page_undirty(m);
1832 vm_page_flag_clear(m, PG_ZERO | PG_SWAPINPROG);
1833 vm_page_flag_set(m, PG_SWAPPED);
1836 * We have to wake specifically requested pages
1837 * up too because we cleared PG_SWAPINPROG and
1838 * could be waiting for it in getpages. However,
1839 * be sure to not unbusy getpages specifically
1840 * requested page - getpages expects it to be
1841 * left busy.
1843 * bio_driver_info holds the requested page
1845 if (i != (int)(intptr_t)bio->bio_driver_info) {
1846 vm_page_deactivate(m);
1847 vm_page_wakeup(m);
1848 } else {
1849 vm_page_flash(m);
1851 } else {
1853 * Mark the page clean but do not mess with the
1854 * pmap-layer's modified state. That state should
1855 * also be clear since the caller protected the
1856 * page VM_PROT_READ, but allow the case.
1858 * We are in an interrupt, avoid pmap operations.
1860 * If we have a severe page deficit, deactivate the
1861 * page. Do not try to cache it (which would also
1862 * involve a pmap op), because the page might still
1863 * be read-heavy.
1865 * When using the swap to cache clean vnode pages
1866 * we do not mess with the page dirty bits.
1868 vm_page_busy_wait(m, FALSE, "swadpg");
1869 if (m->object->type == OBJT_SWAP)
1870 vm_page_undirty(m);
1871 vm_page_flag_clear(m, PG_SWAPINPROG);
1872 vm_page_flag_set(m, PG_SWAPPED);
1873 if (vm_page_count_severe())
1874 vm_page_deactivate(m);
1875 #if 0
1876 if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
1877 vm_page_protect(m, VM_PROT_READ);
1878 #endif
1879 vm_page_io_finish(m);
1880 vm_page_wakeup(m);
1885 * adjust pip. NOTE: the original parent may still have its own
1886 * pip refs on the object.
1889 if (object)
1890 vm_object_pip_wakeup_n(object, bp->b_xio.xio_npages);
1893 * Release the physical I/O buffer.
1895 * NOTE: Due to synchronous operations in the write case b_cmd may
1896 * already be set to BUF_CMD_DONE and BIO_SYNC may have already
1897 * been cleared.
1899 * Use vm_token to interlock nsw_rcount/wcount wakeup?
1901 lwkt_gettoken(&vm_token);
1902 if (bio->bio_caller_info1.index & SWBIO_READ)
1903 nswptr = &nsw_rcount;
1904 else if (bio->bio_caller_info1.index & SWBIO_SYNC)
1905 nswptr = &nsw_wcount_sync;
1906 else
1907 nswptr = &nsw_wcount_async;
1908 bp->b_cmd = BUF_CMD_DONE;
1909 relpbuf(bp, nswptr);
1910 lwkt_reltoken(&vm_token);
1914 * Fault-in a potentially swapped page and remove the swap reference.
1916 * object must be held.
1918 static __inline void
1919 swp_pager_fault_page(vm_object_t object, vm_pindex_t pindex)
1921 struct vnode *vp;
1922 vm_page_t m;
1923 int error;
1925 ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
1927 if (object->type == OBJT_VNODE) {
1929 * Any swap related to a vnode is due to swapcache. We must
1930 * vget() the vnode in case it is not active (otherwise
1931 * vref() will panic). Calling vm_object_page_remove() will
1932 * ensure that any swap ref is removed interlocked with the
1933 * page. clean_only is set to TRUE so we don't throw away
1934 * dirty pages.
1936 vp = object->handle;
1937 error = vget(vp, LK_SHARED | LK_RETRY | LK_CANRECURSE);
1938 if (error == 0) {
1939 vm_object_page_remove(object, pindex, pindex + 1, TRUE);
1940 vput(vp);
1942 } else {
1944 * Otherwise it is a normal OBJT_SWAP object and we can
1945 * fault the page in and remove the swap.
1947 m = vm_fault_object_page(object, IDX_TO_OFF(pindex),
1948 VM_PROT_NONE,
1949 VM_FAULT_DIRTY | VM_FAULT_UNSWAP,
1950 &error);
1951 if (m)
1952 vm_page_unhold(m);
1957 swap_pager_swapoff(int devidx)
1959 vm_object_t object;
1960 struct swblock *swap;
1961 swblk_t v;
1962 int i;
1964 lwkt_gettoken(&vmobj_token);
1965 rescan:
1966 TAILQ_FOREACH(object, &vm_object_list, object_list) {
1967 if (object->type != OBJT_SWAP && object->type != OBJT_VNODE)
1968 continue;
1969 vm_object_hold(object);
1970 if (object->type == OBJT_SWAP || object->type == OBJT_VNODE) {
1971 RB_FOREACH(swap,
1972 swblock_rb_tree, &object->swblock_root) {
1973 for (i = 0; i < SWAP_META_PAGES; ++i) {
1974 v = swap->swb_pages[i];
1975 if (v != SWAPBLK_NONE &&
1976 BLK2DEVIDX(v) == devidx) {
1977 swp_pager_fault_page(
1978 object,
1979 swap->swb_index + i);
1980 vm_object_drop(object);
1981 goto rescan;
1986 vm_object_drop(object);
1988 lwkt_reltoken(&vmobj_token);
1991 * If we fail to locate all swblocks we just fail gracefully and
1992 * do not bother to restore paging on the swap device. If the
1993 * user wants to retry the user can retry.
1995 if (swdevt[devidx].sw_nused)
1996 return (1);
1997 else
1998 return (0);

/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.  All swp_*() routines must be called with
 *	the object held, because swap can be freed up by the low level
 *	vm_page code at almost any time.
 *
 *	Swap metadata is kept in a per-object red-black tree of swblock
 *	structures rather than linked directly into the object; the
 *	object itself only carries tracking counters.
 */
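
/*
 * For reference, each swblock maps a group of SWAP_META_PAGES page
 * indices to their assigned swap blocks.  A rough sketch of the
 * structure (see <vm/swap_pager.h> for the authoritative definition;
 * the field layout here is illustrative only):
 *
 *	struct swblock {
 *		RB_ENTRY(swblock) swb_entry;		red-black tree linkage
 *		vm_pindex_t	swb_index;		base page index
 *		int		swb_count;		number of valid entries
 *		swblk_t		swb_pages[SWAP_META_PAGES];
 *	};
 */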

/*
 * Lookup the swblock containing the specified swap block index.
 *
 * The caller must hold the object.
 */
static __inline
struct swblock *
swp_pager_lookup(vm_object_t object, vm_pindex_t index)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
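	/*
	 * Round the page index down to its swblock base; the RB tree
	 * is keyed on the base index of each SWAP_META_PAGES group.
	 */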
	index &= ~(vm_pindex_t)SWAP_META_MASK;
	return (RB_LOOKUP(swblock_rb_tree, &object->swblock_root, index));
}

/*
 * Remove a swblock from the RB tree.
 *
 * The caller must hold the object.
 */
static __inline
void
swp_pager_remove(vm_object_t object, struct swblock *swap)
{
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
	RB_REMOVE(swblock_rb_tree, &object->swblock_root, swap);
}

/*
 * Convert a default object to a swap object if necessary.
 *
 * The caller must hold the object.
 */
static void
swp_pager_meta_convert(vm_object_t object)
{
	if (object->type == OBJT_DEFAULT) {
		object->type = OBJT_SWAP;
		KKASSERT(object->swblock_count == 0);
	}
}

/*
 * SWP_PAGER_META_BUILD() -	add a swap block to the swap meta data
 *				for an object
 *
 * We first convert the object to a swap object if it is a default
 * object.  Vnode objects do not need to be converted.
 *
 * The specified swapblk is added to the object's swap metadata; any
 * swapblk previously assigned at that index is freed.  The caller
 * must pass a valid swapblk (this is asserted).
 *
 * The caller must hold the object.
 */
static void
swp_pager_meta_build(vm_object_t object, vm_pindex_t index, swblk_t swapblk)
{
	struct swblock *swap;
	struct swblock *oswap;
	swblk_t v;

	KKASSERT(swapblk != SWAPBLK_NONE);
	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/*
	 * Convert the object if necessary.
	 */
	if (object->type == OBJT_DEFAULT)
		swp_pager_meta_convert(object);

	/*
	 * Locate the swblock, creating it if it does not already exist.
	 * If the zone allocation fails we wait for memory to free up
	 * and retry, since the tree may have changed while we slept.
	 */
retry:
	swap = swp_pager_lookup(object, index);

	if (swap == NULL) {
		int i;

		swap = zalloc(swap_zone);
		if (swap == NULL) {
			vm_wait(0);
			goto retry;
		}
		swap->swb_index = index & ~(vm_pindex_t)SWAP_META_MASK;
		swap->swb_count = 0;

		++object->swblock_count;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
		oswap = RB_INSERT(swblock_rb_tree, &object->swblock_root, swap);
		KKASSERT(oswap == NULL);
	}

	/*
	 * Delete prior contents of the metadata.
	 *
	 * NOTE: Decrement swb_count after the freeing operation (which
	 *	 might block) to prevent racing destruction of the swblock.
	 */
	index &= SWAP_META_MASK;
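	/* index is now the slot offset within the swblock (0..SWAP_META_MASK) */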
	while ((v = swap->swb_pages[index]) != SWAPBLK_NONE) {
		swap->swb_pages[index] = SWAPBLK_NONE;
		/* can block */
		swp_pager_freeswapspace(object, v, 1);
		--swap->swb_count;
	}

	/*
	 * Enter the block into the metadata.
	 */
	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE)
		++swap->swb_count;
}

/*
 * SWP_PAGER_META_FREE() -	free a range of blocks in the object's
 *				swap metadata
 *
 * The requested range of blocks is freed, with any associated swap
 * returned to the swap bitmap.
 *
 * This routine will free swap metadata structures as they are cleaned
 * out.  This routine does *NOT* operate on swap metadata associated
 * with resident pages.
 *
 * The caller must hold the object.
 */
static int swp_pager_meta_free_callback(struct swblock *swb, void *data);

static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, vm_pindex_t count)
{
	struct swfreeinfo info;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));

	/*
	 * Nothing to do.
	 */
	if (object->swblock_count == 0) {
		KKASSERT(RB_EMPTY(&object->swblock_root));
		return;
	}
	if (count == 0)
		return;

	/*
	 * Setup for the RB tree scan.  Note that the pindex range can
	 * be huge due to the 64 bit page index space, so we cannot
	 * safely iterate it page by page.
	 */
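	/*
	 * rb_swblock_scancmp() restricts the scan to swblocks that
	 * overlap [begi, endi]; the callback clips the first and last
	 * blocks to the requested range.
	 */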
	info.object = object;
	info.basei = index & ~(vm_pindex_t)SWAP_META_MASK;
	info.begi = index;
	info.endi = index + count - 1;
	swblock_rb_tree_RB_SCAN(&object->swblock_root, rb_swblock_scancmp,
				swp_pager_meta_free_callback, &info);
}

/*
 * The caller must hold the object.
 */
static
int
swp_pager_meta_free_callback(struct swblock *swap, void *data)
{
	struct swfreeinfo *info = data;
	vm_object_t object = info->object;
	int index;
	int eindex;

	/*
	 * Figure out the range within the swblock.  The wider scan may
	 * return edge-case swap blocks when the start and/or end points
	 * are in the middle of a block.
	 */
	if (swap->swb_index < info->begi)
		index = (int)info->begi & SWAP_META_MASK;
	else
		index = 0;

	if (swap->swb_index + SWAP_META_PAGES > info->endi)
		eindex = (int)info->endi & SWAP_META_MASK;
	else
		eindex = SWAP_META_MASK;
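	/*
	 * For example, assuming SWAP_META_PAGES is 16: freeing pindexes
	 * 5-40 visits slots 5-15 of the first swblock, 0-15 of the
	 * middle one, and 0-8 of the last.
	 */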

	/*
	 * Scan and free the blocks.  The loop terminates early once
	 * (swap) runs out of blocks, because the swblock is freed at
	 * that point.
	 *
	 * NOTE: Decrement swb_count after swp_pager_freeswapspace()
	 *	 to deal with a zfree race.
	 */
	while (index <= eindex) {
		swblk_t v = swap->swb_pages[index];

		if (v != SWAPBLK_NONE) {
			swap->swb_pages[index] = SWAPBLK_NONE;
			/* can block */
			swp_pager_freeswapspace(object, v, 1);
			if (--swap->swb_count == 0) {
				swp_pager_remove(object, swap);
				zfree(swap_zone, swap);
				--object->swblock_count;
				break;
			}
		}
		++index;
	}
	/* swap may be invalid here due to the zfree above */
	return(0);
}

/*
 * SWP_PAGER_META_FREE_ALL() -	destroy all swap metadata associated
 *				with an object
 *
 * This routine locates and destroys all swap metadata associated with
 * an object.
 *
 * NOTE: Decrement swb_count after the freeing operation (which
 *	 might block) to prevent racing destruction of the swblock.
 *
 * The caller must hold the object.
 */
static void
swp_pager_meta_free_all(vm_object_t object)
{
	struct swblock *swap;
	int i;

	ASSERT_LWKT_TOKEN_HELD(vm_object_token(object));
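	/*
	 * Pop swblocks off the root of the RB tree one at a time; the
	 * root is re-fetched on every pass because the free operations
	 * below can block.
	 */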
	while ((swap = RB_ROOT(&object->swblock_root)) != NULL) {
		swp_pager_remove(object, swap);
		for (i = 0; i < SWAP_META_PAGES; ++i) {
			swblk_t v = swap->swb_pages[i];
			if (v != SWAPBLK_NONE) {
				/* can block */
				swp_pager_freeswapspace(object, v, 1);
				--swap->swb_count;
			}
		}
		if (swap->swb_count != 0)
			panic("swp_pager_meta_free_all: swb_count != 0");
		zfree(swap_zone, swap);
		--object->swblock_count;
	}
	KKASSERT(object->swblock_count == 0);
}

/*
 * SWP_PAGER_METACTL() -	misc control of swap and vm_page_t meta data
 *
 * This routine is capable of looking up, popping, or freeing
 * swapblk assignments in the swap meta data or in the vm_page_t.
 * It returns the swapblk being looked up or popped, or SWAPBLK_NONE
 * if the block was freed or if no block was assigned at that index.
 *
 * It is not possible to store invalid swapblks in the swap meta data
 * (other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 * When acting on a busy resident page and paging is in progress, we
 * have to wait until paging is complete but otherwise can act on the
 * busy page.
 *
 *	SWM_FREE	remove and free the swap block from the metadata
 *	SWM_POP		remove the swap block from the metadata but do
 *			not free it (pop it out)
 *
 * The caller must hold the object.
 */
static swblk_t
swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags)
{
	struct swblock *swap;
	swblk_t r1;

	if (object->swblock_count == 0)
		return(SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	swap = swp_pager_lookup(object, index);

	if (swap != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					swp_pager_remove(object, swap);
					zfree(swap_zone, swap);
					--object->swblock_count;
				}
			}
			/* swap ptr may be invalid */
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(object, r1, 1);
				r1 = SWAPBLK_NONE;
			}
		}
		/* swap ptr may be invalid */
	}
	return(r1);
}
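
/*
 * Hypothetical call patterns, for illustration only (these are not
 * actual call sites in this file):
 *
 *	blk = swp_pager_meta_ctl(obj, pindex, 0);	  lookup only
 *	blk = swp_pager_meta_ctl(obj, pindex, SWM_POP);	  take over the block
 *	(void)swp_pager_meta_ctl(obj, pindex, SWM_FREE);  discard the block
 */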