/*
 * Copyright (c) 1998,2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * New Swap System
 * Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *
 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
 * $DragonFly: src/sys/vm/swap_pager.c,v 1.32 2008/07/01 02:02:56 dillon Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/thread2.h>
#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER

#include "opt_swap.h"
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#include <vm/vnode_pager.h>

#include <sys/buf2.h>
#include <vm/vm_page2.h>
#define SWM_FREE	0x02	/* free, period */
#define SWM_POP		0x04	/* pop out */

#define SWBIO_READ	0x01
#define SWBIO_WRITE	0x02
#define SWBIO_SYNC	0x04
/*
 * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
 * in the old swap.
 */
extern int vm_swap_size;	/* number of free swap blocks, in pages */

int swap_pager_full;		/* swap space exhaustion (task killing) */
static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
static int nsw_rcount;		/* free read buffers */
static int nsw_wcount_sync;	/* limit write buffers / synchronous */
static int nsw_wcount_async;	/* limit write buffers / asynchronous */
static int nsw_wcount_async_max;/* assigned maximum */
static int nsw_cluster_max;	/* maximum VOP I/O allowed */
static int sw_alloc_interlock;	/* swap pager allocation interlock */

struct blist *swapblist;
static int swap_async_max = 4;	/* maximum in-progress async I/O's */
static int swap_burst_read = 0;	/* allow burst reading */

extern struct vnode *swapdev_vp;	/* from vm_swap.c */

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	   CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
SYSCTL_INT(_vm, OID_AUTO, swap_burst_read,
	   CTLFLAG_RW, &swap_burst_read, 0, "Allow burst reads for pageins");
/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
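/*
 * A quick worked example of the hash above: handles are pointers, so
 * the low 4 bits are mostly alignment and carry little information.
 * With NOBJLISTS == 8, a handle whose address ends in 0x240 maps to
 * list ((0x240 >> 4) & 7) == 4.
 */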
static struct pagerlst	swap_pager_object_list[NOBJLISTS];
struct pagerlst		swap_pager_un_object_list;
/*
 * Red-Black tree for swblock entries
 */
RB_GENERATE2(swblock_rb_tree, swblock, swb_entry, rb_swblock_compare,
	     vm_pindex_t, swb_index);
int
rb_swblock_compare(struct swblock *swb1, struct swblock *swb2)
{
	if (swb1->swb_index < swb2->swb_index)
		return(-1);
	if (swb1->swb_index > swb2->swb_index)
		return(1);
	return(0);
}
/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */

static vm_object_t
		swap_pager_alloc (void *handle, off_t size,
				  vm_prot_t prot, off_t offset);
static void	swap_pager_dealloc (vm_object_t object);
static int	swap_pager_getpage (vm_object_t, vm_page_t *, int);
static void	swap_pager_init (void);
static void	swap_pager_unswapped (vm_page_t);
static void	swap_pager_strategy (vm_object_t, struct bio *);
static void	swap_chain_iodone(struct bio *biox);

struct pagerops swappagerops = {
	swap_pager_init,	/* early system initialization of pager */
	swap_pager_alloc,	/* allocate an OBJT_SWAP object */
	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object */
	swap_pager_getpage,	/* pagein */
	swap_pager_putpages,	/* pageout */
	swap_pager_haspage,	/* get backing store status for page */
	swap_pager_unswapped,	/* remove swap related to page */
	swap_pager_strategy	/* pager strategy call */
};
/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * dev-bsized chunks in the old.  dmmax is always a power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */
static vm_zone_t swap_zone;
static int dmmax;
static int dmmax_mask;
int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */
static __inline void	swp_sizecheck (void);
static void	swp_pager_async_iodone (struct bio *bio);

/*
 * Swap bitmap functions
 */

static __inline void	swp_pager_freeswapspace (daddr_t blk, int npages);
static __inline daddr_t	swp_pager_getswapspace (int npages);

/*
 * Metadata functions
 */

static void swp_pager_meta_convert (vm_object_t);
static void swp_pager_meta_build (vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free (vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free_all (vm_object_t);
static daddr_t swp_pager_meta_ctl (vm_object_t, vm_pindex_t, int);
/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 *	This routine must be called at splvm()
 */
static __inline void
swp_sizecheck(void)
{
	if (vm_swap_size < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			kprintf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (vm_swap_size > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}
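/*
 * Worked example of the hysteresis above: with the default
 * nswap_lowat == 128 and nswap_hiwat == 512, the warning latches once
 * free swap drops below 128 pages and does not clear until free swap
 * rises back above 512 pages, so the message cannot flap while free
 * space oscillates in between.
 */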
/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */
static void
swap_pager_init(void)
{
	int i;

	/*
	 * Initialize object lists
	 */
	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	TAILQ_INIT(&swap_pager_un_object_list);

	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */
	dmmax = SWB_NPAGES * 2;
	dmmax_mask = ~(dmmax - 1);
}
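/*
 * Example stripe math: with MAX_PAGEOUT_CLUSTER at its default of 16,
 * dmmax is 32 pages and dmmax_mask is ~31, so two swap block numbers
 * lie in the same device stripe exactly when
 * ((blk1 ^ blk2) & dmmax_mask) == 0.  The I/O-building code below uses
 * this test to avoid constructing an I/O that crosses a stripe.
 */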
/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */
void
swap_pager_swap_init(void)
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the number of pages an XIO
	 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.   We will want
	 * at least 2 per swap devices, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */

	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;
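	/*
	 * Example partition: with nswbuf == 256 the above reserves 128
	 * pbufs for reads and 64 for synchronous writes, while
	 * asynchronous pageout writes stay capped at 4 in flight
	 * regardless of nswbuf.
	 */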
	/*
	 * The zone is dynamically allocated so generally size it to
	 * maxswzone (32MB to 512MB of KVM).  Set a minimum size based
	 * on physical memory of around 8x (each swblock can hold 16 pages).
	 *
	 * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio
	 * has increased dramatically.
	 */
	n = vmstats.v_page_count / 2;
	if (maxswzone && n < maxswzone / sizeof(struct swblock))
		n = maxswzone / sizeof(struct swblock);
	n2 = n;

	do {
		swap_zone = zinit(
			"SWAPMETA",
			sizeof(struct swblock),
			n,
			ZONE_INTERRUPT,
			1);
		if (swap_zone != NULL)
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);

	if (swap_zone == NULL)
		panic("swap_pager_swap_init: swap_zone == NULL");
	if (n2 != n)
		kprintf("Swap zone entries reduced from %d to %d.\n", n2, n);
}
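/*
 * Sizing note for the zone above: one swblock tracks up to 16 pages,
 * so reserving v_page_count / 2 swblock entries is enough metadata to
 * map roughly eight times physical memory worth of swap before the
 * zone limit is hit.
 */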
/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_convert().
 *
 *	This routine may block in vm_object_allocate() and create a named
 *	object lookup race, so we must interlock.  We must also run at
 *	splvm() for the object lookup to handle races with interrupts, but
 *	we do not have to maintain splvm() in between the lookup and the
 *	add because (I believe) it is not possible to attempt to create
 *	a new swap object w/handle when a default object with that handle
 *	already exists.
 */
static vm_object_t
swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset)
{
	vm_object_t object;

	if (handle) {
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() in regards to the lookup
		 * of the handle.
		 */
		while (sw_alloc_interlock) {
			sw_alloc_interlock = -1;
			tsleep(&sw_alloc_interlock, 0, "swpalc", 0);
		}
		sw_alloc_interlock = 1;

		object = vm_pager_object_lookup(NOBJLIST(handle), handle);

		if (object != NULL) {
			vm_object_reference(object);
		} else {
			object = vm_object_allocate(OBJT_DEFAULT,
				OFF_TO_IDX(offset + PAGE_MASK + size));
			object->handle = handle;
			swp_pager_meta_convert(object);
		}

		if (sw_alloc_interlock < 0)
			wakeup(&sw_alloc_interlock);
		sw_alloc_interlock = 0;
	} else {
		object = vm_object_allocate(OBJT_DEFAULT,
			OFF_TO_IDX(offset + PAGE_MASK + size));
		swp_pager_meta_convert(object);
	}

	return (object);
}
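/*
 * Example of the size conversion used above:
 * OFF_TO_IDX(offset + PAGE_MASK + size) rounds the byte range up to
 * whole pages, so a 10000 byte mapping on a machine with 4K pages
 * yields an object 3 pages long.
 */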
/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	This routine may block, but no longer does.
 *
 *	The object must be locked or unreferenceable.
 */
static void
swap_pager_dealloc(vm_object_t object)
{
	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */
	if (object->handle == NULL) {
		TAILQ_REMOVE(&swap_pager_un_object_list, object,
			     pager_object_list);
	} else {
		TAILQ_REMOVE(NOBJLIST(object->handle), object,
			     pager_object_list);
	}

	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	swp_pager_meta_free_all(object);
}
/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/
/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	Must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */
static __inline daddr_t
swp_pager_getswapspace(int npages)
{
	daddr_t blk;

	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
		if (swap_pager_full != 2) {
			kprintf("swap_pager_getswapspace: failed\n");
			swap_pager_full = 2;
			swap_pager_almost_full = 1;
		}
	} else {
		vm_swap_size -= npages;
		swp_sizecheck();
	}
	return(blk);
}
/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */
static __inline void
swp_pager_freeswapspace(daddr_t blk, int npages)
{
	blist_free(swapblist, blk, npages);
	vm_swap_size += npages;
	swp_sizecheck();
}
/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	This routine may be called at any spl.  We up our spl to splvm
 *	temporarily in order to perform the metadata removal.
 */
void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	crit_enter();
	swp_pager_meta_free(object, start, size);
	crit_exit();
}
/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int n = 0;
	daddr_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	crit_enter();
	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg,
							    start - beg);
					crit_exit();
					return(-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	crit_exit();
	return(0);
}
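/*
 * Allocation strategy note: the reservation loop above asks the blist
 * for a large contiguous run first and retries with smaller requests
 * when that fails, so a badly fragmented swap map degrades into a set
 * of smaller runs instead of failing the reservation outright.
 */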
/*
 * SWAP_PAGER_COPY() -	copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	This routine can be called at any spl
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be locked or
 *	inaccessible (XXX are they ?)
 */
void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
		vm_pindex_t offset, int destroysource)
{
	vm_pindex_t i;

	crit_enter();

	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */
	if (destroysource) {
		if (srcobject->handle == NULL) {
			TAILQ_REMOVE(
			    &swap_pager_un_object_list,
			    srcobject,
			    pager_object_list
			);
		} else {
			TAILQ_REMOVE(
			    NOBJLIST(srcobject->handle),
			    srcobject,
			    pager_object_list
			);
		}
	}

	/*
	 * transfer source to destination.
	 */
	for (i = 0; i < dstobject->size; ++i) {
		daddr_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */
		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			daddr_t srcaddr;

			srcaddr = swp_pager_meta_ctl(
			    srcobject,
			    i + offset,
			    SWM_POP
			);

			if (srcaddr != SWAPBLK_NONE)
				swp_pager_meta_build(dstobject, i, srcaddr);
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the sourceblock.
			 */
			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
	 * double-remove the object from the swap queues.
	 */
	if (destroysource) {
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		swp_pager_meta_free_all(srcobject);
		srcobject->type = OBJT_DEFAULT;
	}
	crit_exit();
}
/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 */
boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex,
		   int *before, int *after)
{
	daddr_t blk0;

	/*
	 * do we have good backing store at the requested index ?
	 */
	crit_enter();
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		crit_exit();
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */
	if (before != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk != blk0 - i)
				break;
		}
		*before = (i - 1);
	}

	/*
	 * find forward-looking contiguous good backing store
	 */
	if (after != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk != blk0 + i)
				break;
		}
		*after = (i - 1);
	}
	crit_exit();
	return (TRUE);
}
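/*
 * The before/after counts let callers such as the fault code size a
 * cluster around the requested page without re-probing the metadata.
 * The scan is capped at SWB_NPAGES/2 in each direction, so with the
 * default SWB_NPAGES of 16 at most 7 contiguous pages are reported on
 * either side.
 */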
/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not block
 *	This routine must be called at splvm()
 */
static void
swap_pager_unswapped(vm_page_t m)
{
	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}
/*
 * SWAP_PAGER_STRATEGY() - read, write, free blocks
 *
 *	This implements the vm_pager_strategy() interface to swap and allows
 *	other parts of the system to directly access swap as backing store
 *	through vm_objects of type OBJT_SWAP.  This is intended to be a
 *	cacheless interface ( i.e. caching occurs at higher levels ).
 *	Therefore we do not maintain any resident pages.  All I/O goes
 *	directly to and from the swap device.
 *
 *	We currently attempt to run I/O synchronously or asynchronously as
 *	the caller requests.  This isn't perfect because we lose error
 *	sequencing when we run multiple ops in parallel to satisfy a request.
 *	But this is swap, so we let it all hang out.
 */
static void
swap_pager_strategy(vm_object_t object, struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	struct bio *nbio;
	vm_pindex_t start;
	vm_pindex_t biox_blkno = 0;
	int count;
	char *data;
	struct bio *biox;
	struct buf *bufx;
	struct bio_track *track;

	/*
	 * tracking for swapdev vnode I/Os
	 */
	if (bp->b_cmd == BUF_CMD_READ)
		track = &swapdev_vp->v_track_read;
	else
		track = &swapdev_vp->v_track_write;

	if (bp->b_bcount & PAGE_MASK) {
		bp->b_error = EINVAL;
		bp->b_flags |= B_ERROR | B_INVAL;
		biodone(bio);
		kprintf("swap_pager_strategy: bp %p offset %lld size %d, "
			"not page bounded\n",
			bp, (long long)bio->bio_offset, (int)bp->b_bcount);
		return;
	}
	/*
	 * Clear error indication, initialize page index, count, data pointer.
	 */
	bp->b_error = 0;
	bp->b_flags &= ~B_ERROR;
	bp->b_resid = bp->b_bcount;

	start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT);
	count = howmany(bp->b_bcount, PAGE_SIZE);
	data = bp->b_data;
	/*
	 * Deal with BUF_CMD_FREEBLKS
	 */
	if (bp->b_cmd == BUF_CMD_FREEBLKS) {
		/*
		 * FREE PAGE(s) - destroy underlying swap that is no longer
		 *		  needed.
		 */
		swp_pager_meta_free(object, start, count);
		bp->b_resid = 0;
		biodone(bio);
		return;
	}
	/*
	 * We need to be able to create a new cluster of I/O's.  We cannot
	 * use the caller fields of the passed bio so push a new one.
	 *
	 * Because nbio is just a placeholder for the cluster links,
	 * we can biodone() the original bio instead of nbio to make
	 * things a bit more efficient.
	 */
	nbio = push_bio(bio);
	nbio->bio_offset = bio->bio_offset;
	nbio->bio_caller_info1.cluster_head = NULL;
	nbio->bio_caller_info2.cluster_tail = NULL;

	biox = NULL;
	bufx = NULL;
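	/*
	 * Cluster bookkeeping sketch: each physical buffer (bufx) built
	 * below covers one contiguous run of swap blocks and is linked
	 * through b_cluster_next off nbio's cluster_head.  Each bufx's
	 * embedded b_bio1 is dispatched to swapdev_vp and finishes in
	 * swap_chain_iodone(), which unlinks it and completes the
	 * original bio once the chain empties.
	 */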
	/*
	 * Execute read or write
	 */
	while (count > 0) {
		daddr_t blk;

		/*
		 * Obtain block.  If block not found and writing, allocate a
		 * new block and build it into the object.
		 */
		blk = swp_pager_meta_ctl(object, start, 0);
		if ((blk == SWAPBLK_NONE) && bp->b_cmd != BUF_CMD_READ) {
			blk = swp_pager_getswapspace(1);
			if (blk == SWAPBLK_NONE) {
				bp->b_error = ENOMEM;
				bp->b_flags |= B_ERROR;
				break;
			}
			swp_pager_meta_build(object, start, blk);
		}
		/*
		 * Do we have to flush our current collection?  Yes if:
		 *
		 *	- no swap block at this index
		 *	- swap block is not contiguous
		 *	- we cross a physical disk boundary in the
		 *	  stripe.
		 */
		if (
		    biox && (biox_blkno + btoc(bufx->b_bcount) != blk ||
			     ((biox_blkno ^ blk) & dmmax_mask)
		    )
		) {
			if (bp->b_cmd == BUF_CMD_READ) {
				++mycpu->gd_cnt.v_swapin;
				mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
			} else {
				++mycpu->gd_cnt.v_swapout;
				mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
				bufx->b_dirtyend = bufx->b_bcount;
			}

			/*
			 * Finished with this buf.
			 */
			KKASSERT(bufx->b_bcount != 0);
			if (bufx->b_cmd != BUF_CMD_READ)
				bufx->b_dirtyend = bufx->b_bcount;
			biox = NULL;
			bufx = NULL;
		}
		/*
		 * Add new swapblk to biox, instantiating biox if necessary.
		 * Zero-fill reads are able to take a shortcut.
		 */
		if (blk == SWAPBLK_NONE) {
			/*
			 * We can only get here if we are reading.  Since
			 * we are at splvm() we can safely modify b_resid,
			 * even if chain ops are in progress.
			 */
			bzero(data, PAGE_SIZE);
			bp->b_resid -= PAGE_SIZE;
		} else {
			if (biox == NULL) {
				/* XXX chain count > 4, wait to <= 4 */

				bufx = getpbuf(NULL);
				biox = &bufx->b_bio1;
				cluster_append(nbio, bufx);
				bufx->b_flags |= (bp->b_flags & B_ORDERED);
				bufx->b_cmd = bp->b_cmd;
				biox->bio_done = swap_chain_iodone;
				biox->bio_offset = (off_t)blk << PAGE_SHIFT;
				biox->bio_caller_info1.cluster_parent = nbio;
				biox_blkno = blk;
				bufx->b_bcount = 0;
				bufx->b_data = data;
			}
			bufx->b_bcount += PAGE_SIZE;
		}
		--count;
		++start;
		data += PAGE_SIZE;
	}
	/*
	 * Flush out last buffer
	 */
	if (biox) {
		if (bufx->b_cmd == BUF_CMD_READ) {
			++mycpu->gd_cnt.v_swapin;
			mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
		} else {
			++mycpu->gd_cnt.v_swapout;
			mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
			bufx->b_dirtyend = bufx->b_bcount;
		}
		KKASSERT(bufx->b_bcount);
		if (bufx->b_cmd != BUF_CMD_READ)
			bufx->b_dirtyend = bufx->b_bcount;
		/* biox, bufx = NULL */
	}
	/*
	 * Now initiate all the I/O.  Be careful looping on our chain as
	 * I/O's may complete while we are still initiating them.
	 */
	nbio->bio_caller_info2.cluster_tail = NULL;
	bufx = nbio->bio_caller_info1.cluster_head;

	while (bufx) {
		biox = &bufx->b_bio1;
		BUF_KERNPROC(bufx);
		bufx = bufx->b_cluster_next;
		vn_strategy(swapdev_vp, biox);
	}
	/*
	 * Completion of the cluster will also call biodone_chain(nbio).
	 * We never call biodone(nbio) so we don't have to worry about
	 * setting up a bio_done callback.  It's handled in the sub-IO.
	 */
}
static void
swap_chain_iodone(struct bio *biox)
{
	struct buf **nextp;
	struct buf *bufx;	/* chained sub-buffer */
	struct bio *nbio;	/* parent nbio with chain glue */
	struct buf *bp;		/* original bp associated with nbio */
	int chain_empty;

	bufx = biox->bio_buf;
	nbio = biox->bio_caller_info1.cluster_parent;
	bp = nbio->bio_buf;

	/*
	 * Update the original buffer
	 */
	KKASSERT(bp != NULL);
	if (bufx->b_flags & B_ERROR) {
		atomic_set_int(&bp->b_flags, B_ERROR);
		bp->b_error = bufx->b_error;
	} else if (bufx->b_resid != 0) {
		atomic_set_int(&bp->b_flags, B_ERROR);
		bp->b_error = EINVAL;
	} else {
		atomic_subtract_int(&bp->b_resid, bufx->b_bcount);
	}

	/*
	 * Remove us from the chain.
	 */
	spin_lock_wr(&bp->b_lock.lk_spinlock);
	nextp = &nbio->bio_caller_info1.cluster_head;
	while (*nextp != bufx) {
		KKASSERT(*nextp != NULL);
		nextp = &(*nextp)->b_cluster_next;
	}
	*nextp = bufx->b_cluster_next;
	chain_empty = (nbio->bio_caller_info1.cluster_head == NULL);
	spin_unlock_wr(&bp->b_lock.lk_spinlock);

	/*
	 * Clean up bufx.  If the chain is now empty we finish out
	 * the parent.  Note that we may be racing other completions
	 * so we must use the chain_empty status from above.
	 */
	if (chain_empty) {
		if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
			atomic_set_int(&bp->b_flags, B_ERROR);
			bp->b_error = EINVAL;
		}
		biodone_chain(nbio);
	}
	relpbuf(bufx, NULL);
}
/*
 * SWAP_PAGER_GETPAGES() - bring page in from swap
 *
 *	The requested page may have to be brought in from swap.  Calculate the
 *	swap block and bring in additional pages if possible.  All pages must
 *	have contiguous swap block assignments and reside in the same object.
 *
 *	The caller has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The caller has BUSY'd the page.  We should return with (*mpp) left busy,
 *	and any additional pages unbusied.
 *
 *	If the caller encounters a PG_RAM page it will pass it to us even though
 *	it may be valid and dirty.  We cannot overwrite the page in this case!
 *	The case is used to allow us to issue pure read-aheads.
 *
 *	NOTE! XXX This code does not entirely pipeline yet due to the fact that
 *	the PG_RAM page is validated at the same time as mreq.  What we
 *	really need to do is issue a separate read-ahead pbuf.
 */
static int
swap_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
{
	struct buf *bp;
	struct bio *bio;
	vm_page_t mreq;
	vm_page_t m;
	vm_offset_t kva;
	daddr_t blk;
	int i;
	int j;
	int raonly;
	vm_page_t marray[XIO_INTERNAL_PAGES];

	mreq = *mpp;

	if (mreq->object != object) {
		panic("swap_pager_getpages: object mismatch %p/%p",
		      object, mreq->object);
	}
	/*
	 * We don't want to overwrite a fully valid page as it might be
	 * dirty.  This case can occur when e.g. vm_fault hits a perfectly
	 * valid page with PG_RAM set.
	 *
	 * In this case we see if the next page is a suitable page-in
	 * candidate and if it is we issue read-ahead.  PG_RAM will be
	 * set on the last page of the read-ahead to continue the pipeline.
	 */
	if (mreq->valid == VM_PAGE_BITS_ALL) {
		if (swap_burst_read == 0 || mreq->pindex + 1 >= object->size)
			return(VM_PAGER_OK);
		crit_enter();
		blk = swp_pager_meta_ctl(object, mreq->pindex + 1, 0);
		if (blk == SWAPBLK_NONE) {
			crit_exit();
			return(VM_PAGER_OK);
		}
		m = vm_page_lookup(object, mreq->pindex + 1);
		if (m == NULL) {
			m = vm_page_alloc(object, mreq->pindex + 1,
					  VM_ALLOC_QUICK);
			if (m == NULL) {
				crit_exit();
				return(VM_PAGER_OK);
			}
		} else {
			if ((m->flags & PG_BUSY) || m->busy || m->valid) {
				crit_exit();
				return(VM_PAGER_OK);
			}
			vm_page_unqueue_nowakeup(m);
			vm_page_busy(m);
		}
		mreq = m;
		raonly = 1;
		crit_exit();
	} else {
		raonly = 0;
	}
	/*
	 * Try to block-read contiguous pages from swap if sequential,
	 * otherwise just read one page.  Contiguous pages from swap must
	 * reside within a single device stripe because the I/O cannot be
	 * broken up across multiple stripes.
	 *
	 * Note that blk and iblk can be SWAPBLK_NONE but the loop is
	 * set up such that the case(s) are handled implicitly.
	 */
	crit_enter();
	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
	marray[0] = mreq;

	for (i = 1; swap_burst_read &&
		    i < XIO_INTERNAL_PAGES &&
		    mreq->pindex + i < object->size; ++i) {
		daddr_t iblk;

		iblk = swp_pager_meta_ctl(object, mreq->pindex + i, 0);
		if (iblk != blk + i)
			break;
		if ((blk ^ iblk) & dmmax_mask)
			break;
		m = vm_page_lookup(object, mreq->pindex + i);
		if (m == NULL) {
			m = vm_page_alloc(object, mreq->pindex + i,
					  VM_ALLOC_QUICK);
			if (m == NULL)
				break;
		} else {
			if ((m->flags & PG_BUSY) || m->busy || m->valid)
				break;
			vm_page_unqueue_nowakeup(m);
			vm_page_busy(m);
		}
		marray[i] = m;
	}
	if (i > 1)
		vm_page_flag_set(marray[i - 1], PG_RAM);
	crit_exit();
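	/*
	 * Read-ahead illustration: a fault on pindex 10 backed by swap
	 * block 100 can pull in pindexes 11, 12, ... as long as they map
	 * to blocks 101, 102, ... within the same device stripe, up to
	 * XIO_INTERNAL_PAGES total.  PG_RAM on the last page primes the
	 * pipeline for the next fault.
	 */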
	/*
	 * If mreq is the requested page and we have nothing to do return
	 * VM_PAGER_FAIL.  If raonly is set mreq is just another read-ahead
	 * page and must be cleaned up.
	 */
	if (blk == SWAPBLK_NONE) {
		KKASSERT(i == 1);
		if (raonly) {
			vnode_pager_freepage(mreq);
			return(VM_PAGER_OK);
		}
		return(VM_PAGER_FAIL);
	}
	/*
	 * map our page(s) into kva for input
	 */
	bp = getpbuf(&nsw_rcount);
	bio = &bp->b_bio1;
	kva = (vm_offset_t)bp->b_kvabase;
	bcopy(marray, bp->b_xio.xio_pages, i * sizeof(vm_page_t));
	pmap_qenter(kva, bp->b_xio.xio_pages, i);

	bp->b_data = (caddr_t)kva;
	bp->b_bcount = PAGE_SIZE * i;
	bp->b_xio.xio_npages = i;
	bio->bio_done = swp_pager_async_iodone;
	bio->bio_offset = (off_t)blk << PAGE_SHIFT;
	bio->bio_caller_info1.index = SWBIO_READ;
	/*
	 * Set index.  If raonly set the index beyond the array so all
	 * the pages are treated the same, otherwise the original mreq is
	 * in the array.
	 */
	if (raonly)
		bio->bio_driver_info = (void *)(intptr_t)i;
	else
		bio->bio_driver_info = (void *)(intptr_t)0;
	for (j = 0; j < i; ++j)
		vm_page_flag_set(bp->b_xio.xio_pages[j], PG_SWAPINPROG);

	mycpu->gd_cnt.v_swapin++;
	mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;

	/*
	 * We still hold the lock on mreq, and our automatic completion routine
	 * does not remove it.
	 */
	vm_object_pip_add(object, bp->b_xio.xio_npages);
	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our m[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 */
	bp->b_cmd = BUF_CMD_READ;
	BUF_KERNPROC(bp);
	vn_strategy(swapdev_vp, bio);

	/*
	 * Wait for the page we want to complete.  PG_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the meta-data.
	 *
	 * If this is a read-ahead only we return immediately without
	 * waiting for I/O.
	 */
	if (raonly)
		return(VM_PAGER_OK);
1307 while ((mreq
->flags
& PG_SWAPINPROG
) != 0) {
1308 vm_page_flag_set(mreq
, PG_WANTED
| PG_REFERENCED
);
1309 mycpu
->gd_cnt
.v_intrans
++;
1310 if (tsleep(mreq
, 0, "swread", hz
*20)) {
1312 "swap_pager: indefinite wait buffer: "
1313 " offset: %lld, size: %ld\n",
1314 (long long)bio
->bio_offset
,
1322 * mreq is left bussied after completion, but all the other pages
1323 * are freed. If we had an unrecoverable read error the page will
1326 if (mreq
->valid
!= VM_PAGE_BITS_ALL
)
1327 return(VM_PAGER_ERROR
);
1329 return(VM_PAGER_OK
);
	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}
/*
 * swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	We support both OBJT_DEFAULT and OBJT_SWAP objects. DEFAULT objects
 *	are automatically converted to SWAP objects.
 *
 *	In a low memory situation we may block in vn_strategy(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */
void
swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
		    boolean_t sync, int *rtvals)
{
	int i;
	int n = 0;

	if (count && m[0]->object != object) {
		panic("swap_pager_putpages: object mismatch %p/%p",
		      object, m[0]->object);
	}
	/*
	 * Turn object into OBJT_SWAP
	 * check for bogus sysops
	 * force sync if not pageout process
	 */
	if (object->type == OBJT_DEFAULT)
		swp_pager_meta_convert(object);

	if (curthread != pagethread)
		sync = TRUE;
	/*
	 * Update nsw parameters from swap_async_max sysctl values.
	 * Do not let the sysop crash the machine with bogus numbers.
	 */
	if (swap_async_max != nsw_wcount_async_max) {
		/*
		 * limit range
		 */
		if ((n = swap_async_max) > nswbuf / 2)
			n = nswbuf / 2;
		if (n < 1)
			n = 1;
		swap_async_max = n;

		/*
		 * Adjust difference ( if possible ).  If the current async
		 * count is too low, we may not be able to make the adjustment
		 * at this time.
		 */
		crit_enter();
		n -= nsw_wcount_async_max;
		if (nsw_wcount_async + n >= 0) {
			nsw_wcount_async += n;
			nsw_wcount_async_max += n;
			wakeup(&nsw_wcount_async);
		}
		crit_exit();
	}
	/*
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */
	for (i = 0; i < count; i += n) {
		struct buf *bp;
		struct bio *bio;
		daddr_t blk;
		int j;

		/*
		 * Maximum I/O size is limited by a number of factors.
		 */
		n = min(BLIST_MAX_ALLOC, count - i);
		n = min(n, nsw_cluster_max);

		crit_enter();

		/*
		 * Get biggest block of swap we can.  If we fail, fall
		 * back and try to allocate a smaller block.  Don't go
		 * overboard trying to allocate space if it would overly
		 * fragment swap.
		 */
		while (
		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
		    n > 4
		) {
			n >>= 1;
		}
		if (blk == SWAPBLK_NONE) {
			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_FAIL;
			crit_exit();
			continue;
		}
		/*
		 * The I/O we are constructing cannot cross a physical
		 * disk boundary in the swap stripe.  Note: we are still
		 * at splvm().
		 */
		if ((blk ^ (blk + n)) & dmmax_mask) {
			j = ((blk + dmmax) & dmmax_mask) - blk;
			swp_pager_freeswapspace(blk + j, n - j);
			n = j;
		}
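		/*
		 * Trim example: with dmmax == 32, a 16 block run starting
		 * at blk 24 would cross the stripe boundary at 32.  Then
		 * j == 8, blocks 32..39 go back to the bitmap, and the
		 * I/O is trimmed to blocks 24..31.
		 */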
		/*
		 * All I/O parameters have been satisfied, build the I/O
		 * request and assign the swap space.
		 */
		if (sync == TRUE)
			bp = getpbuf(&nsw_wcount_sync);
		else
			bp = getpbuf(&nsw_wcount_async);
		bio = &bp->b_bio1;

		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);

		bp->b_bcount = PAGE_SIZE * n;
		bio->bio_offset = (off_t)blk << PAGE_SHIFT;

		for (j = 0; j < n; ++j) {
			vm_page_t mreq = m[i+j];

			swp_pager_meta_build(
			    mreq->object,
			    mreq->pindex,
			    blk + j
			);
			vm_page_dirty(mreq);
			rtvals[i+j] = VM_PAGER_OK;

			vm_page_flag_set(mreq, PG_SWAPINPROG);
			bp->b_xio.xio_pages[j] = mreq;
		}
		bp->b_xio.xio_npages = n;
		mycpu->gd_cnt.v_swapout++;
		mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;

		crit_exit();

		bp->b_dirtyoff = 0;		/* req'd for NFS */
		bp->b_dirtyend = bp->b_bcount;	/* req'd for NFS */
		bp->b_cmd = BUF_CMD_WRITE;
		bio->bio_caller_info1.index = SWBIO_WRITE;
		/*
		 * asynchronous
		 */
		if (sync == FALSE) {
			bio->bio_done = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			vn_strategy(swapdev_vp, bio);

			for (j = 0; j < n; ++j)
				rtvals[i+j] = VM_PAGER_PEND;
			continue;
		}
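		/*
		 * Note on VM_PAGER_PEND: the parent keeps its pip
		 * reference and soft-busy only for entries not marked
		 * PEND, so every page marked PEND above must be unbusied
		 * and have its pip reference dropped in
		 * swp_pager_async_iodone() when the write completes.
		 */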
		/*
		 * Issue synchronously.
		 *
		 * Wait for the sync I/O to complete, then update rtvals.
		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
		 * our async completion routine at the end, thus avoiding a
		 * double-free.
		 */
		bio->bio_caller_info1.index |= SWBIO_SYNC;
		bio->bio_done = biodone_sync;
		bio->bio_flags |= BIO_SYNC;
		vn_strategy(swapdev_vp, bio);
		biowait(bio, "swwrt");

		for (j = 0; j < n; ++j)
			rtvals[i+j] = VM_PAGER_PEND;

		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */
		swp_pager_async_iodone(bio);
	}
}
void
swap_pager_newswap(void)
{
	swp_sizecheck();
}
/*
 *	swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
 *	the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
 *	unbusy all pages except the 'main' request page.  For WRITE
 *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
 *	because we marked them all VM_PAGER_PEND on return from putpages ).
 *
 *	This routine may not block.
 */
static void
swp_pager_async_iodone(struct bio *bio)
{
	struct buf *bp = bio->bio_buf;
	vm_object_t object = NULL;
	int i;
	int *nswptr;
	/*
	 * report error
	 */
	if (bp->b_flags & B_ERROR) {
		kprintf(
		    "swap_pager: I/O error - %s failed; offset %lld,"
		    "size %ld, error %d\n",
		    ((bio->bio_caller_info1.index & SWBIO_READ) ?
			"pagein" : "pageout"),
		    (long long)bio->bio_offset,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}
	/*
	 * set object, raise to splvm().
	 */
	if (bp->b_xio.xio_npages)
		object = bp->b_xio.xio_pages[0]->object;
	crit_enter();

	/*
	 * remove the mapping for kernel virtual
	 */
	pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);
	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */
	for (i = 0; i < bp->b_xio.xio_npages; ++i) {
		vm_page_t m = bp->b_xio.xio_pages[i];
		if (bp->b_flags & B_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */

			if (bio->bio_caller_info1.index & SWBIO_READ) {
				/*
				 * When reading, reqpage needs to stay
				 * locked for the parent, but all other
				 * pages can be freed.  We still want to
				 * wakeup the parent waiting on the page,
				 * though.  ( also: pg_reqpage can be -1 and
				 * not match anything ).
				 *
				 * We have to wake specifically requested pages
				 * up too because we cleared PG_SWAPINPROG and
				 * someone may be waiting for that.
				 *
				 * NOTE: for reads, m->dirty will probably
				 * be overridden by the original caller of
				 * getpages so don't play cute tricks here.
				 *
				 * NOTE: We can't actually free the page from
				 * here, because this is an interrupt.  It
				 * is not legal to mess with object->memq
				 * from an interrupt.  Deactivate the page
				 * instead.
				 */
				m->valid = 0;
				vm_page_flag_clear(m, PG_ZERO);
				vm_page_flag_clear(m, PG_SWAPINPROG);

				/*
				 * bio_driver_info holds the requested page
				 * index.
				 */
				if (i != (int)(intptr_t)bio->bio_driver_info) {
					vm_page_deactivate(m);
					vm_page_wakeup(m);
				} else {
					vm_page_flash(m);
				}
				/*
				 * If i == bp->b_pager.pg_reqpage, do not wake
				 * the page up.  The caller needs to.
				 */
			} else {
				/*
				 * If a write error occurs, reactivate page
				 * so it doesn't clog the inactive list,
				 * then finish the I/O.
				 */
				vm_page_dirty(m);
				vm_page_flag_clear(m, PG_SWAPINPROG);
				vm_page_activate(m);
				vm_page_io_finish(m);
			}
		} else if (bio->bio_caller_info1.index & SWBIO_READ) {
			/*
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 *
			 * clear PG_ZERO in page.
			 *
			 * If not the requested page then deactivate it.
			 *
			 * Note that the requested page, reqpage, is left
			 * busied, but we still have to wake it up.  The
			 * other pages are released (unbusied) by
			 * vm_page_wakeup().  We do not set reqpage's
			 * valid bits here, it is up to the caller.
			 *
			 * NOTE: can't call pmap_clear_modify(m) from an
			 * interrupt thread, the pmap code may have to map
			 * non-kernel pmaps and currently asserts the case.
			 */
			/*pmap_clear_modify(m);*/
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
			vm_page_flag_clear(m, PG_ZERO | PG_SWAPINPROG);

			/*
			 * We have to wake specifically requested pages
			 * up too because we cleared PG_SWAPINPROG and
			 * could be waiting for it in getpages.  However,
			 * be sure to not unbusy getpages specifically
			 * requested page - getpages expects it to be
			 * left busy.
			 *
			 * bio_driver_info holds the requested page
			 */
			if (i != (int)(intptr_t)bio->bio_driver_info) {
				vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_flash(m);
			}
		} else {
			/*
			 * Mark the page clean but do not mess with the
			 * pmap-layer's modified state.  That state should
			 * also be clear since the caller protected the
			 * page VM_PROT_READ, but allow the case.
			 *
			 * We are in an interrupt, avoid pmap operations.
			 *
			 * If we have a severe page deficit, deactivate the
			 * page.  Do not try to cache it (which would also
			 * involve a pmap op), because the page might still
			 * be read-heavy.
			 */
			vm_page_undirty(m);
			vm_page_flag_clear(m, PG_SWAPINPROG);
			vm_page_io_finish(m);
			if (vm_page_count_severe())
				vm_page_deactivate(m);
#if 0
			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
				vm_page_protect(m, VM_PROT_READ);
#endif
		}
	}
	/*
	 * adjust pip.  NOTE: the original parent may still have its own
	 * pip refs on the object.
	 */
	if (object)
		vm_object_pip_wakeupn(object, bp->b_xio.xio_npages);
	/*
	 * Release the physical I/O buffer.
	 *
	 * NOTE: Due to synchronous operations in the write case b_cmd may
	 *	 already be set to BUF_CMD_DONE and BIO_SYNC may have already
	 *	 been cleared.
	 */
	if (bio->bio_caller_info1.index & SWBIO_READ)
		nswptr = &nsw_rcount;
	else if (bio->bio_caller_info1.index & SWBIO_SYNC)
		nswptr = &nsw_wcount_sync;
	else
		nswptr = &nsw_wcount_async;
	bp->b_cmd = BUF_CMD_DONE;
	relpbuf(bp, nswptr);
	crit_exit();
}
/************************************************************************
 *				SWAP META DATA				*
 ************************************************************************
 *
 *	These routines manipulate the swap metadata stored in the
 *	OBJT_SWAP object.  All swp_*() routines must be called at
 *	splvm() because swap can be freed up by the low level vm_page
 *	code which might be called from interrupts beyond what splbio() covers.
 *
 *	Swap metadata is implemented with a global hash and not directly
 *	linked into the object.  Instead the object simply contains
 *	appropriate tracking counters.
 */
/*
 * Lookup the swblock containing the specified swap block index.
 */
static struct swblock *
swp_pager_lookup(vm_object_t object, vm_pindex_t index)
{
	index &= ~SWAP_META_MASK;
	return (RB_LOOKUP(swblock_rb_tree, &object->swblock_root, index));
}
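/*
 * Metadata layout example: each swblock covers SWAP_META_PAGES
 * consecutive page indexes (16 in the default layout), keyed by the
 * first index it covers.  Masking with ~SWAP_META_MASK means indexes
 * 0..15 all resolve to the swblock keyed at 0, 16..31 to the one keyed
 * at 16, and so on.
 */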
/*
 * Remove a swblock from the RB tree.
 */
static void
swp_pager_remove(vm_object_t object, struct swblock *swap)
{
	RB_REMOVE(swblock_rb_tree, &object->swblock_root, swap);
}
/*
 * Convert default object to swap object if necessary
 */
static void
swp_pager_meta_convert(vm_object_t object)
{
	if (object->type == OBJT_DEFAULT) {
		object->type = OBJT_SWAP;
		KKASSERT(object->swblock_count == 0);

		if (object->handle != NULL) {
			TAILQ_INSERT_TAIL(
			    NOBJLIST(object->handle),
			    object,
			    pager_object_list
			);
		} else {
			TAILQ_INSERT_TAIL(
			    &swap_pager_un_object_list,
			    object,
			    pager_object_list
			);
		}
	}
}
/*
 * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
 *
 *	We first convert the object to a swap object if it is a default
 *	object.  Vnode objects do not need to be converted.
 *
 *	The specified swapblk is added to the object's swap metadata.  If
 *	the swapblk is not valid, it is freed instead.  Any previously
 *	assigned swapblk is freed.
 */
static void
swp_pager_meta_build(vm_object_t object, vm_pindex_t index, daddr_t swapblk)
{
	struct swblock *swap;
	struct swblock *oswap;

	KKASSERT(swapblk != SWAPBLK_NONE);

	/*
	 * Convert object if necessary
	 */
	if (object->type == OBJT_DEFAULT)
		swp_pager_meta_convert(object);

	/*
	 * Locate swblock.  If not found create, but if we aren't adding
	 * anything just return.  If we run out of space in the map we wait
	 * and, since the hash table may have changed, retry.
	 */
	swap = swp_pager_lookup(object, index);

	if (swap == NULL) {
		int i;

		swap = zalloc(swap_zone);
		swap->swb_index = index & ~SWAP_META_MASK;
		swap->swb_count = 0;

		++object->swblock_count;

		for (i = 0; i < SWAP_META_PAGES; ++i)
			swap->swb_pages[i] = SWAPBLK_NONE;
		oswap = RB_INSERT(swblock_rb_tree, &object->swblock_root, swap);
		KKASSERT(oswap == NULL);
	}

	/*
	 * Delete prior contents of metadata
	 */
	index &= SWAP_META_MASK;

	if (swap->swb_pages[index] != SWAPBLK_NONE) {
		swp_pager_freeswapspace(swap->swb_pages[index], 1);
		--swap->swb_count;
	}

	/*
	 * Enter block into metadata
	 */
	swap->swb_pages[index] = swapblk;
	if (swapblk != SWAPBLK_NONE)
		++swap->swb_count;
}
/*
 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
 *
 *	The requested range of blocks is freed, with any associated swap
 *	returned to the swap bitmap.
 *
 *	This routine will free swap metadata structures as they are cleaned
 *	out.  This routine does *NOT* operate on swap metadata associated
 *	with resident pages.
 *
 *	This routine must be called at splvm()
 */
static void
swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
{
	struct swblock *swap;

	if (object->type != OBJT_SWAP && object->type != OBJT_VNODE)
		return;

	while (count > 0) {
		swap = swp_pager_lookup(object, index);
		if (swap) {
			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];

			if (v != SWAPBLK_NONE) {
				swp_pager_freeswapspace(v, 1);
				swap->swb_pages[index & SWAP_META_MASK] =
					SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					swp_pager_remove(object, swap);
					zfree(swap_zone, swap);
					--object->swblock_count;
				}
			}
			--count;
			++index;
		} else {
			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
			count -= n;
			index += n;
		}
	}
}
/*
 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
 *
 *	This routine locates and destroys all swap metadata associated with
 *	an object.
 *
 *	This routine must be called at splvm()
 */
static void
swp_pager_meta_free_all(vm_object_t object)
{
	struct swblock *swap;
	int i;

	if (object->type != OBJT_SWAP && object->type != OBJT_VNODE)
		return;

	while ((swap = RB_ROOT(&object->swblock_root)) != NULL) {
		swp_pager_remove(object, swap);
		for (i = 0; i < SWAP_META_PAGES; ++i) {
			daddr_t v = swap->swb_pages[i];
			if (v != SWAPBLK_NONE) {
				--swap->swb_count;
				swp_pager_freeswapspace(v, 1);
			}
		}
		if (swap->swb_count != 0)
			panic("swap_pager_meta_free_all: swb_count != 0");
		zfree(swap_zone, swap);
		--object->swblock_count;
	}
	KKASSERT(object->swblock_count == 0);
}
/*
 * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
 *
 *	This routine is capable of looking up, popping, or freeing
 *	swapblk assignments in the swap meta data or in the vm_page_t.
 *	The routine typically returns the swapblk being looked-up, or popped,
 *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
 *	was invalid.  This routine will automatically free any invalid
 *	meta-data swapblks.
 *
 *	It is not possible to store invalid swapblks in the swap meta data
 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
 *
 *	When acting on a busy resident page and paging is in progress, we
 *	have to wait until paging is complete but otherwise can act on the
 *	busy page.
 *
 *	This routine must be called at splvm().
 *
 *	SWM_FREE	remove and free swap block from metadata
 *	SWM_POP		remove from meta data but do not free.. pop it out
 */
static daddr_t
swp_pager_meta_ctl(vm_object_t object, vm_pindex_t index, int flags)
{
	struct swblock *swap;
	daddr_t r1;

	/*
	 * The meta data only exists if the object is OBJT_SWAP
	 * and even then might not be allocated yet.
	 */
	if (object->type != OBJT_SWAP && object->type != OBJT_VNODE)
		return(SWAPBLK_NONE);

	r1 = SWAPBLK_NONE;
	swap = swp_pager_lookup(object, index);

	if (swap != NULL) {
		index &= SWAP_META_MASK;
		r1 = swap->swb_pages[index];

		if (r1 != SWAPBLK_NONE) {
			if (flags & SWM_FREE) {
				swp_pager_freeswapspace(r1, 1);
				r1 = SWAPBLK_NONE;
			}
			if (flags & (SWM_FREE|SWM_POP)) {
				swap->swb_pages[index] = SWAPBLK_NONE;
				if (--swap->swb_count == 0) {
					swp_pager_remove(object, swap);
					zfree(swap_zone, swap);
					--object->swblock_count;
				}
			}
		}
	}
	return(r1);
}