Kernel - Minor cleanup.
[dragonfly.git] / sys / vm / swap_pager.c
1 /*
2 * Copyright (c) 1998,2004 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
34 * Copyright (c) 1994 John S. Dyson
35 * Copyright (c) 1990 University of Utah.
36 * Copyright (c) 1991, 1993
37 * The Regents of the University of California. All rights reserved.
39 * This code is derived from software contributed to Berkeley by
40 * the Systems Programming Group of the University of Utah Computer
41 * Science Department.
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. All advertising materials mentioning features or use of this software
52 * must display the following acknowledgement:
53 * This product includes software developed by the University of
54 * California, Berkeley and its contributors.
55 * 4. Neither the name of the University nor the names of its contributors
56 * may be used to endorse or promote products derived from this software
57 * without specific prior written permission.
59 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
60 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
61 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
62 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
63 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
64 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
65 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
66 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
67 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
68 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69 * SUCH DAMAGE.
71 * New Swap System
72 * Matthew Dillon
74 * Radix Bitmap 'blists'.
76 * - The new swapper uses the new radix bitmap code. This should scale
77 * to arbitrarily small or arbitrarily large swap spaces and an almost
78 * arbitrary degree of fragmentation.
80 * Features:
82 * - on the fly reallocation of swap during putpages. The new system
83 * does not try to keep previously allocated swap blocks for dirty
84 * pages.
86 * - on the fly deallocation of swap
88 * - No more garbage collection required. Unnecessarily allocated swap
89 * blocks only exist for dirty vm_page_t's now and these are already
90 * cycled (in a high-load system) by the pager. We also do on-the-fly
91 * removal of invalidated swap blocks when a page is destroyed
92 * or renamed.
94 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
96 * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
98 * $FreeBSD: src/sys/vm/swap_pager.c,v 1.130.2.12 2002/08/31 21:15:55 dillon Exp $
99 * $DragonFly: src/sys/vm/swap_pager.c,v 1.32 2008/07/01 02:02:56 dillon Exp $
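/*
 * Editorial sketch of the radix-bitmap allocator the swapper sits on
 * (see sys/blist.h; the exact prototypes are assumed here, not quoted):
 * the bitmap supports cheap allocation and freeing of contiguous
 * extents, which is what makes the on-the-fly swap (de)allocation
 * described above practical.
 */
#if 0
	struct blist *bl;
	daddr_t blk;

	bl = blist_create(1024);		/* manage blocks 0..1023 */
	blk = blist_alloc(bl, 16);		/* grab a 16-block extent */
	if (blk != SWAPBLK_NONE)
		blist_free(bl, blk, 16);	/* give the extent back */
	blist_destroy(bl);
#endif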
102 #include <sys/param.h>
103 #include <sys/systm.h>
104 #include <sys/conf.h>
105 #include <sys/kernel.h>
106 #include <sys/proc.h>
107 #include <sys/buf.h>
108 #include <sys/vnode.h>
109 #include <sys/malloc.h>
110 #include <sys/vmmeter.h>
111 #include <sys/sysctl.h>
112 #include <sys/blist.h>
113 #include <sys/lock.h>
114 #include <sys/thread2.h>
116 #ifndef MAX_PAGEOUT_CLUSTER
117 #define MAX_PAGEOUT_CLUSTER 16
118 #endif
120 #define SWB_NPAGES MAX_PAGEOUT_CLUSTER
122 #include "opt_swap.h"
123 #include <vm/vm.h>
124 #include <vm/vm_object.h>
125 #include <vm/vm_page.h>
126 #include <vm/vm_pager.h>
127 #include <vm/vm_pageout.h>
128 #include <vm/swap_pager.h>
129 #include <vm/vm_extern.h>
130 #include <vm/vm_zone.h>
132 #include <sys/buf2.h>
133 #include <vm/vm_page2.h>
135 #define SWM_FREE 0x02 /* free, period */
136 #define SWM_POP 0x04 /* pop out */
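/*
 * Editorial note: these flags select the behavior of
 * swp_pager_meta_ctl(), defined near the end of this file. A sketch
 * of the three modes:
 *
 *	swp_pager_meta_ctl(obj, idx, 0)        - look up swapblk only
 *	swp_pager_meta_ctl(obj, idx, SWM_POP)  - remove from metadata,
 *						  keep the swap block
 *	swp_pager_meta_ctl(obj, idx, SWM_FREE) - remove and free it
 */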
139 * vm_swap_size is in page-sized chunks now. It was DEV_BSIZE'd chunks
140 * in the old system.
143 extern int vm_swap_size; /* number of free swap blocks, in pages */
145 int swap_pager_full; /* swap space exhaustion (task killing) */
146 static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
147 static int nsw_rcount; /* free read buffers */
148 static int nsw_wcount_sync; /* limit write buffers / synchronous */
149 static int nsw_wcount_async; /* limit write buffers / asynchronous */
150 static int nsw_wcount_async_max;/* assigned maximum */
151 static int nsw_cluster_max; /* maximum VOP I/O allowed */
152 static int sw_alloc_interlock; /* swap pager allocation interlock */
154 struct blist *swapblist;
155 static struct swblock **swhash;
156 static int swhash_mask;
157 static int swap_async_max = 4; /* maximum in-progress async I/O's */
159 extern struct vnode *swapdev_vp; /* from vm_swap.c */
161 SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
162 CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
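/*
 * Editorial note: this knob can be tuned at runtime, e.g.
 * "sysctl vm.swap_async_max=8"; swap_pager_putpages() folds the new
 * value into nsw_wcount_async_max on its next pass (see Step 2 there).
 */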
165 * "named" and "unnamed" anon region objects. Try to reduce the overhead
166 * of searching a named list by hashing it just a little.
169 #define NOBJLISTS 8
171 #define NOBJLIST(handle) \
172 (&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
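/*
 * Worked example (editorial): a handle of 0x1230 hashes to list
 * (0x1230 >> 4) & (NOBJLISTS - 1) == 0x123 & 7 == 3, so such objects
 * live on swap_pager_object_list[3].
 */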
174 static struct pagerlst swap_pager_object_list[NOBJLISTS];
175 struct pagerlst swap_pager_un_object_list;
176 vm_zone_t swap_zone;
179 * pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure
180 * calls hooked from other parts of the VM system and do not appear here.
181 * (see vm/swap_pager.h).
184 static vm_object_t
185 swap_pager_alloc (void *handle, off_t size,
186 vm_prot_t prot, off_t offset);
187 static void swap_pager_dealloc (vm_object_t object);
188 static int swap_pager_getpages (vm_object_t, vm_page_t *, int, int);
189 static void swap_pager_init (void);
190 static void swap_pager_unswapped (vm_page_t);
191 static void swap_pager_strategy (vm_object_t, struct bio *);
192 static void swap_chain_iodone(struct bio *biox);
194 struct pagerops swappagerops = {
195 swap_pager_init, /* early system initialization of pager */
196 swap_pager_alloc, /* allocate an OBJT_SWAP object */
197 swap_pager_dealloc, /* deallocate an OBJT_SWAP object */
198 swap_pager_getpages, /* pagein */
199 swap_pager_putpages, /* pageout */
200 swap_pager_haspage, /* get backing store status for page */
201 swap_pager_unswapped, /* remove swap related to page */
202 swap_pager_strategy /* pager strategy call */
206 * dmmax is in page-sized chunks with the new swap system. It was
207 * dev-bsized chunks in the old. dmmax is always a power of 2.
209 * swap_*() routines are externally accessible. swp_*() routines are
210 * internal.
213 int dmmax;
214 static int dmmax_mask;
215 int nswap_lowat = 128; /* in pages, swap_pager_almost_full warn */
216 int nswap_hiwat = 512; /* in pages, swap_pager_almost_full warn */
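/*
 * Editorial example of the hysteresis: with the defaults above, the
 * "out of swap space" warning fires once free swap drops below 128
 * pages and is only re-armed after it climbs back over 512 pages, so
 * the console is not flooded while free swap hovers near the limit.
 */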
218 static __inline void swp_sizecheck (void);
219 static void swp_pager_async_iodone (struct bio *bio);
222 * Swap bitmap functions
225 static __inline void swp_pager_freeswapspace (daddr_t blk, int npages);
226 static __inline daddr_t swp_pager_getswapspace (int npages);
229 * Metadata functions
232 static void swp_pager_meta_build (vm_object_t, vm_pindex_t, daddr_t);
233 static void swp_pager_meta_free (vm_object_t, vm_pindex_t, daddr_t);
234 static void swp_pager_meta_free_all (vm_object_t);
235 static daddr_t swp_pager_meta_ctl (vm_object_t, vm_pindex_t, int);
238 * SWP_SIZECHECK() - update swap_pager_full indication
240 * update the swap_pager_almost_full indication and warn when we are
241 * about to run out of swap space, using lowat/hiwat hysteresis.
243 * Clear swap_pager_full ( task killing ) indication when lowat is met.
245 * No restrictions on call
246 * This routine may not block.
247 * This routine must be called at splvm()
250 static __inline void
251 swp_sizecheck(void)
253 if (vm_swap_size < nswap_lowat) {
254 if (swap_pager_almost_full == 0) {
255 kprintf("swap_pager: out of swap space\n");
256 swap_pager_almost_full = 1;
258 } else {
259 swap_pager_full = 0;
260 if (vm_swap_size > nswap_hiwat)
261 swap_pager_almost_full = 0;
266 * SWAP_PAGER_INIT() - initialize the swap pager!
268 * Expected to be started from system init. NOTE: This code is run
269 * before much else so be careful what you depend on. Most of the VM
270 * system has yet to be initialized at this point.
273 static void
274 swap_pager_init(void)
277 * Initialize object lists
279 int i;
281 for (i = 0; i < NOBJLISTS; ++i)
282 TAILQ_INIT(&swap_pager_object_list[i]);
283 TAILQ_INIT(&swap_pager_un_object_list);
286 * Device Stripe, in PAGE_SIZE'd blocks
289 dmmax = SWB_NPAGES * 2;
290 dmmax_mask = ~(dmmax - 1);
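/*
 * Editorial sketch (hypothetical helper, not in the original file):
 * with MAX_PAGEOUT_CLUSTER == 16, dmmax is 32 pages, and two block
 * numbers fall in the same device stripe iff their stripe bits agree.
 * This is the same test getpages/putpages/strategy use to keep a
 * single I/O from crossing a stripe boundary.
 */
#if 0
static __inline int
swp_same_stripe(daddr_t blk1, daddr_t blk2)
{
	return (((blk1 ^ blk2) & dmmax_mask) == 0);
}
#endif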
294 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
296 * Expected to be started from pageout process once, prior to entering
297 * its main loop.
300 void
301 swap_pager_swap_init(void)
303 int n, n2;
306 * Number of in-transit swap bp operations. Don't
307 * exhaust the pbufs completely. Make sure we
308 * initialize workable values (0 will work for hysteresis
309 * but it isn't very efficient).
311 * The nsw_cluster_max is constrained by the number of pages an XIO
312 * holds, i.e., (MAXPHYS/PAGE_SIZE) and our locally defined
313 * MAX_PAGEOUT_CLUSTER. Also be aware that swap ops are
314 * constrained by the swap device interleave stripe size.
316 * Currently we hardwire nsw_wcount_async to 4. This limit is
317 * designed to prevent other I/O from having high latencies due to
318 * our pageout I/O. The value 4 works well for one or two active swap
319 * devices but is probably a little low if you have more. Even so,
320 * a higher value would probably generate only a limited improvement
321 * with three or four active swap devices since the system does not
322 * typically have to pageout at extreme bandwidths. We will want
323 * at least 2 per swap device, and 4 is a pretty good value if you
324 * have one NFS swap device due to the command/ack latency over NFS.
325 * So it all works out pretty well.
328 nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
330 nsw_rcount = (nswbuf + 1) / 2;
331 nsw_wcount_sync = (nswbuf + 3) / 4;
332 nsw_wcount_async = 4;
333 nsw_wcount_async_max = nsw_wcount_async;
336 * The zone is dynamically allocated so generally size it to
337 * maxswzone (32MB to 512MB of KVM). Set a minimum size based
338 * on physical memory of around 8x (each swblock can hold 16 pages).
340 * With the advent of SSDs (vs HDs) the practical (swap:memory) ratio
341 * has increased dramatically.
343 n = vmstats.v_page_count / 2;
344 if (maxswzone && n < maxswzone / sizeof(struct swblock))
345 n = maxswzone / sizeof(struct swblock);
346 n2 = n;
348 do {
349 swap_zone = zinit(
350 "SWAPMETA",
351 sizeof(struct swblock),
353 ZONE_INTERRUPT,
355 if (swap_zone != NULL)
356 break;
358 * if the allocation failed, try a zone two thirds the
359 * size of the previous attempt.
361 n -= ((n + 2) / 3);
362 } while (n > 0);
364 if (swap_zone == NULL)
365 panic("swap_pager_swap_init: swap_zone == NULL");
366 if (n2 != n)
367 kprintf("Swap zone entries reduced from %d to %d.\n", n2, n);
368 n2 = n;
371 * Initialize our meta-data hash table. The swapper does not need to
372 * be quite as efficient as the VM system, so we do not use an
373 * oversized hash table.
375 * n: size of hash table, must be power of 2
376 * swhash_mask: hash table index mask
379 for (n = 1; n < n2 / 8; n *= 2)
380 ;
382 swhash = kmalloc(sizeof(struct swblock *) * n, M_VMPGDATA,
383 M_WAITOK | M_ZERO);
385 swhash_mask = n - 1;
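/*
 * Editorial sizing example: with n2 == 100000 zone entries the loop
 * above stops at the first power of 2 >= n2/8 (12500), i.e. n ==
 * 16384, so the table has 16384 buckets and swhash_mask == 0x3fff.
 */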
389 * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate
390 * its metadata structures.
392 * This routine is called from the mmap and fork code to create a new
393 * OBJT_SWAP object. We do this by creating an OBJT_DEFAULT object
394 * and then converting it with swp_pager_meta_build().
396 * This routine may block in vm_object_allocate() and create a named
397 * object lookup race, so we must interlock. We must also run at
398 * splvm() for the object lookup to handle races with interrupts, but
399 * we do not have to maintain splvm() in between the lookup and the
400 * add because (I believe) it is not possible to attempt to create
401 * a new swap object w/handle when a default object with that handle
402 * already exists.
405 static vm_object_t
406 swap_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t offset)
408 vm_object_t object;
410 if (handle) {
412 * Reference existing named region or allocate new one. There
413 * should not be a race here against swp_pager_meta_build()
414 * as called from vm_page_remove() in regards to the lookup
415 * of the handle.
418 while (sw_alloc_interlock) {
419 sw_alloc_interlock = -1;
420 tsleep(&sw_alloc_interlock, 0, "swpalc", 0);
422 sw_alloc_interlock = 1;
424 object = vm_pager_object_lookup(NOBJLIST(handle), handle);
426 if (object != NULL) {
427 vm_object_reference(object);
428 } else {
429 object = vm_object_allocate(OBJT_DEFAULT,
430 OFF_TO_IDX(offset + PAGE_MASK + size));
431 object->handle = handle;
433 swp_pager_meta_build(object, 0, SWAPBLK_NONE);
436 if (sw_alloc_interlock < 0)
437 wakeup(&sw_alloc_interlock);
439 sw_alloc_interlock = 0;
440 } else {
441 object = vm_object_allocate(OBJT_DEFAULT,
442 OFF_TO_IDX(offset + PAGE_MASK + size));
444 swp_pager_meta_build(object, 0, SWAPBLK_NONE);
447 return (object);
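/*
 * Editorial sketch: creating an anonymous (handle == NULL) swap-backed
 * object large enough for npages pages. Real callers normally arrive
 * through the swappagerops table rather than calling this routine
 * directly; the variable names below are illustrative only.
 */
#if 0
	vm_object_t obj;

	obj = swap_pager_alloc(NULL, (off_t)npages << PAGE_SHIFT,
			       VM_PROT_ALL, 0);
#endif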
451 * SWAP_PAGER_DEALLOC() - remove swap metadata from object
453 * The swap backing for the object is destroyed. The code is
454 * designed such that we can reinstantiate it later, but this
455 * routine is typically called only when the entire object is
456 * about to be destroyed.
458 * This routine may block, but no longer does.
460 * The object must be locked or unreferenceable.
463 static void
464 swap_pager_dealloc(vm_object_t object)
467 * Remove from list right away so lookups will fail if we block for
468 * pageout completion.
471 if (object->handle == NULL) {
472 TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
473 } else {
474 TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
477 vm_object_pip_wait(object, "swpdea");
480 * Free all remaining metadata. We only bother to free it from
481 * the swap meta data. We do not attempt to free swapblk's still
482 * associated with vm_page_t's for this object. We do not care
483 * if paging is still in progress on some objects.
485 crit_enter();
486 swp_pager_meta_free_all(object);
487 crit_exit();
490 /************************************************************************
491 * SWAP PAGER BITMAP ROUTINES *
492 ************************************************************************/
495 * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space
497 * Allocate swap for the requested number of pages. The starting
498 * swap block number (a page index) is returned or SWAPBLK_NONE
499 * if the allocation failed.
501 * Also has the side effect of advising that somebody made a mistake
502 * when they configured swap and didn't configure enough.
504 * Must be called at splvm() to avoid races with bitmap frees from
505 * vm_page_remove() aka swap_pager_page_removed().
507 * This routine may not block
508 * This routine must be called at splvm().
511 static __inline daddr_t
512 swp_pager_getswapspace(int npages)
514 daddr_t blk;
516 if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
517 if (swap_pager_full != 2) {
518 kprintf("swap_pager_getswapspace: failed\n");
519 swap_pager_full = 2;
520 swap_pager_almost_full = 1;
522 } else {
523 vm_swap_size -= npages;
524 swp_sizecheck();
526 return(blk);
530 * SWP_PAGER_FREESWAPSPACE() - free raw swap space
532 * This routine returns the specified swap blocks back to the bitmap.
534 * Note: This routine may not block (it could in the old swap code),
535 * and through the use of the new blist routines it does not block.
537 * We must be called at splvm() to avoid races with bitmap frees from
538 * vm_page_remove() aka swap_pager_page_removed().
540 * This routine may not block
541 * This routine must be called at splvm().
544 static __inline void
545 swp_pager_freeswapspace(daddr_t blk, int npages)
547 blist_free(swapblist, blk, npages);
548 vm_swap_size += npages;
549 swp_sizecheck();
553 * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page
554 * range within an object.
556 * This is a globally accessible routine.
558 * This routine removes swapblk assignments from swap metadata.
560 * The external callers of this routine typically have already destroyed
561 * or renamed vm_page_t's associated with this range in the object so
562 * we should be ok.
564 * This routine may be called at any spl. We up our spl to splvm temporarily
565 * in order to perform the metadata removal.
568 void
569 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
571 crit_enter();
572 swp_pager_meta_free(object, start, size);
573 crit_exit();
577 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
579 * Assigns swap blocks to the specified range within the object. The
580 * swap blocks are not zeroed. Any previous swap assignment is destroyed.
582 * Returns 0 on success, -1 on failure.
586 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
588 int n = 0;
589 daddr_t blk = SWAPBLK_NONE;
590 vm_pindex_t beg = start; /* save start index */
592 crit_enter();
593 while (size) {
594 if (n == 0) {
595 n = BLIST_MAX_ALLOC;
596 while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
597 n >>= 1;
598 if (n == 0) {
599 swp_pager_meta_free(object, beg, start - beg);
600 crit_exit();
601 return(-1);
605 swp_pager_meta_build(object, start, blk);
606 --size;
607 ++start;
608 ++blk;
609 --n;
611 swp_pager_meta_free(object, start, n);
612 crit_exit();
613 return(0);
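/*
 * Editorial sketch of a caller: pre-assign swap to the first 64 pages
 * of an object and bail out if the space cannot be found. On failure
 * the routine above has already freed any partial assignment.
 */
#if 0
	if (swap_pager_reserve(object, 0, 64) != 0)
		kprintf("swap_pager_reserve: no space\n");
#endif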
617 * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager
618 * and destroy the source.
620 * Copy any valid swapblks from the source to the destination. In
621 * cases where both the source and destination have a valid swapblk,
622 * we keep the destination's.
624 * This routine is allowed to block. It may block allocating metadata
625 * indirectly through swp_pager_meta_build() or if paging is still in
626 * progress on the source.
628 * This routine can be called at any spl
630 * XXX vm_page_collapse() kinda expects us not to block because we
631 * supposedly do not need to allocate memory, but for the moment we
632 * *may* have to get a little memory from the zone allocator, but
633 * it is taken from the interrupt memory. We should be ok.
635 * The source object contains no vm_page_t's (which is just as well)
637 * The source object is of type OBJT_SWAP.
639 * The source and destination objects must be locked or
640 * inaccessible (XXX are they ?)
643 void
644 swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
645 vm_pindex_t offset, int destroysource)
647 vm_pindex_t i;
649 crit_enter();
652 * If destroysource is set, we remove the source object from the
653 * swap_pager internal queue now.
656 if (destroysource) {
657 if (srcobject->handle == NULL) {
658 TAILQ_REMOVE(
659 &swap_pager_un_object_list,
660 srcobject,
661 pager_object_list
663 } else {
664 TAILQ_REMOVE(
665 NOBJLIST(srcobject->handle),
666 srcobject,
667 pager_object_list
673 * transfer source to destination.
676 for (i = 0; i < dstobject->size; ++i) {
677 daddr_t dstaddr;
680 * Locate (without changing) the swapblk on the destination,
681 * unless it is invalid in which case free it silently, or
682 * if the destination is a resident page, in which case the
683 * source is thrown away.
686 dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
688 if (dstaddr == SWAPBLK_NONE) {
690 * Destination has no swapblk and is not resident,
691 * copy source.
693 daddr_t srcaddr;
695 srcaddr = swp_pager_meta_ctl(
696 srcobject,
697 i + offset,
698 SWM_POP
701 if (srcaddr != SWAPBLK_NONE)
702 swp_pager_meta_build(dstobject, i, srcaddr);
703 } else {
705 * Destination has valid swapblk or it is represented
706 * by a resident page. We destroy the sourceblock.
709 swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
714 * Free left over swap blocks in source.
716 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
717 * double-remove the object from the swap queues.
720 if (destroysource) {
721 swp_pager_meta_free_all(srcobject);
723 * Reverting the type is not necessary, the caller is going
724 * to destroy srcobject directly, but I'm doing it here
725 * for consistency since we've removed the object from its
726 * queues.
728 srcobject->type = OBJT_DEFAULT;
730 crit_exit();
734 * SWAP_PAGER_HASPAGE() - determine if we have good backing store for
735 * the requested page.
737 * We determine whether good backing store exists for the requested
738 * page and return TRUE if it does, FALSE if it doesn't.
740 * If TRUE, we also try to determine how much valid, contiguous backing
741 * store exists before and after the requested page within a reasonable
742 * distance. We do not try to restrict it to the swap device stripe
743 * (that is handled in getpages/putpages). It probably isn't worth
744 * doing here.
747 boolean_t
748 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
749 int *after)
751 daddr_t blk0;
754 * do we have good backing store at the requested index ?
757 crit_enter();
758 blk0 = swp_pager_meta_ctl(object, pindex, 0);
760 if (blk0 == SWAPBLK_NONE) {
761 crit_exit();
762 if (before)
763 *before = 0;
764 if (after)
765 *after = 0;
766 return (FALSE);
770 * find backwards-looking contiguous good backing store
773 if (before != NULL) {
774 int i;
776 for (i = 1; i < (SWB_NPAGES/2); ++i) {
777 daddr_t blk;
779 if (i > pindex)
780 break;
781 blk = swp_pager_meta_ctl(object, pindex - i, 0);
782 if (blk != blk0 - i)
783 break;
785 *before = (i - 1);
789 * find forward-looking contiguous good backing store
792 if (after != NULL) {
793 int i;
795 for (i = 1; i < (SWB_NPAGES/2); ++i) {
796 daddr_t blk;
798 blk = swp_pager_meta_ctl(object, pindex + i, 0);
799 if (blk != blk0 + i)
800 break;
802 *after = (i - 1);
804 crit_exit();
805 return (TRUE);
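/*
 * Editorial sketch of a caller probing for a read cluster around
 * pindex; 'before'/'after' report how many contiguous backed pages
 * precede and follow it.
 */
#if 0
	int before, after;

	if (swap_pager_haspage(object, pindex, &before, &after)) {
		kprintf("swap backing: %d before, %d after pindex %ld\n",
			before, after, (long)pindex);
	}
#endif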
809 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
811 * This removes any associated swap backing store, whether valid or
812 * not, from the page.
814 * This routine is typically called when a page is made dirty, at
815 * which point any associated swap can be freed. MADV_FREE also
816 * calls us in a special-case situation.
818 * NOTE!!! If the page is clean and the swap was valid, the caller
819 * should make the page dirty before calling this routine. This routine
820 * does NOT change the m->dirty status of the page. Also: MADV_FREE
821 * depends on it.
823 * This routine may not block
824 * This routine must be called at splvm()
827 static void
828 swap_pager_unswapped(vm_page_t m)
830 swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
834 * SWAP_PAGER_STRATEGY() - read, write, free blocks
836 * This implements the vm_pager_strategy() interface to swap and allows
837 * other parts of the system to directly access swap as backing store
838 * through vm_objects of type OBJT_SWAP. This is intended to be a
839 * cacheless interface ( i.e. caching occurs at higher levels ).
840 * Therefore we do not maintain any resident pages. All I/O goes
841 * directly to and from the swap device.
843 * We currently attempt to run I/O synchronously or asynchronously as
844 * the caller requests. This isn't perfect because we lose error
845 * sequencing when we run multiple ops in parallel to satisfy a request.
846 * But this is swap, so we let it all hang out.
849 static void
850 swap_pager_strategy(vm_object_t object, struct bio *bio)
852 struct buf *bp = bio->bio_buf;
853 struct bio *nbio;
854 vm_pindex_t start;
855 vm_pindex_t biox_blkno = 0;
856 int count;
857 char *data;
858 struct bio *biox;
859 struct buf *bufx;
860 struct bio_track *track;
863 * tracking for swapdev vnode I/Os
865 if (bp->b_cmd == BUF_CMD_READ)
866 track = &swapdev_vp->v_track_read;
867 else
868 track = &swapdev_vp->v_track_write;
870 if (bp->b_bcount & PAGE_MASK) {
871 bp->b_error = EINVAL;
872 bp->b_flags |= B_ERROR | B_INVAL;
873 biodone(bio);
874 kprintf("swap_pager_strategy: bp %p offset %lld size %d, "
875 "not page bounded\n",
876 bp, (long long)bio->bio_offset, (int)bp->b_bcount);
877 return;
881 * Clear error indication, initialize page index, count, data pointer.
883 bp->b_error = 0;
884 bp->b_flags &= ~B_ERROR;
885 bp->b_resid = bp->b_bcount;
887 start = (vm_pindex_t)(bio->bio_offset >> PAGE_SHIFT);
888 count = howmany(bp->b_bcount, PAGE_SIZE);
889 data = bp->b_data;
892 * Deal with BUF_CMD_FREEBLKS
894 if (bp->b_cmd == BUF_CMD_FREEBLKS) {
896 * FREE PAGE(s) - destroy underlying swap that is no longer
897 * needed.
899 swp_pager_meta_free(object, start, count);
900 bp->b_resid = 0;
901 biodone(bio);
902 return;
906 * We need to be able to create a new cluster of I/O's. We cannot
907 * use the caller fields of the passed bio so push a new one.
909 * Because nbio is just a placeholder for the cluster links,
910 * we can biodone() the original bio instead of nbio to make
911 * things a bit more efficient.
913 nbio = push_bio(bio);
914 nbio->bio_offset = bio->bio_offset;
915 nbio->bio_caller_info1.cluster_head = NULL;
916 nbio->bio_caller_info2.cluster_tail = NULL;
918 biox = NULL;
919 bufx = NULL;
922 * Execute read or write
924 while (count > 0) {
925 daddr_t blk;
928 * Obtain block. If block not found and writing, allocate a
929 * new block and build it into the object.
931 blk = swp_pager_meta_ctl(object, start, 0);
932 if ((blk == SWAPBLK_NONE) && bp->b_cmd != BUF_CMD_READ) {
933 blk = swp_pager_getswapspace(1);
934 if (blk == SWAPBLK_NONE) {
935 bp->b_error = ENOMEM;
936 bp->b_flags |= B_ERROR;
937 break;
939 swp_pager_meta_build(object, start, blk);
943 * Do we have to flush our current collection? Yes if:
945 * - no swap block at this index
946 * - swap block is not contiguous
947 * - we cross a physical disk boundary in the
948 * stripe.
950 if (
951 biox && (biox_blkno + btoc(bufx->b_bcount) != blk ||
952 ((biox_blkno ^ blk) & dmmax_mask)
955 if (bp->b_cmd == BUF_CMD_READ) {
956 ++mycpu->gd_cnt.v_swapin;
957 mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
958 } else {
959 ++mycpu->gd_cnt.v_swapout;
960 mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
961 bufx->b_dirtyend = bufx->b_bcount;
965 * Finished with this buf.
967 KKASSERT(bufx->b_bcount != 0);
968 if (bufx->b_cmd != BUF_CMD_READ)
969 bufx->b_dirtyend = bufx->b_bcount;
970 biox = NULL;
971 bufx = NULL;
975 * Add new swapblk to biox, instantiating biox if necessary.
976 * Zero-fill reads are able to take a shortcut.
978 if (blk == SWAPBLK_NONE) {
980 * We can only get here if we are reading. Since
981 * we are at splvm() we can safely modify b_resid,
982 * even if chain ops are in progress.
984 bzero(data, PAGE_SIZE);
985 bp->b_resid -= PAGE_SIZE;
986 } else {
987 if (biox == NULL) {
988 /* XXX chain count > 4, wait to <= 4 */
990 bufx = getpbuf(NULL);
991 biox = &bufx->b_bio1;
992 cluster_append(nbio, bufx);
993 bufx->b_flags |= (bufx->b_flags & B_ORDERED);
994 bufx->b_cmd = bp->b_cmd;
995 biox->bio_done = swap_chain_iodone;
996 biox->bio_offset = (off_t)blk << PAGE_SHIFT;
997 biox->bio_caller_info1.cluster_parent = nbio;
998 biox_blkno = blk;
999 bufx->b_bcount = 0;
1000 bufx->b_data = data;
1002 bufx->b_bcount += PAGE_SIZE;
1004 --count;
1005 ++start;
1006 data += PAGE_SIZE;
1010 * Flush out last buffer
1012 if (biox) {
1013 if (bufx->b_cmd == BUF_CMD_READ) {
1014 ++mycpu->gd_cnt.v_swapin;
1015 mycpu->gd_cnt.v_swappgsin += btoc(bufx->b_bcount);
1016 } else {
1017 ++mycpu->gd_cnt.v_swapout;
1018 mycpu->gd_cnt.v_swappgsout += btoc(bufx->b_bcount);
1019 bufx->b_dirtyend = bufx->b_bcount;
1021 KKASSERT(bufx->b_bcount);
1022 if (bufx->b_cmd != BUF_CMD_READ)
1023 bufx->b_dirtyend = bufx->b_bcount;
1024 /* biox, bufx = NULL */
1028 * Now initiate all the I/O. Be careful looping on our chain as
1029 * I/O's may complete while we are still initiating them.
1031 nbio->bio_caller_info2.cluster_tail = NULL;
1032 bufx = nbio->bio_caller_info1.cluster_head;
1034 while (bufx) {
1035 biox = &bufx->b_bio1;
1036 BUF_KERNPROC(bufx);
1037 bufx = bufx->b_cluster_next;
1038 vn_strategy(swapdev_vp, biox);
1042 * Completion of the cluster will also call biodone_chain(nbio).
1043 * We never call biodone(nbio) so we don't have to worry about
1044 * setting up a bio_done callback. It's handled in the sub-IO.
1046 /**/
1049 static void
1050 swap_chain_iodone(struct bio *biox)
1052 struct buf **nextp;
1053 struct buf *bufx; /* chained sub-buffer */
1054 struct bio *nbio; /* parent nbio with chain glue */
1055 struct buf *bp; /* original bp associated with nbio */
1056 int chain_empty;
1058 bufx = biox->bio_buf;
1059 nbio = biox->bio_caller_info1.cluster_parent;
1060 bp = nbio->bio_buf;
1063 * Update the original buffer
1065 KKASSERT(bp != NULL);
1066 if (bufx->b_flags & B_ERROR) {
1067 atomic_set_int(&bufx->b_flags, B_ERROR);
1068 bp->b_error = bufx->b_error;
1069 } else if (bufx->b_resid != 0) {
1070 atomic_set_int(&bufx->b_flags, B_ERROR);
1071 bp->b_error = EINVAL;
1072 } else {
1073 atomic_subtract_int(&bp->b_resid, bufx->b_bcount);
1077 * Remove us from the chain.
1079 spin_lock_wr(&bp->b_lock.lk_spinlock);
1080 nextp = &nbio->bio_caller_info1.cluster_head;
1081 while (*nextp != bufx) {
1082 KKASSERT(*nextp != NULL);
1083 nextp = &(*nextp)->b_cluster_next;
1085 *nextp = bufx->b_cluster_next;
1086 chain_empty = (nbio->bio_caller_info1.cluster_head == NULL);
1087 spin_unlock_wr(&bp->b_lock.lk_spinlock);
1090 * Clean up bufx. If the chain is now empty we finish out
1091 * the parent. Note that we may be racing other completions
1092 * so we must use the chain_empty status from above.
1094 if (chain_empty) {
1095 if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
1096 atomic_set_int(&bp->b_flags, B_ERROR);
1097 bp->b_error = EINVAL;
1099 biodone_chain(nbio);
1101 relpbuf(bufx, NULL);
1105 * SWAP_PAGER_GETPAGES() - bring pages in from swap
1107 * Attempt to retrieve (m, count) pages from backing store, but make
1108 * sure we retrieve at least m[reqpage]. We try to load in as large
1109 * a chunk surrounding m[reqpage] as is contiguous in swap and which
1110 * belongs to the same object.
1112 * The code is designed for asynchronous operation and
1113 * immediate-notification of 'reqpage' but tends not to be
1114 * used that way. Please do not optimize-out this algorithmic
1115 * feature, I intend to improve on it in the future.
1117 * The parent has a single vm_object_pip_add() reference prior to
1118 * calling us and we should return with the same.
1120 * The parent has BUSY'd the pages. We should return with 'm'
1121 * left busy, but the others adjusted.
1124 static int
1125 swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
1127 struct buf *bp;
1128 struct bio *bio;
1129 vm_page_t mreq;
1130 int i;
1131 int j;
1132 daddr_t blk;
1133 vm_offset_t kva;
1134 vm_pindex_t lastpindex;
1136 mreq = m[reqpage];
1138 if (mreq->object != object) {
1139 panic("swap_pager_getpages: object mismatch %p/%p",
1140 object,
1141 mreq->object
1146 * Calculate range to retrieve. The pages have already been assigned
1147 * their swapblks. We require a *contiguous* range that falls entirely
1148 * within a single device stripe. If we do not supply it, bad things
1149 * happen. Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
1150 * loops are set up such that the case(s) are handled implicitly.
1152 * The swp_*() calls must be made at splvm(). vm_page_free() does
1153 * not need to be, but it will go a little faster if it is.
1155 crit_enter();
1156 blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
1158 for (i = reqpage - 1; i >= 0; --i) {
1159 daddr_t iblk;
1161 iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
1162 if (blk != iblk + (reqpage - i))
1163 break;
1164 if ((blk ^ iblk) & dmmax_mask)
1165 break;
1167 ++i;
1169 for (j = reqpage + 1; j < count; ++j) {
1170 daddr_t jblk;
1172 jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
1173 if (blk != jblk - (j - reqpage))
1174 break;
1175 if ((blk ^ jblk) & dmmax_mask)
1176 break;
1180 * free pages outside our collection range. Note: we never free
1181 * mreq, it must remain busy throughout.
1185 int k;
1187 for (k = 0; k < i; ++k)
1188 vm_page_free(m[k]);
1189 for (k = j; k < count; ++k)
1190 vm_page_free(m[k]);
1192 crit_exit();
1196 * Return VM_PAGER_FAIL if we have nothing to do. Return mreq
1197 * still busy, but the others unbusied.
1200 if (blk == SWAPBLK_NONE)
1201 return(VM_PAGER_FAIL);
1204 * Get a swap buffer header to perform the IO
1207 bp = getpbuf(&nsw_rcount);
1208 bio = &bp->b_bio1;
1209 kva = (vm_offset_t) bp->b_data;
1212 * map our page(s) into kva for input
1215 pmap_qenter(kva, m + i, j - i);
1217 bp->b_data = (caddr_t) kva;
1218 bp->b_bcount = PAGE_SIZE * (j - i);
1219 bio->bio_done = swp_pager_async_iodone;
1220 bio->bio_offset = (off_t)(blk - (reqpage - i)) << PAGE_SHIFT;
1221 bio->bio_driver_info = (void *)(intptr_t)(reqpage - i);
1224 int k;
1226 for (k = i; k < j; ++k) {
1227 bp->b_xio.xio_pages[k - i] = m[k];
1228 vm_page_flag_set(m[k], PG_SWAPINPROG);
1231 bp->b_xio.xio_npages = j - i;
1233 mycpu->gd_cnt.v_swapin++;
1234 mycpu->gd_cnt.v_swappgsin += bp->b_xio.xio_npages;
1237 * We still hold the lock on mreq, and our automatic completion routine
1238 * does not remove it.
1241 vm_object_pip_add(mreq->object, bp->b_xio.xio_npages);
1242 lastpindex = m[j-1]->pindex;
1245 * perform the I/O. NOTE!!! bp cannot be considered valid after
1246 * this point because we automatically release it on completion.
1247 * Instead, we look at the one page we are interested in which we
1248 * still hold a lock on even through the I/O completion.
1250 * The other pages in our m[] array are also released on completion,
1251 * so we cannot assume they are valid anymore either.
1254 bp->b_cmd = BUF_CMD_READ;
1255 BUF_KERNPROC(bp);
1256 vn_strategy(swapdev_vp, bio);
1259 * wait for the page we want to complete. PG_SWAPINPROG is always
1260 * cleared on completion. If an I/O error occurs, SWAPBLK_NONE
1261 * is set in the meta-data.
1264 crit_enter();
1266 while ((mreq->flags & PG_SWAPINPROG) != 0) {
1267 vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
1268 mycpu->gd_cnt.v_intrans++;
1269 if (tsleep(mreq, 0, "swread", hz*20)) {
1270 kprintf(
1271 "swap_pager: indefinite wait buffer: "
1272 " offset: %lld, size: %ld\n",
1273 (long long)bio->bio_offset,
1274 (long)bp->b_bcount
1279 crit_exit();
1282 * mreq is left busied after completion, but all the other pages
1283 * are freed. If we had an unrecoverable read error the page will
1284 * not be valid.
1287 if (mreq->valid != VM_PAGE_BITS_ALL) {
1288 return(VM_PAGER_ERROR);
1289 } else {
1290 return(VM_PAGER_OK);
1294 * A final note: in a low swap situation, we cannot deallocate swap
1295 * and mark a page dirty here because the caller is likely to mark
1296 * the page clean when we return, causing the page to possibly revert
1297 * to all-zeros later.
1302 * swap_pager_putpages:
1304 * Assign swap (if necessary) and initiate I/O on the specified pages.
1306 * We support both OBJT_DEFAULT and OBJT_SWAP objects. DEFAULT objects
1307 * are automatically converted to SWAP objects.
1309 * In a low memory situation we may block in vn_strategy(), but the new
1310 * vm_page reservation system coupled with properly written VFS devices
1311 * should ensure that no low-memory deadlock occurs. This is an area
1312 * which needs work.
1314 * The parent has N vm_object_pip_add() references prior to
1315 * calling us and will remove references for rtvals[] that are
1316 * not set to VM_PAGER_PEND. We need to remove the rest on I/O
1317 * completion.
1319 * The parent has soft-busy'd the pages it passes us and will unbusy
1320 * those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
1321 * We need to unbusy the rest on I/O completion.
1323 void
1324 swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
1325 boolean_t sync, int *rtvals)
1327 int i;
1328 int n = 0;
1330 if (count && m[0]->object != object) {
1331 panic("swap_pager_getpages: object mismatch %p/%p",
1332 object,
1333 m[0]->object
1338 * Step 1
1340 * Turn object into OBJT_SWAP
1341 * check for bogus sysops
1342 * force sync if not pageout process
1345 if (object->type != OBJT_SWAP)
1346 swp_pager_meta_build(object, 0, SWAPBLK_NONE);
1348 if (curthread != pagethread)
1349 sync = TRUE;
1352 * Step 2
1354 * Update nsw parameters from swap_async_max sysctl values.
1355 * Do not let the sysop crash the machine with bogus numbers.
1358 if (swap_async_max != nsw_wcount_async_max) {
1359 int n;
1362 * limit range
1364 if ((n = swap_async_max) > nswbuf / 2)
1365 n = nswbuf / 2;
1366 if (n < 1)
1367 n = 1;
1368 swap_async_max = n;
1371 * Adjust difference ( if possible ). If the current async
1372 * count is too low, we may not be able to make the adjustment
1373 * at this time.
1375 crit_enter();
1376 n -= nsw_wcount_async_max;
1377 if (nsw_wcount_async + n >= 0) {
1378 nsw_wcount_async += n;
1379 nsw_wcount_async_max += n;
1380 wakeup(&nsw_wcount_async);
1382 crit_exit();
1386 * Step 3
1388 * Assign swap blocks and issue I/O. We reallocate swap on the fly.
1389 * The page is left dirty until the pageout operation completes
1390 * successfully.
1393 for (i = 0; i < count; i += n) {
1394 struct buf *bp;
1395 struct bio *bio;
1396 daddr_t blk;
1397 int j;
1400 * Maximum I/O size is limited by a number of factors.
1403 n = min(BLIST_MAX_ALLOC, count - i);
1404 n = min(n, nsw_cluster_max);
1406 crit_enter();
1409 * Get biggest block of swap we can. If we fail, fall
1410 * back and try to allocate a smaller block. Don't go
1411 * overboard trying to allocate space if it would overly
1412 * fragment swap.
1414 while (
1415 (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
1416 n > 4
1418 n >>= 1;
1420 if (blk == SWAPBLK_NONE) {
1421 for (j = 0; j < n; ++j)
1422 rtvals[i+j] = VM_PAGER_FAIL;
1423 crit_exit();
1424 continue;
1428 * The I/O we are constructing cannot cross a physical
1429 * disk boundary in the swap stripe. Note: we are still
1430 * at splvm().
1432 if ((blk ^ (blk + n)) & dmmax_mask) {
1433 j = ((blk + dmmax) & dmmax_mask) - blk;
1434 swp_pager_freeswapspace(blk + j, n - j);
1435 n = j;
1439 * All I/O parameters have been satisfied, build the I/O
1440 * request and assign the swap space.
1443 if (sync == TRUE)
1444 bp = getpbuf(&nsw_wcount_sync);
1445 else
1446 bp = getpbuf(&nsw_wcount_async);
1447 bio = &bp->b_bio1;
1449 pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);
1451 bp->b_bcount = PAGE_SIZE * n;
1452 bio->bio_offset = (off_t)blk << PAGE_SHIFT;
1454 for (j = 0; j < n; ++j) {
1455 vm_page_t mreq = m[i+j];
1457 swp_pager_meta_build(
1458 mreq->object,
1459 mreq->pindex,
1460 blk + j
1462 vm_page_dirty(mreq);
1463 rtvals[i+j] = VM_PAGER_OK;
1465 vm_page_flag_set(mreq, PG_SWAPINPROG);
1466 bp->b_xio.xio_pages[j] = mreq;
1468 bp->b_xio.xio_npages = n;
1470 mycpu->gd_cnt.v_swapout++;
1471 mycpu->gd_cnt.v_swappgsout += bp->b_xio.xio_npages;
1473 crit_exit();
1475 bp->b_dirtyoff = 0; /* req'd for NFS */
1476 bp->b_dirtyend = bp->b_bcount; /* req'd for NFS */
1477 bp->b_cmd = BUF_CMD_WRITE;
1480 * asynchronous
1482 if (sync == FALSE) {
1483 bio->bio_done = swp_pager_async_iodone;
1484 BUF_KERNPROC(bp);
1485 vn_strategy(swapdev_vp, bio);
1487 for (j = 0; j < n; ++j)
1488 rtvals[i+j] = VM_PAGER_PEND;
1489 continue;
1493 * Issue synchronously.
1495 * Wait for the sync I/O to complete, then update rtvals.
1496 * We just set the rtvals[] to VM_PAGER_PEND so we can call
1497 * our async completion routine at the end, thus avoiding a
1498 * double-free.
1500 bio->bio_done = biodone_sync;
1501 bio->bio_flags |= BIO_SYNC;
1502 vn_strategy(swapdev_vp, bio);
1503 biowait(bio, "swwrt");
1505 for (j = 0; j < n; ++j)
1506 rtvals[i+j] = VM_PAGER_PEND;
1509 * Now that we are through with the bp, we can call the
1510 * normal async completion, which frees everything up.
1512 swp_pager_async_iodone(bio);
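/*
 * Editorial sketch of a pageout-side caller: flush 'count' dirty,
 * soft-busied pages and collect per-page status. Entries left at
 * VM_PAGER_PEND complete (and unbusy) asynchronously via
 * swp_pager_async_iodone(). 'marray' is an illustrative name.
 */
#if 0
	int rtvals[MAX_PAGEOUT_CLUSTER];

	swap_pager_putpages(object, marray, count, FALSE, rtvals);
#endif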
1516 void
1517 swap_pager_newswap(void)
1519 swp_sizecheck();
1523 * swp_pager_async_iodone:
1525 * Completion routine for asynchronous reads and writes from/to swap.
1526 * Also called manually by synchronous code to finish up a bp.
1528 * For READ operations, the pages are PG_BUSY'd. For WRITE operations,
1529 * the pages are vm_page_t->busy'd. For READ operations, we PG_BUSY
1530 * unbusy all pages except the 'main' request page. For WRITE
1531 * operations, we vm_page_t->busy'd unbusy all pages ( we can do this
1532 * because we marked them all VM_PAGER_PEND on return from putpages ).
1534 * This routine may not block.
1536 static void
1537 swp_pager_async_iodone(struct bio *bio)
1539 struct buf *bp = bio->bio_buf;
1540 vm_object_t object = NULL;
1541 int i;
1542 int *nswptr;
1545 * report error
1547 if (bp->b_flags & B_ERROR) {
1548 kprintf(
1549 "swap_pager: I/O error - %s failed; offset %lld,"
1550 "size %ld, error %d\n",
1551 ((bp->b_cmd == BUF_CMD_READ) ? "pagein" : "pageout"),
1552 (long long)bio->bio_offset,
1553 (long)bp->b_bcount,
1554 bp->b_error
1559 * set object, raise to splvm().
1561 if (bp->b_xio.xio_npages)
1562 object = bp->b_xio.xio_pages[0]->object;
1563 crit_enter();
1566 * remove the mapping for kernel virtual
1568 pmap_qremove((vm_offset_t)bp->b_data, bp->b_xio.xio_npages);
1571 * cleanup pages. If an error occurs writing to swap, we are in
1572 * very serious trouble. If it happens to be a disk error, though,
1573 * we may be able to recover by reassigning the swap later on. So
1574 * in this case we remove the m->swapblk assignment for the page
1575 * but do not free it in the rlist. The erroneous block(s) are thus
1576 * never reallocated as swap. Redirty the page and continue.
1578 for (i = 0; i < bp->b_xio.xio_npages; ++i) {
1579 vm_page_t m = bp->b_xio.xio_pages[i];
1581 vm_page_flag_clear(m, PG_SWAPINPROG);
1583 if (bp->b_flags & B_ERROR) {
1585 * If an error occurs I'd love to throw the swapblk
1586 * away without freeing it back to swapspace, so it
1587 * can never be used again. But I can't from an
1588 * interrupt.
1591 if (bp->b_cmd == BUF_CMD_READ) {
1593 * When reading, reqpage needs to stay
1594 * locked for the parent, but all other
1595 * pages can be freed. We still want to
1596 * wakeup the parent waiting on the page,
1597 * though. ( also: pg_reqpage can be -1 and
1598 * not match anything ).
1600 * We have to wake specifically requested pages
1601 * up too because we cleared PG_SWAPINPROG and
1602 * someone may be waiting for that.
1604 * NOTE: for reads, m->dirty will probably
1605 * be overridden by the original caller of
1606 * getpages so don't play cute tricks here.
1608 * NOTE: We can't actually free the page from
1609 * here, because this is an interrupt. It
1610 * is not legal to mess with object->memq
1611 * from an interrupt. Deactivate the page
1612 * instead.
1615 m->valid = 0;
1616 vm_page_flag_clear(m, PG_ZERO);
1619 * bio_driver_info holds the requested page
1620 * index.
1622 if (i != (int)(intptr_t)bio->bio_driver_info) {
1623 vm_page_deactivate(m);
1624 vm_page_wakeup(m);
1625 } else {
1626 vm_page_flash(m);
1629 * If i == bp->b_pager.pg_reqpage, do not wake
1630 * the page up. The caller needs to.
1632 } else {
1634 * If a write error occurs, reactivate page
1635 * so it doesn't clog the inactive list,
1636 * then finish the I/O.
1638 vm_page_dirty(m);
1639 vm_page_activate(m);
1640 vm_page_io_finish(m);
1642 } else if (bp->b_cmd == BUF_CMD_READ) {
1644 * NOTE: for reads, m->dirty will probably be
1645 * overridden by the original caller of getpages so
1646 * we cannot set them in order to free the underlying
1647 * swap in a low-swap situation. I don't think we'd
1648 * want to do that anyway, but it was an optimization
1649 * that existed in the old swapper for a time before
1650 * it got ripped out due to precisely this problem.
1652 * clear PG_ZERO in page.
1654 * If not the requested page then deactivate it.
1656 * Note that the requested page, reqpage, is left
1657 * busied, but we still have to wake it up. The
1658 * other pages are released (unbusied) by
1659 * vm_page_wakeup(). We do not set reqpage's
1660 * valid bits here, it is up to the caller.
1664 * NOTE: can't call pmap_clear_modify(m) from an
1665 * interrupt thread, the pmap code may have to map
1666 * non-kernel pmaps and currently asserts the case.
1668 /*pmap_clear_modify(m);*/
1669 m->valid = VM_PAGE_BITS_ALL;
1670 vm_page_undirty(m);
1671 vm_page_flag_clear(m, PG_ZERO);
1674 * We have to wake specifically requested pages
1675 * up too because we cleared PG_SWAPINPROG and
1676 * could be waiting for it in getpages. However,
1677 * be sure not to unbusy the page specifically
1678 * requested by getpages - getpages expects it to be
1679 * left busy.
1681 * bio_driver_info holds the requested page
1683 if (i != (int)(intptr_t)bio->bio_driver_info) {
1684 vm_page_deactivate(m);
1685 vm_page_wakeup(m);
1686 } else {
1687 vm_page_flash(m);
1689 } else {
1691 * Mark the page clean but do not mess with the
1692 * pmap-layer's modified state. That state should
1693 * also be clear since the caller protected the
1694 * page VM_PROT_READ, but allow the case.
1696 * We are in an interrupt, avoid pmap operations.
1698 * If we have a severe page deficit, deactivate the
1699 * page. Do not try to cache it (which would also
1700 * involve a pmap op), because the page might still
1701 * be read-heavy.
1703 vm_page_undirty(m);
1704 vm_page_io_finish(m);
1705 if (vm_page_count_severe())
1706 vm_page_deactivate(m);
1707 #if 0
1708 if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
1709 vm_page_protect(m, VM_PROT_READ);
1710 #endif
1715 * adjust pip. NOTE: the original parent may still have its own
1716 * pip refs on the object.
1719 if (object)
1720 vm_object_pip_wakeupn(object, bp->b_xio.xio_npages);
1723 * release the physical I/O buffer
1725 if (bp->b_cmd == BUF_CMD_READ)
1726 nswptr = &nsw_rcount;
1727 else if (bio->bio_flags & BIO_SYNC)
1728 nswptr = &nsw_wcount_sync;
1729 else
1730 nswptr = &nsw_wcount_async;
1731 bp->b_cmd = BUF_CMD_DONE;
1732 relpbuf(bp, nswptr);
1733 crit_exit();
1736 /************************************************************************
1737 * SWAP META DATA *
1738 ************************************************************************
1740 * These routines manipulate the swap metadata stored in the
1741 * OBJT_SWAP object. All swp_*() routines must be called at
1742 * splvm() because swap can be freed up by the low level vm_page
1743 * code which might be called from interrupts beyond what splbio() covers.
1745 * Swap metadata is implemented with a global hash and not directly
1746 * linked into the object. Instead the object simply contains
1747 * appropriate tracking counters.
1751 * SWP_PAGER_HASH() - hash swap meta data
1753 * This is an inline helper function which hashes the swapblk given
1754 * the object and page index. It returns a pointer to a pointer
1755 * to the object, or a pointer to a NULL pointer if it could not
1756 * find a swapblk.
1758 * This routine must be called at splvm().
1761 static __inline struct swblock **
1762 swp_pager_hash(vm_object_t object, vm_pindex_t index)
1764 struct swblock **pswap;
1765 struct swblock *swap;
1767 index &= ~SWAP_META_MASK;
1768 pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
1770 while ((swap = *pswap) != NULL) {
1771 if (swap->swb_object == object &&
1772 swap->swb_index == index
1774 break;
1776 pswap = &swap->swb_hnext;
1778 return(pswap);
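/*
 * Editorial worked example: assuming SWAP_META_PAGES == 16 (each
 * swblock holds 16 pages, per the zone sizing comment above) and
 * SWAP_META_MASK == 15, pindexes 32..47 of an object all round down
 * to swb_index 32 and share one swblock; the bucket searched is
 * swhash[(32 ^ (int)(intptr_t)object) & swhash_mask].
 */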
1782 * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object
1784 * We first convert the object to a swap object if it is a default
1785 * object.
1787 * The specified swapblk is added to the object's swap metadata. If
1788 * the swapblk is not valid, it is freed instead. Any previously
1789 * assigned swapblk is freed.
1791 * This routine must be called at splvm(), except when used to convert
1792 * an OBJT_DEFAULT object into an OBJT_SWAP object.
1796 static void
1797 swp_pager_meta_build(
1798 vm_object_t object,
1799 vm_pindex_t index,
1800 daddr_t swapblk
1802 struct swblock *swap;
1803 struct swblock **pswap;
1806 * Convert default object to swap object if necessary
1809 if (object->type != OBJT_SWAP) {
1810 object->type = OBJT_SWAP;
1811 object->un_pager.swp.swp_bcount = 0;
1813 if (object->handle != NULL) {
1814 TAILQ_INSERT_TAIL(
1815 NOBJLIST(object->handle),
1816 object,
1817 pager_object_list
1819 } else {
1820 TAILQ_INSERT_TAIL(
1821 &swap_pager_un_object_list,
1822 object,
1823 pager_object_list
1829 * Locate hash entry. If not found create, but if we aren't adding
1830 * anything just return. If we run out of space in the map we wait
1831 * and, since the hash table may have changed, retry.
1834 retry:
1835 pswap = swp_pager_hash(object, index);
1837 if ((swap = *pswap) == NULL) {
1838 int i;
1840 if (swapblk == SWAPBLK_NONE)
1841 return;
1843 swap = *pswap = zalloc(swap_zone);
1844 if (swap == NULL) {
1845 vm_wait(0);
1846 goto retry;
1848 swap->swb_hnext = NULL;
1849 swap->swb_object = object;
1850 swap->swb_index = index & ~SWAP_META_MASK;
1851 swap->swb_count = 0;
1853 ++object->un_pager.swp.swp_bcount;
1855 for (i = 0; i < SWAP_META_PAGES; ++i)
1856 swap->swb_pages[i] = SWAPBLK_NONE;
1860 * Delete prior contents of metadata
1863 index &= SWAP_META_MASK;
1865 if (swap->swb_pages[index] != SWAPBLK_NONE) {
1866 swp_pager_freeswapspace(swap->swb_pages[index], 1);
1867 --swap->swb_count;
1871 * Enter block into metadata
1874 swap->swb_pages[index] = swapblk;
1875 if (swapblk != SWAPBLK_NONE)
1876 ++swap->swb_count;
1880 * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
1882 * The requested range of blocks is freed, with any associated swap
1883 * returned to the swap bitmap.
1885 * This routine will free swap metadata structures as they are cleaned
1886 * out. This routine does *NOT* operate on swap metadata associated
1887 * with resident pages.
1889 * This routine must be called at splvm()
1892 static void
1893 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
1895 if (object->type != OBJT_SWAP)
1896 return;
1898 while (count > 0) {
1899 struct swblock **pswap;
1900 struct swblock *swap;
1902 pswap = swp_pager_hash(object, index);
1904 if ((swap = *pswap) != NULL) {
1905 daddr_t v = swap->swb_pages[index & SWAP_META_MASK];
1907 if (v != SWAPBLK_NONE) {
1908 swp_pager_freeswapspace(v, 1);
1909 swap->swb_pages[index & SWAP_META_MASK] =
1910 SWAPBLK_NONE;
1911 if (--swap->swb_count == 0) {
1912 *pswap = swap->swb_hnext;
1913 zfree(swap_zone, swap);
1914 --object->un_pager.swp.swp_bcount;
1917 --count;
1918 ++index;
1919 } else {
1920 int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
1921 count -= n;
1922 index += n;
1928 * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
1930 * This routine locates and destroys all swap metadata associated with
1931 * an object.
1933 * This routine must be called at splvm()
1936 static void
1937 swp_pager_meta_free_all(vm_object_t object)
1939 daddr_t index = 0;
1941 if (object->type != OBJT_SWAP)
1942 return;
1944 while (object->un_pager.swp.swp_bcount) {
1945 struct swblock **pswap;
1946 struct swblock *swap;
1948 pswap = swp_pager_hash(object, index);
1949 if ((swap = *pswap) != NULL) {
1950 int i;
1952 for (i = 0; i < SWAP_META_PAGES; ++i) {
1953 daddr_t v = swap->swb_pages[i];
1954 if (v != SWAPBLK_NONE) {
1955 --swap->swb_count;
1956 swp_pager_freeswapspace(v, 1);
1959 if (swap->swb_count != 0)
1960 panic("swp_pager_meta_free_all: swb_count != 0");
1961 *pswap = swap->swb_hnext;
1962 zfree(swap_zone, swap);
1963 --object->un_pager.swp.swp_bcount;
1965 index += SWAP_META_PAGES;
1966 if (index > 0x20000000)
1967 panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
1972 * SWP_PAGER_METACTL() - misc control of swap and vm_page_t meta data.
1974 * This routine is capable of looking up, popping, or freeing
1975 * swapblk assignments in the swap meta data or in the vm_page_t.
1976 * The routine typically returns the swapblk being looked-up, or popped,
1977 * or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
1978 * was invalid. This routine will automatically free any invalid
1979 * meta-data swapblks.
1981 * It is not possible to store invalid swapblks in the swap meta data
1982 * (other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
1984 * When acting on a busy resident page and paging is in progress, we
1985 * have to wait until paging is complete but otherwise can act on the
1986 * busy page.
1988 * This routine must be called at splvm().
1990 * SWM_FREE remove and free swap block from metadata
1991 * SWM_POP remove from meta data but do not free.. pop it out
1994 static daddr_t
1995 swp_pager_meta_ctl(
1996 vm_object_t object,
1997 vm_pindex_t index,
1998 int flags
2000 struct swblock **pswap;
2001 struct swblock *swap;
2002 daddr_t r1;
2005 * The meta data only exists if the object is OBJT_SWAP
2006 * and even then might not be allocated yet.
2009 if (object->type != OBJT_SWAP)
2010 return(SWAPBLK_NONE);
2012 r1 = SWAPBLK_NONE;
2013 pswap = swp_pager_hash(object, index);
2015 if ((swap = *pswap) != NULL) {
2016 index &= SWAP_META_MASK;
2017 r1 = swap->swb_pages[index];
2019 if (r1 != SWAPBLK_NONE) {
2020 if (flags & SWM_FREE) {
2021 swp_pager_freeswapspace(r1, 1);
2022 r1 = SWAPBLK_NONE;
2024 if (flags & (SWM_FREE|SWM_POP)) {
2025 swap->swb_pages[index] = SWAPBLK_NONE;
2026 if (--swap->swb_count == 0) {
2027 *pswap = swap->swb_hnext;
2028 zfree(swap_zone, swap);
2029 --object->un_pager.swp.swp_bcount;
2034 return(r1);