/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_pager.c,v 1.54.2.2 2001/11/18 07:11:00 dillon Exp $
 */
/*
 * Paging space routine stubs.  Emulates a matchmaker-like interface
 * for builtin pagers.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/dsched.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
MALLOC_DEFINE(M_VMPGDATA, "VM pgdata", "XXX: VM pager private data");

extern struct pagerops defaultpagerops;
extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;
extern struct pagerops physpagerops;

int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */
static int dead_pager_getpage (vm_object_t, vm_page_t *, int);
static void dead_pager_putpages (vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage (vm_object_t, vm_pindex_t);
static void dead_pager_dealloc (vm_object_t);
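
/*
 * The "dead" pager is installed on objects whose backing store has gone
 * away (OBJT_DEAD); every operation either fails or is a no-op so that
 * stray references to such objects cannot touch freed resources.
 */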
static int
dead_pager_getpage(vm_object_t obj, vm_page_t *mpp, int seqaccess)
{
	return VM_PAGER_FAIL;
}
static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
		    int *rtvals)
{
	int i;

	for (i = 0; i < count; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}
}
static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
	/* a dead object has no backing store left to consult */
	return FALSE;
}
static void
dead_pager_dealloc(vm_object_t object)
{
	KKASSERT(object->swblock_count == 0);
}
/*
 * Method table for dead objects.  The pgo_* member names below are assumed
 * from the dead_pager_* prototypes above; designated initializers avoid
 * depending on the field order of struct pagerops.
 */
static struct pagerops deadpagerops = {
	.pgo_dealloc = dead_pager_dealloc,
	.pgo_getpage = dead_pager_getpage,
	.pgo_putpages = dead_pager_putpages,
	.pgo_haspage = dead_pager_haspage
};
struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops		/* OBJT_DEAD */
};
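
/*
 * pagertab[] is indexed by the object's type field, so the entries above
 * must stay in the same order as the OBJT_* constants they annotate;
 * vm_pager_deallocate() below dispatches through it.
 */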
int npagers = NELEM(pagertab);
/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.
 */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)
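
/*
 * Worked out: 64 pending cleaning requests * 64KB per swap cluster is
 * 4MB of KVA, so the 8MB PAGER_MAP_SIZE above leaves roughly 2x headroom
 * over that worst case.
 */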
TAILQ_HEAD(swqueue, buf);

int pager_map_size = PAGER_MAP_SIZE;
struct vm_map pager_map;
static int bswneeded_raw;
static int bswneeded_kva;
static int nswbuf_raw;
static struct buf *swbuf_raw;
static vm_offset_t swapbkva;		/* swap buffers kva */
static struct swqueue bswlist_raw;	/* without kva */
static struct swqueue bswlist_kva;	/* with kva */
static struct spinlock bswspin = SPINLOCK_INITIALIZER(&bswspin);
static int pbuf_raw_count;
static int pbuf_kva_count;
SYSCTL_INT(_vfs, OID_AUTO, pbuf_raw_count, CTLFLAG_RD, &pbuf_raw_count, 0,
	   "Kernel raw address space reservations");
SYSCTL_INT(_vfs, OID_AUTO, pbuf_kva_count, CTLFLAG_RD, &pbuf_kva_count, 0,
	   "Kernel virtual address space reservations");
/*
 * Initialize the swap buffer list.
 *
 * Called from the low level boot code only.
 */
static void
vm_pager_init(void *arg __unused)
{
	TAILQ_INIT(&bswlist_raw);
	TAILQ_INIT(&bswlist_kva);
}

SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_SECOND, vm_pager_init, NULL)
/*
 * Called from the low level boot code only.
 */
void
vm_pager_bufferinit(void)
{
	struct buf *bp;
	int i;

	/*
	 * Reserve KVM space for pbuf data.
	 */
	swapbkva = kmem_alloc_pageable(&pager_map, nswbuf * MAXPHYS);
	if (!swapbkva)
		panic("Not enough pager_map VM space for physical buffers");

	/*
	 * Initial pbuf setup.  These pbufs have KVA reservations.
	 */
	bp = swbuf;
	for (i = 0; i < nswbuf; ++i, ++bp) {
		bp->b_kvabase = (caddr_t)((intptr_t)i * MAXPHYS) + swapbkva;
		bp->b_kvasize = MAXPHYS;
		TAILQ_INSERT_HEAD(&bswlist_kva, bp, b_freelist);
		++pbuf_kva_count;
	}

	/*
	 * Initial pbuf setup.  These pbufs do not have KVA reservations,
	 * so we can have a lot more of them.  These are typically used
	 * to massage low level buf/bio requests.
	 */
	nswbuf_raw = nbuf * 2;
	swbuf_raw = (void *)kmem_alloc(&kernel_map,
				round_page(nswbuf_raw * sizeof(struct buf)));
	bp = swbuf_raw;
	for (i = 0; i < nswbuf_raw; ++i, ++bp) {
		TAILQ_INSERT_HEAD(&bswlist_raw, bp, b_freelist);
		++pbuf_raw_count;
	}

	/*
	 * Allow the clustering code to use half of our pbufs.
	 */
	cluster_pbuf_freecnt = nswbuf / 2;
}
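
/*
 * Summary of the two pools set up above: nswbuf pbufs carry a MAXPHYS-sized
 * KVA window out of pager_map and are the scarce resource; nbuf * 2 "raw"
 * pbufs have no KVA attached and exist mainly to wrap low level buf/bio
 * requests cheaply.
 */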
void
vm_pager_deallocate(vm_object_t object)
{
	(*pagertab[object->type]->pgo_dealloc) (object);
}
/*
 * vm_pager_get_pages() - inline, see vm/vm_pager.h
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 * vm_pager_page_inserted() - inline, see vm/vm_pager.h
 * vm_pager_page_removed() - inline, see vm/vm_pager.h
 */
/*
 * Called by the pageout daemon before going back to sleep.
 * Gives pagers a chance to clean up any completed async paging
 * operations.
 */
void
vm_pager_sync(void)
{
	struct pagerops **pgops;

	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (pgops && ((*pgops)->pgo_sync != NULL))
			(*(*pgops)->pgo_sync) ();
}
/*
 * Initialize a physical buffer.
 */
static void
initpbuf(struct buf *bp)
{
	bp->b_qindex = 0;		/* BQUEUE_NONE */
	bp->b_data = bp->b_kvabase;	/* NULL if pbuf sans kva */
	bp->b_flags = B_PAGING;
	bp->b_cmd = BUF_CMD_DONE;
	bp->b_bufsize = MAXPHYS;
	xio_init(&bp->b_xio);
	BUF_LOCK(bp, LK_EXCLUSIVE);
}
/*
 * Allocate a physical buffer.
 *
 * There are a limited number (nswbuf) of physical buffers.  We need
 * to make sure that no single subsystem is able to hog all of them,
 * so each subsystem implements a counter which is typically initialized
 * to 1/2 nswbuf.  getpbuf() decrements this counter on allocation and
 * increments it on release, and blocks if the counter hits zero.  A
 * subsystem may initialize the counter to -1 to disable the feature,
 * but it must still be sure to match up all uses of getpbuf() with
 * relpbuf() using the same variable.  (See the usage sketch following
 * getpbuf() below.)
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 *
 * Physical buffers can be with or without KVA space reserved.  There
 * are severe limitations on the ones with KVA reserved, and fewer
 * limitations on the ones without.  getpbuf() gets one without,
 * getpbuf_kva() gets one with.
 */
struct buf *
getpbuf(int *pfreecnt)
{
	struct buf *bp;

	spin_lock(&bswspin);

	for (;;) {
		if (pfreecnt) {
			while (*pfreecnt == 0)
				ssleep(pfreecnt, &bswspin, 0, "wswbuf0", 0);
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist_raw)) != NULL)
			break;
		bswneeded_raw = 1;
		ssleep(&bswneeded_raw, &bswspin, 0, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist_raw, bp, b_freelist);
	--pbuf_raw_count;
	if (pfreecnt)
		--*pfreecnt;

	spin_unlock(&bswspin);

	initpbuf(bp);
	KKASSERT(dsched_is_clear_buf_priv(bp));

	return bp;
}
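
/*
 * Hypothetical usage of the counter protocol described above (the
 * "example_pbuf_freecnt" name is illustrative, not part of this file):
 *
 *	static int example_pbuf_freecnt = -1;	// set to nswbuf / 2 at init
 *
 *	bp = getpbuf_kva(&example_pbuf_freecnt);
 *	... fill in bp->b_data / b_xio and issue the I/O ...
 *	relpbuf(bp, &example_pbuf_freecnt);	// same counter, always paired
 *
 * The same counter variable must be passed to both calls so the per
 * subsystem quota stays balanced.
 */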
struct buf *
getpbuf_kva(int *pfreecnt)
{
	struct buf *bp;

	spin_lock(&bswspin);

	for (;;) {
		if (pfreecnt) {
			while (*pfreecnt == 0)
				ssleep(pfreecnt, &bswspin, 0, "wswbuf0", 0);
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist_kva)) != NULL)
			break;
		bswneeded_kva = 1;
		ssleep(&bswneeded_kva, &bswspin, 0, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist_kva, bp, b_freelist);
	--pbuf_kva_count;
	if (pfreecnt)
		--*pfreecnt;

	spin_unlock(&bswspin);

	initpbuf(bp);
	KKASSERT(dsched_is_clear_buf_priv(bp));

	return bp;
}
/*
 * Allocate a physical buffer, if one is available.
 *
 * Note that there is no NULL hack here - all subsystems using this
 * call understand how to use pfreecnt.
 */
struct buf *
trypbuf(int *pfreecnt)
{
	struct buf *bp;

	spin_lock(&bswspin);

	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist_raw)) == NULL) {
		spin_unlock(&bswspin);
		return NULL;
	}
	TAILQ_REMOVE(&bswlist_raw, bp, b_freelist);
	--pbuf_raw_count;
	--*pfreecnt;

	spin_unlock(&bswspin);

	initpbuf(bp);

	return bp;
}
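
/*
 * Unlike getpbuf()/getpbuf_kva(), the trypbuf variants never sleep: they
 * return NULL immediately when the subsystem quota or the free list is
 * exhausted, so callers must be prepared to fall back or retry later.
 */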
struct buf *
trypbuf_kva(int *pfreecnt)
{
	struct buf *bp;

	spin_lock(&bswspin);

	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist_kva)) == NULL) {
		spin_unlock(&bswspin);
		return NULL;
	}
	TAILQ_REMOVE(&bswlist_kva, bp, b_freelist);
	--pbuf_kva_count;
	--*pfreecnt;

	spin_unlock(&bswspin);

	initpbuf(bp);

	return bp;
}
/*
 * Release a physical buffer.
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 */
void
relpbuf(struct buf *bp, int *pfreecnt)
{
	int wake_bsw_kva = 0;
	int wake_bsw_raw = 0;
	int wake_freecnt = 0;

	KKASSERT(bp->b_flags & B_PAGING);
	BUF_UNLOCK(bp);

	spin_lock(&bswspin);

	if (bp->b_kvasize) {
		TAILQ_INSERT_HEAD(&bswlist_kva, bp, b_freelist);
		++pbuf_kva_count;
	} else {
		TAILQ_INSERT_HEAD(&bswlist_raw, bp, b_freelist);
		++pbuf_raw_count;
	}
	if (bswneeded_kva) {
		bswneeded_kva = 0;
		wake_bsw_kva = 1;
	}
	if (bswneeded_raw) {
		bswneeded_raw = 0;
		wake_bsw_raw = 1;
	}
	if (pfreecnt) {
		if (++*pfreecnt == 1)
			wake_freecnt = 1;
	}

	spin_unlock(&bswspin);

	if (wake_bsw_kva)
		wakeup(&bswneeded_kva);
	if (wake_bsw_raw)
		wakeup(&bswneeded_raw);
	if (wake_freecnt)
		wakeup(pfreecnt);
}
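
/*
 * Design note: the wake_* flags are latched while bswspin is held, but the
 * wakeup() calls themselves are issued only after the spinlock is dropped,
 * so sleepers in getpbuf()/getpbuf_kva() are never woken straight into a
 * still-held spinlock.
 */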