/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 *	Paging space routine stubs.  Emulates a matchmaker-like interface
 *	for builtin pagers.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */
static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *);
static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static void dead_pager_dealloc(vm_object_t);
static int
dead_pager_getpages(vm_object_t obj, vm_page_t *ma, int count, int *rbehind,
    int *rahead)
{

	return (VM_PAGER_FAIL);
}
static vm_object_t
dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t off, struct ucred *cred)
{

	return (NULL);
}
static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
    int *rtvals)
{
	int i;

	for (i = 0; i < count; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}
}
static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev,
    int *next)
{

	return (FALSE);
}
static void
dead_pager_dealloc(vm_object_t object)
{
}
static struct pagerops deadpagerops = {
	.pgo_alloc =	dead_pager_alloc,
	.pgo_dealloc =	dead_pager_dealloc,
	.pgo_getpages =	dead_pager_getpages,
	.pgo_putpages =	dead_pager_putpages,
	.pgo_haspage =	dead_pager_haspage,
};
struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops,		/* OBJT_DEAD */
	&sgpagerops,		/* OBJT_SG */
	&mgtdevicepagerops,	/* OBJT_MGTDEVICE */
};
/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.
 */
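/*
 * A rough sizing sketch under the assumptions stated above: 64 pending
 * cleaning requests (NPENDINGIO) times a 64KB swap cluster (MAXPHYS)
 * comes to 64 * 64KB = 4MB of pager KVA.
 */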
struct mtx_padalign pbuf_mtx;
static TAILQ_HEAD(swqueue, buf) bswlist;
static int bswneeded;
vm_offset_t swapbkva;		/* swap buffers kva */
void
vm_pager_init(void)
{
	struct pagerops **pgops;

	TAILQ_INIT(&bswlist);
	/*
	 * Initialize known pagers
	 */
	for (pgops = pagertab; pgops < &pagertab[nitems(pagertab)]; pgops++)
		if ((*pgops)->pgo_init != NULL)
			(*(*pgops)->pgo_init)();
}
void
vm_pager_bufferinit(void)
{
	struct buf *bp;
	int i;

	mtx_init(&pbuf_mtx, "pbuf mutex", NULL, MTX_DEF);
	bp = swbuf;

	/*
	 * Now set up swap and physical I/O buffer headers.
	 */
	for (i = 0; i < nswbuf; i++, bp++) {
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		LIST_INIT(&bp->b_dep);
		bp->b_rcred = bp->b_wcred = NOCRED;
	}

	cluster_pbuf_freecnt = nswbuf / 2;
	vnode_pbuf_freecnt = nswbuf / 2 + 1;
	vnode_async_pbuf_freecnt = nswbuf / 2;
}
/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
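/*
 * Illustrative call (a sketch, not taken from this file): a device-pager
 * backed object covering "size" bytes at offset "foff" behind "handle"
 * could be obtained with something like
 *
 *	object = vm_pager_allocate(OBJT_DEVICE, handle, size, VM_PROT_ALL,
 *	    foff, curthread->td_ucred);
 *
 * where "handle" and "foff" are caller-supplied.
 */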
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
    vm_prot_t prot, vm_ooffset_t off, struct ucred *cred)
{
	vm_object_t ret;
	struct pagerops *ops;

	ops = pagertab[type];
	if (ops)
		ret = (*ops->pgo_alloc)(handle, size, prot, off, cred);
	else
		ret = NULL;
	return (ret);
}
/*
 * The object must be locked.
 */
void
vm_pager_deallocate(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	(*pagertab[object->type]->pgo_dealloc)(object);
}
static void
vm_pager_assert_in(vm_object_t object, vm_page_t *m, int count)
{
#ifdef INVARIANTS

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(count > 0, ("%s: 0 count", __func__));
	/*
	 * All pages must be busied, not mapped, not fully valid,
	 * not dirty and belong to the proper object.
	 */
	for (int i = 0; i < count; i++) {
		vm_page_assert_xbusied(m[i]);
		KASSERT(!pmap_page_is_mapped(m[i]),
		    ("%s: page %p is mapped", __func__, m[i]));
		KASSERT(m[i]->valid != VM_PAGE_BITS_ALL,
		    ("%s: request for a valid page %p", __func__, m[i]));
		KASSERT(m[i]->dirty == 0,
		    ("%s: page %p is dirty", __func__, m[i]));
		KASSERT(m[i]->object == object,
		    ("%s: wrong object %p/%p", __func__, object, m[i]->object));
	}
#endif
}
/*
 * Page in the pages for the object using its associated pager.
 * The requested page must be fully valid on successful return.
 */
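/*
 * Call-pattern sketch (illustrative only): with the object write-locked and
 * the page exclusively busied, a caller that does not want read-behind or
 * read-ahead passes NULL for both counters:
 *
 *	rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
 *	if (rv != VM_PAGER_OK)
 *		... handle the paging error ...
 */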
int
vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
#ifdef INVARIANTS
	vm_pindex_t pindex = m[0]->pindex;
#endif
	int r;

	vm_pager_assert_in(object, m, count);

	r = (*pagertab[object->type]->pgo_getpages)(object, m, count, rbehind,
	    rahead);
	if (r != VM_PAGER_OK)
		return (r);

	for (int i = 0; i < count; i++) {
		/*
		 * If pager has replaced a page, assert that it had
		 * updated the array.
		 */
		KASSERT(m[i] == vm_page_lookup(object, pindex++),
		    ("%s: mismatch page %p pindex %ju", __func__,
		    m[i], (uintmax_t)pindex - 1));
		/*
		 * Zero out partially filled data.
		 */
		if (m[i]->valid != VM_PAGE_BITS_ALL)
			vm_page_zero_invalid(m[i], TRUE);
	}
	return (VM_PAGER_OK);
}
int
vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
{

	vm_pager_assert_in(object, m, count);

	return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
	    count, rbehind, rahead, iodone, arg));
}
/*
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 */
/*
 * Search the specified pager object list for an object with the
 * specified handle.  If an object with the specified handle is found,
 * increase its reference count and return it.  Otherwise, return NULL.
 *
 * The pager object list must be locked.
 */
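/*
 * For example (an illustrative sketch): the device pager keeps its objects
 * on a private list and, while holding that list's lock, reuses an existing
 * object for a handle with something like
 *
 *	object = vm_pager_object_lookup(&dev_pager_object_list, handle);
 */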
vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
	vm_object_t object;

	TAILQ_FOREACH(object, pg_list, pager_object_list) {
		if (object->handle == handle) {
			VM_OBJECT_WLOCK(object);
			if ((object->flags & OBJ_DEAD) == 0) {
				vm_object_reference_locked(object);
				VM_OBJECT_WUNLOCK(object);
				break;
			}
			VM_OBJECT_WUNLOCK(object);
		}
	}
	return (object);
}
/*
 * initialize a physical buffer
 */

/*
 * XXX This probably belongs in vfs_bio.c
 */
static void
initpbuf(struct buf *bp)
{
	KASSERT(bp->b_bufobj == NULL, ("initpbuf with bufobj"));
	KASSERT(bp->b_vp == NULL, ("initpbuf with vp"));
	bp->b_rcred = NOCRED;
	bp->b_wcred = NOCRED;
	bp->b_qindex = 0;	/* On no queue (QUEUE_NONE) */
	bp->b_kvabase = (caddr_t)(MAXPHYS * (bp - swbuf)) + swapbkva;
	bp->b_data = bp->b_kvabase;
	bp->b_kvasize = MAXPHYS;
	BUF_LOCK(bp, LK_EXCLUSIVE, NULL);
}
/*
 * allocate a physical buffer
 *
 * There are a limited number (nswbuf) of physical buffers.  We need
 * to make sure that no single subsystem is able to hog all of them,
 * so each subsystem implements a counter which is typically initialized
 * to 1/2 nswbuf.  getpbuf() decrements this counter in allocation and
 * increments it on release, and blocks if the counter hits zero.  A
 * subsystem may initialize the counter to -1 to disable the feature,
 * but it must still be sure to match up all uses of getpbuf() with
 * relpbuf() using the same variable.
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 */
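/*
 * Usage sketch (illustrative, not part of this file): a subsystem sizes a
 * private counter once at initialization and then brackets each paging I/O
 * with a matching getpbuf()/relpbuf() pair on that same counter:
 *
 *	static int mysub_pbuf_freecnt;		(hypothetical counter)
 *
 *	mysub_pbuf_freecnt = nswbuf / 2;	(at init time)
 *	...
 *	bp = getpbuf(&mysub_pbuf_freecnt);	(may sleep until one is free)
 *	... perform the paging I/O with bp ...
 *	relpbuf(bp, &mysub_pbuf_freecnt);
 */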
struct buf *
getpbuf(int *pfreecnt)
{
	struct buf *bp;

	mtx_lock(&pbuf_mtx);
	for (;;) {
		if (pfreecnt != NULL) {
			while (*pfreecnt == 0) {
				msleep(pfreecnt, &pbuf_mtx, PVM, "wswbuf0", 0);
			}
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist)) != NULL)
			break;

		bswneeded = 1;
		msleep(&bswneeded, &pbuf_mtx, PVM, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	if (pfreecnt != NULL)
		--*pfreecnt;
	mtx_unlock(&pbuf_mtx);
	initpbuf(bp);
	return (bp);
}
/*
 * allocate a physical buffer, if one is available.
 *
 * Note that there is no NULL hack here - all subsystems using this
 * call understand how to use pfreecnt.
 */
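/*
 * Non-blocking use sketch (illustrative): fall back to another path when
 * no pbuf is available right now.  "start_sync_io()" is hypothetical.
 *
 *	if ((bp = trypbuf(&vnode_pbuf_freecnt)) == NULL)
 *		return (start_sync_io());
 */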
struct buf *
trypbuf(int *pfreecnt)
{
	struct buf *bp;

	mtx_lock(&pbuf_mtx);
	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist)) == NULL) {
		mtx_unlock(&pbuf_mtx);
		return (NULL);
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	--*pfreecnt;
	mtx_unlock(&pbuf_mtx);
	initpbuf(bp);
	return (bp);
}
/*
 * release a physical buffer
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 */
void
relpbuf(struct buf *bp, int *pfreecnt)
{

	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	KASSERT(bp->b_vp == NULL, ("relpbuf with vp"));
	KASSERT(bp->b_bufobj == NULL, ("relpbuf with bufobj"));

	BUF_UNLOCK(bp);

	mtx_lock(&pbuf_mtx);
	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);

	if (bswneeded) {
		bswneeded = 0;
		wakeup(&bswneeded);
	}
	if (pfreecnt != NULL) {
		if (++*pfreecnt == 1)
			wakeup(pfreecnt);
	}
	mtx_unlock(&pbuf_mtx);
}
/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets B_PAGING flag to indicate that vnode is not fully associated
 * with the buffer.  i.e. the bp has not been linked into the vnode or
 * removed from the free list.
 */
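/*
 * Typical lifecycle sketch (illustrative): a pager takes a buf from the
 * pbuf pool, points it at the vnode for the duration of the paging I/O,
 * then fully disassociates it before release:
 *
 *	bp = getpbuf(&vnode_pbuf_freecnt);
 *	pbgetvp(vp, bp);
 *	... issue the paging I/O and wait for it to complete ...
 *	pbrelvp(bp);
 *	relpbuf(bp, &vnode_pbuf_freecnt);
 */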
void
pbgetvp(struct vnode *vp, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetvp: not free (bufobj)"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	bp->b_bufobj = &vp->v_bufobj;
}
/*
 * Associate a p-buffer with a bufobj.
 *
 * Also sets B_PAGING flag to indicate that the bufobj is not fully
 * associated with the buffer.  i.e. the bp has not been linked into the
 * bufobj or removed from the free list.
 */
void
pbgetbo(struct bufobj *bo, struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbgetbo: not free (vnode)"));
	KASSERT(bp->b_bufobj == NULL, ("pbgetbo: not free (bufobj)"));

	bp->b_flags |= B_PAGING;
	bp->b_bufobj = bo;
}
/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(struct buf *bp)
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelvp: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelvp: pager buf on vnode list."));

	bp->b_vp = NULL;
	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}
/*
 * Disassociate a p-buffer from a bufobj.
 */
void
pbrelbo(struct buf *bp)
{

	KASSERT(bp->b_vp == NULL, ("pbrelbo: vnode"));
	KASSERT(bp->b_bufobj != NULL, ("pbrelbo: NULL bufobj"));
	KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
	    ("pbrelbo: pager buf on vnode list."));

	bp->b_bufobj = NULL;
	bp->b_flags &= ~B_PAGING;
}