/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_pager.c,v 1.54.2.2 2001/11/18 07:11:00 dillon Exp $
 * $DragonFly: src/sys/vm/vm_pager.c,v 1.11 2004/07/14 03:10:17 hmp Exp $
 */

/*
 * Paging space routine stubs.  Emulates a matchmaker-like interface
 * for builtin pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/buf.h>		/* struct buf, bswlist */
#include <sys/ucred.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

MALLOC_DEFINE(M_VMPGDATA, "VM pgdata", "XXX: VM pager private data");

extern struct pagerops defaultpagerops;
extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;
extern struct pagerops physpagerops;

int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */

static int dead_pager_getpages (vm_object_t, vm_page_t *, int, int);
static vm_object_t dead_pager_alloc (void *, vm_ooffset_t, vm_prot_t,
	vm_ooffset_t);
static void dead_pager_putpages (vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage (vm_object_t, vm_pindex_t, int *, int *);
static void dead_pager_dealloc (vm_object_t);

static int
dead_pager_getpages(vm_object_t obj, vm_page_t *ma, int count, int req)
{
	return VM_PAGER_FAIL;
}

static vm_object_t
dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
	vm_ooffset_t off)
{
	return NULL;
}

static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
	int *rtvals)
{
	int i;

	for (i = 0; i < count; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}
}

static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev, int *next)
{
	if (prev)
		*prev = 0;
	if (next)
		*next = 0;
	return FALSE;
}

static void
dead_pager_dealloc(vm_object_t object)
{
	return;
}

static struct pagerops deadpagerops = {
	NULL,			/* pgo_init */
	dead_pager_alloc,
	dead_pager_dealloc,
	dead_pager_getpages,
	dead_pager_putpages,
	dead_pager_haspage,
	NULL			/* pgo_pageunswapped */
};

struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops		/* OBJT_DEAD */
};

int npagers = sizeof(pagertab) / sizeof(pagertab[0]);

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.
 */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)
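/*
 * Worked out, the sizing note above is NPENDINGIO * MAXPHYS ==
 * 64 * 64KB == 4MB of pager KVA in the worst case, so the 8MB
 * default leaves roughly a 2x margin.
 */
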
int pager_map_size = PAGER_MAP_SIZE;
vm_map_t pager_map;
static int bswneeded;
static vm_offset_t swapbkva;		/* swap buffers kva */

void
vm_pager_init(void)
{
	struct pagerops **pgops;

	/*
	 * Initialize known pagers
	 */
	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (pgops && ((*pgops)->pgo_init != NULL))
			(*(*pgops)->pgo_init) ();
}

void
vm_pager_bufferinit(void)
{
	struct buf *bp;
	int i;

	bp = swbuf;
	/*
	 * Now set up swap and physical I/O buffer headers.
	 */
	for (i = 0; i < nswbuf; i++, bp++) {
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		BUF_LOCKINIT(bp);
		LIST_INIT(&bp->b_dep);
		bp->b_xflags = 0;
	}

	cluster_pbuf_freecnt = nswbuf / 2;

	swapbkva = kmem_alloc_pageable(pager_map, nswbuf * MAXPHYS);
	if (!swapbkva)
		panic("Not enough pager_map VM space for physical buffers");
}

/*
 * Allocate an instance of a pager of the given type.
 * Size, protection and offset parameters are passed in for pagers that
 * need to perform page-level validation (e.g. the device pager).
 */
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
		  vm_prot_t prot, vm_ooffset_t off)
{
	struct pagerops *ops;

	ops = pagertab[type];
	if (ops)
		return ((*ops->pgo_alloc) (handle, size, prot, off));
	return (NULL);
}
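
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller attaching a vnode-backed object might look like this, where
 * 'vp' and 'size' are assumed to be supplied by the caller:
 *
 *	vm_object_t obj;
 *
 *	obj = vm_pager_allocate(OBJT_VNODE, vp, size,
 *				VM_PROT_READ | VM_PROT_WRITE, 0);
 *	if (obj == NULL)
 *		return (ENOMEM);
 */
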
void
vm_pager_deallocate(vm_object_t object)
{
	(*pagertab[object->type]->pgo_dealloc) (object);
}

/*
 *	vm_pager_strategy:
 *
 *	Called with no specific spl.
 *	Execute strategy routine directly to pager.
 */
void
vm_pager_strategy(vm_object_t object, struct buf *bp)
{
	if (pagertab[object->type]->pgo_strategy) {
		(*pagertab[object->type]->pgo_strategy)(object, bp);
	} else {
		bp->b_flags |= B_ERROR;
		bp->b_error = ENXIO;
		biodone(bp);
	}
}

/*
 * vm_pager_get_pages() - inline, see vm/vm_pager.h
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 * vm_pager_page_inserted() - inline, see vm/vm_pager.h
 * vm_pager_page_removed() - inline, see vm/vm_pager.h
 */

/*
 *	vm_pager_sync:
 *
 *	Called by pageout daemon before going back to sleep.
 *	Gives pagers a chance to clean up any completed async paging
 *	operations.
 */
void
vm_pager_sync(void)
{
	struct pagerops **pgops;

	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (pgops && ((*pgops)->pgo_sync != NULL))
			(*(*pgops)->pgo_sync) ();
}

vm_object_t
vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
{
	vm_object_t object;

	for (object = TAILQ_FIRST(pg_list); object != NULL;
	     object = TAILQ_NEXT(object, pager_object_list)) {
		if (object->handle == handle)
			return (object);
	}
	return (NULL);
}

/*
 * initialize a physical buffer
 */
static void
initpbuf(struct buf *bp)
{
	bp->b_qindex = QUEUE_NONE;
	bp->b_data = (caddr_t) (MAXPHYS * (bp - swbuf)) + swapbkva;
	bp->b_kvabase = bp->b_data;
	bp->b_kvasize = MAXPHYS;
	bp->b_flags = 0;
	bp->b_xflags = 0;
	bp->b_error = 0;
	xio_init(&bp->b_xio);
	BUF_LOCK(bp, LK_EXCLUSIVE);
}

/*
 * allocate a physical buffer
 *
 *	There are a limited number (nswbuf) of physical buffers.  We need
 *	to make sure that no single subsystem is able to hog all of them,
 *	so each subsystem implements a counter which is typically initialized
 *	to 1/2 nswbuf.  getpbuf() decrements this counter in allocation and
 *	increments it on release, and blocks if the counter hits zero.  A
 *	subsystem may initialize the counter to -1 to disable the feature,
 *	but it must still be sure to match up all uses of getpbuf() with
 *	relpbuf() using the same variable.  (An illustrative usage sketch
 *	follows the function below.)
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 */
struct buf *
getpbuf(int *pfreecnt)
{
	int s;
	struct buf *bp;

	s = splvm();

	for (;;) {
		if (pfreecnt) {
			while (*pfreecnt == 0) {
				tsleep(pfreecnt, 0, "wswbuf0", 0);
			}
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist)) != NULL)
			break;
		bswneeded = 1;
		tsleep(&bswneeded, 0, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	if (pfreecnt)
		--*pfreecnt;
	splx(s);

	initpbuf(bp);
	return bp;
}
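
/*
 * Illustrative sketch (hypothetical, not part of this file) of the
 * counter protocol described above getpbuf().  mysub_pbuf_freecnt
 * stands in for a subsystem's private counter, set to nswbuf / 2 at
 * subsystem init time:
 *
 *	static int mysub_pbuf_freecnt = -1;
 *
 *	void
 *	mysub_io(void)
 *	{
 *		struct buf *bp;
 *
 *		bp = getpbuf(&mysub_pbuf_freecnt);	(may block at zero)
 *		... set up and issue I/O on bp ...
 *		relpbuf(bp, &mysub_pbuf_freecnt);	(must use the same
 *							 counter variable)
 *	}
 */
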
/*
 * allocate a physical buffer, if one is available.
 *
 *	Note that there is no NULL hack here - all subsystems using this
 *	call understand how to use pfreecnt.
 */
struct buf *
trypbuf(int *pfreecnt)
{
	int s;
	struct buf *bp;

	s = splvm();
	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist)) == NULL) {
		splx(s);
		return NULL;
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	--*pfreecnt;
	splx(s);

	initpbuf(bp);
	return bp;
}

/*
 * release a physical buffer
 *
 *	NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 *	relatively soon when the rest of the subsystems get smart about it. XXX
 */
void
relpbuf(struct buf *bp, int *pfreecnt)
{
	int s;

	s = splvm();

	if (bp->b_vp)
		pbrelvp(bp);

	BUF_UNLOCK(bp);

	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);

	if (bswneeded) {
		bswneeded = 0;
		wakeup(&bswneeded);
	}
	if (pfreecnt) {
		if (++*pfreecnt == 1)
			wakeup(pfreecnt);
	}
	splx(s);
}

/********************************************************
 *		CHAINING FUNCTIONS			*
 ********************************************************
 *
 *	These functions support recursion of I/O operations
 *	on bp's, typically by chaining one or more 'child' bp's
 *	to the parent.  Synchronous, asynchronous, and semi-synchronous
 *	chaining is possible.  (An illustrative usage sketch appears at
 *	the end of this file.)
 */

/*
 *	vm_pager_chain_iodone:
 *
 *	io completion routine for child bp.  Currently we fudge a bit
 *	on dealing with b_resid.  Since users of these routines may issue
 *	multiple children simultaneously, sequencing of the error can be lost.
 */
static void
vm_pager_chain_iodone(struct buf *nbp)
{
	struct buf *bp;

	if ((bp = nbp->b_chain.parent) != NULL) {
		if (nbp->b_flags & B_ERROR) {
			bp->b_flags |= B_ERROR;
			bp->b_error = nbp->b_error;
		} else if (nbp->b_resid != 0) {
			bp->b_flags |= B_ERROR;
			bp->b_error = EINVAL;
		} else {
			bp->b_resid -= nbp->b_bcount;
		}
		nbp->b_chain.parent = NULL;
		--bp->b_chain.count;
		if (bp->b_flags & B_WANT) {
			bp->b_flags &= ~B_WANT;
			wakeup(bp);
		}
		if (!bp->b_chain.count && (bp->b_xflags & BX_AUTOCHAINDONE)) {
			bp->b_xflags &= ~BX_AUTOCHAINDONE;
			if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
				bp->b_flags |= B_ERROR;
				bp->b_error = EINVAL;
			}
			biodone(bp);
		}
	}
	nbp->b_flags |= B_DONE;
	nbp->b_flags &= ~B_ASYNC;
	relpbuf(nbp, NULL);
}

/*
 *	getchainbuf:
 *
 *	Obtain a physical buffer and chain it to its parent buffer.  When
 *	I/O completes, the parent buffer will be B_SIGNAL'd.  Errors are
 *	automatically propagated to the parent.
 *
 *	Since these are brand new buffers, we do not have to clear B_INVAL
 *	and B_ERROR because they are already clear.
 */
struct buf *
getchainbuf(struct buf *bp, struct vnode *vp, int flags)
{
	struct buf *nbp = getpbuf(NULL);

	nbp->b_chain.parent = bp;
	++bp->b_chain.count;

	if (bp->b_chain.count > 4)
		waitchainbuf(bp, 4, 0);

	nbp->b_flags = B_CALL | (bp->b_flags & B_ORDERED) | flags;
	nbp->b_iodone = vm_pager_chain_iodone;

	if (vp)
		pbgetvp(vp, nbp);
	return (nbp);
}

void
flushchainbuf(struct buf *nbp)
{
	if (nbp->b_bcount) {
		nbp->b_bufsize = nbp->b_bcount;
		if ((nbp->b_flags & B_READ) == 0)
			nbp->b_dirtyend = nbp->b_bcount;
		BUF_KERNPROC(nbp);
		VOP_STRATEGY(nbp->b_vp, nbp);
	} else {
		biodone(nbp);
	}
}

void
waitchainbuf(struct buf *bp, int count, int done)
{
	int s;

	s = splbio();
	while (bp->b_chain.count > count) {
		bp->b_flags |= B_WANT;
		tsleep(bp, 0, "bpchain", 0);
	}
	if (done) {
		if (bp->b_resid != 0 && !(bp->b_flags & B_ERROR)) {
			bp->b_flags |= B_ERROR;
			bp->b_error = EINVAL;
		}
		biodone(bp);
		bp = NULL;
	}
	splx(s);
}

void
autochaindone(struct buf *bp)
{
	int s;

	s = splbio();
	if (bp->b_chain.count == 0)
		biodone(bp);
	else
		bp->b_xflags |= BX_AUTOCHAINDONE;
	splx(s);
}
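
/*
 * Illustrative sketch (hypothetical, not part of this file) of
 * semi-synchronous chaining with the routines above.  'bp' is a parent
 * buffer the caller already owns and 'vp' its backing vnode:
 *
 *	struct buf *nbp;
 *
 *	nbp = getchainbuf(bp, vp, B_ASYNC);	(throttles at 4 children)
 *	... set up nbp->b_data, b_bcount, b_blkno ...
 *	flushchainbuf(nbp);			(starts the child I/O)
 *	waitchainbuf(bp, 0, 1);			(waits for all children,
 *						 then biodone()s the parent)
 */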