 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)device_pager.c	8.1 (Berkeley) 6/11/93
 * $FreeBSD: src/sys/vm/device_pager.c,v 1.46.2.1 2000/08/02 21:54:37 peter Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/mman.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/mutex2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/vm_page2.h>
57 static pgo_dealloc_t dev_pager_dealloc
;
58 static pgo_getpage_t dev_pager_getpage
;
59 static pgo_putpages_t dev_pager_putpages
;
60 static pgo_haspage_t dev_pager_haspage
;
62 struct pagerops devicepagerops
= {
63 .pgo_dealloc
= dev_pager_dealloc
,
64 .pgo_getpage
= dev_pager_getpage
,
65 .pgo_putpages
= dev_pager_putpages
,
66 .pgo_haspage
= dev_pager_haspage
69 /* list of device pager objects */
70 static TAILQ_HEAD(, vm_page
) dev_freepages_list
=
71 TAILQ_HEAD_INITIALIZER(dev_freepages_list
);
72 static MALLOC_DEFINE(M_FICTITIOUS_PAGES
, "device-mapped pages",
73 "Device mapped pages");
75 static vm_page_t
dev_pager_getfake (vm_paddr_t
, int);
76 static void dev_pager_putfake (vm_page_t
);
78 /* list of device pager objects */
79 static struct pagerlst dev_pager_object_list
=
80 TAILQ_HEAD_INITIALIZER(dev_pager_object_list
);
81 /* protect list manipulation */
82 static struct mtx dev_pager_mtx
= MTX_INITIALIZER("devpgr");
84 static int old_dev_pager_ctor(void *handle
, vm_ooffset_t size
, vm_prot_t prot
,
85 vm_ooffset_t foff
, struct ucred
*cred
, u_short
*pg_color
);
86 static void old_dev_pager_dtor(void *handle
);
87 static int old_dev_pager_fault(vm_object_t object
, vm_ooffset_t offset
,
88 int prot
, vm_page_t
*mres
);
90 static struct cdev_pager_ops old_dev_pager_ops
= {
91 .cdev_pg_ctor
= old_dev_pager_ctor
,
92 .cdev_pg_dtor
= old_dev_pager_dtor
,
93 .cdev_pg_fault
= old_dev_pager_fault
97 cdev_pager_lookup(void *handle
)
101 mtx_lock(&dev_pager_mtx
);
102 object
= vm_pager_object_lookup(&dev_pager_object_list
, handle
);
103 mtx_unlock(&dev_pager_mtx
);
109 cdev_pager_allocate(void *handle
, enum obj_type tp
, struct cdev_pager_ops
*ops
,
110 vm_ooffset_t size
, vm_prot_t prot
, vm_ooffset_t foff
, struct ucred
*cred
)
117 * Offset should be page aligned.
119 if (foff
& PAGE_MASK
)
122 size
= round_page64(size
);
124 if (ops
->cdev_pg_ctor(handle
, size
, prot
, foff
, cred
, &color
) != 0)
128 * Look up pager, creating as necessary.
130 mtx_lock(&dev_pager_mtx
);
131 object
= vm_pager_object_lookup(&dev_pager_object_list
, handle
);
132 if (object
== NULL
) {
134 * Allocate object and associate it with the pager.
136 object
= vm_object_allocate_hold(tp
,
137 OFF_TO_IDX(foff
+ size
));
138 object
->handle
= handle
;
139 object
->un_pager
.devp
.ops
= ops
;
140 object
->un_pager
.devp
.dev
= handle
;
141 TAILQ_INIT(&object
->un_pager
.devp
.devp_pglist
);
144 * handle is only a device for old_dev_pager_ctor.
146 if (ops
->cdev_pg_ctor
== old_dev_pager_ctor
) {
148 dev
->si_object
= object
;
151 TAILQ_INSERT_TAIL(&dev_pager_object_list
, object
,
154 vm_object_drop(object
);
157 * Gain a reference to the object.
159 vm_object_hold(object
);
160 vm_object_reference_locked(object
);
161 if (OFF_TO_IDX(foff
+ size
) > object
->size
)
162 object
->size
= OFF_TO_IDX(foff
+ size
);
163 vm_object_drop(object
);
165 mtx_unlock(&dev_pager_mtx
);
174 dev_pager_alloc(void *handle
, off_t size
, vm_prot_t prot
, off_t foff
)
176 return (cdev_pager_allocate(handle
, OBJT_DEVICE
, &old_dev_pager_ops
,
177 size
, prot
, foff
, NULL
));
181 * Caller must hold object lock.
184 cdev_pager_free_page(vm_object_t object
, vm_page_t m
)
186 if (object
->type
== OBJT_MGTDEVICE
) {
187 KKASSERT((m
->flags
& PG_FICTITIOUS
) != 0);
188 pmap_page_protect(m
, VM_PROT_NONE
);
191 } else if (object
->type
== OBJT_DEVICE
) {
192 TAILQ_REMOVE(&object
->un_pager
.devp
.devp_pglist
, m
, pageq
);
193 dev_pager_putfake(m
);
201 dev_pager_dealloc(vm_object_t object
)
206 * NOTE: Callback may recurse into the device pager so do not
207 * obtain dev_pager_mtx until after it returns.
209 * The mutex should only be needed when manipulating the list.
211 object
->un_pager
.devp
.ops
->cdev_pg_dtor(object
->un_pager
.devp
.dev
);
213 mtx_lock(&dev_pager_mtx
);
214 TAILQ_REMOVE(&dev_pager_object_list
, object
, pager_object_entry
);
215 mtx_unlock(&dev_pager_mtx
);
217 if (object
->type
== OBJT_DEVICE
) {
219 * Free up our fake pages.
221 while ((m
= TAILQ_FIRST(&object
->un_pager
.devp
.devp_pglist
)) !=
223 TAILQ_REMOVE(&object
->un_pager
.devp
.devp_pglist
,
225 dev_pager_putfake(m
);
233 * WARNING! Do not obtain dev_pager_mtx here, doing so will cause a
234 * deadlock in DRMs VM paging code.
237 dev_pager_getpage(vm_object_t object
, vm_pindex_t pindex
,
238 vm_page_t
*mpp
, int seqaccess
)
242 error
= object
->un_pager
.devp
.ops
->cdev_pg_fault(
243 object
, IDX_TO_OFF(pindex
),
253 dev_pager_putpages(vm_object_t object
, vm_page_t
*m
,
254 int count
, int flags
, int *rtvals
)
256 panic("dev_pager_putpage called");
263 dev_pager_haspage(vm_object_t object
, vm_pindex_t pindex
)
269 * The caller does not need to hold dev_pager_mtx() but caller must ensure
270 * no page-use collision.
273 dev_pager_getfake(vm_paddr_t paddr
, int pat_mode
)
277 m
= kmalloc(sizeof(*m
), M_FICTITIOUS_PAGES
, M_WAITOK
|M_ZERO
);
281 m
->flags
= PG_FICTITIOUS
| PG_UNQUEUED
;
282 m
->valid
= VM_PAGE_BITS_ALL
;
287 m
->busy_count
= PBUSY_LOCKED
;
290 m
->phys_addr
= paddr
;
291 m
->pat_mode
= pat_mode
;
293 spin_init(&m
->spin
, "dev_page");
299 * The caller does not need to hold dev_pager_mtx() but caller must ensure
300 * no page-use collision within the object.
303 dev_pager_putfake(vm_page_t m
)
305 if (!(m
->flags
& PG_FICTITIOUS
))
306 panic("dev_pager_putfake: bad page");
307 KKASSERT(m
->object
== NULL
);
308 KKASSERT(m
->hold_count
== 0);
309 kfree(m
, M_FICTITIOUS_PAGES
);
313 old_dev_pager_ctor(void *handle
, vm_ooffset_t size
, vm_prot_t prot
,
314 vm_ooffset_t foff
, struct ucred
*cred
, u_short
*color
)
323 * Check that the specified range of the device allows the desired
326 * XXX assumes VM_PROT_* == PROT_*
328 npages
= OFF_TO_IDX(size
);
329 for (off
= foff
; npages
--; off
+= PAGE_SIZE
) {
330 if (dev_dmmap(dev
, off
, (int)prot
, NULL
) == -1)
337 static void old_dev_pager_dtor(void *handle
)
343 KKASSERT(dev
->si_object
);
344 dev
->si_object
= NULL
;
349 old_dev_pager_fault(vm_object_t object
, vm_ooffset_t offset
,
350 int prot
, vm_page_t
*mres
)
354 vm_offset_t pidx
= OFF_TO_IDX(offset
);
358 dev
= object
->handle
;
360 paddr
= pmap_phys_address(dev_dmmap(dev
, offset
, prot
, NULL
));
361 KASSERT(paddr
!= -1,("dev_pager_getpage: map function returns error"));
362 KKASSERT(object
->type
== OBJT_DEVICE
);
364 if (page
->flags
& PG_FICTITIOUS
) {
366 * If the passed in reqpage page is already a fake page,
367 * update it with the new physical address.
369 page
->phys_addr
= paddr
;
370 page
->valid
= VM_PAGE_BITS_ALL
;
373 * Replace the passed in reqpage page with our own fake page
374 * and free up all the original pages. Object lock must be
375 * held when manipulating devp_pglist and inserting the
378 page
= dev_pager_getfake(paddr
, object
->memattr
);
379 vm_object_hold(object
);
380 TAILQ_INSERT_TAIL(&object
->un_pager
.devp
.devp_pglist
,
383 if (vm_page_insert(page
, object
, pidx
) == FALSE
) {
384 panic("dev_pager_getpage: page (%p,%016jx) exists",
385 object
, (uintmax_t)pidx
);
387 vm_object_drop(object
);
389 return (VM_PAGER_OK
);