/*
 * Copyright (c) 2003, 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Hiten Pandya <hmp@backplane.com>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 * $DragonFly: src/sys/vm/vm_contig.c,v 1.21 2006/12/28 21:24:02 dillon Exp $
 */
/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Contiguous memory allocation API.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <vm/vm_page2.h>

static void vm_contig_pg_free(int start, u_long size);
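
/*
 * Typical use of the API implemented in this file (an illustrative
 * sketch only, not part of the kernel): a driver needing a physically
 * contiguous DMA buffer calls contigmalloc() and later releases the
 * memory with contigfree().  The 16MB ceiling and 64KB boundary below
 * are made-up example constraints, not requirements of this code.
 */
#if 0
static void *
example_dma_buf_alloc(void)
{
	void *buf;

	/* 16KB, below 16MB physical, page aligned, no 64KB boundary cross */
	buf = contigmalloc(16384, M_DEVBUF, M_WAITOK,
			   0, 16 * 1024 * 1024, PAGE_SIZE, 65536);
	return (buf);
}

static void
example_dma_buf_free(void *buf)
{
	contigfree(buf, 16384, M_DEVBUF);
}
#endif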

/*
 * vm_contig_pg_clean:
 *
 * Do a thorough cleanup of the specified 'queue', which can be either
 * PQ_ACTIVE or PQ_INACTIVE, by doing a walkthrough.  If the page is not
 * marked dirty it is shoved into the page cache, provided no one has
 * currently acquired it; otherwise localized action per object type
 * is taken for cleanup:
 *
 *	In the OBJT_VNODE case, the whole page range is cleaned up
 *	using the vm_object_page_clean() routine, by specifying a
 *	start and end of '0'.
 *
 *	Otherwise if the object is of any other type, the generic
 *	pageout (daemon) flush routine is invoked.
 */
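/*
 * For example, vm_contig_pg_alloc() below flushes the inactive queues
 * one page-color queue at a time:
 *
 *	for (q = 0; q < PQ_L2_SIZE; ++q)
 *		vm_contig_pg_clean(PQ_INACTIVE + q, vmstats.v_inactive_count);
 */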
static void
vm_contig_pg_clean(int queue, int count)
{
	vm_object_t object;
	vm_page_t m, m_tmp;
	struct vm_page marker;
	struct vpgqueues *pq = &vm_page_queues[queue];

	/*
	 * Setup a local marker
	 */
	bzero(&marker, sizeof(marker));
	marker.flags = PG_BUSY | PG_FICTITIOUS | PG_MARKER;
	marker.queue = queue;
	marker.wire_count = 1;

	vm_page_queues_spin_lock(queue);
	TAILQ_INSERT_HEAD(&pq->pl, &marker, pageq);
	vm_page_queues_spin_unlock(queue);

	/*
	 * Iterate the queue.  Note that the vm_page spinlock must be
	 * acquired before the pageq spinlock so it's easiest to simply
	 * not hold it in the loop iteration.
	 */
	while (count-- > 0 && (m = TAILQ_NEXT(&marker, pageq)) != NULL) {
		vm_page_and_queue_spin_lock(m);
		if (m != TAILQ_NEXT(&marker, pageq)) {
			vm_page_and_queue_spin_unlock(m);
			++count;
			continue;
		}
		KKASSERT(m->queue == queue);

		TAILQ_REMOVE(&pq->pl, &marker, pageq);
		TAILQ_INSERT_AFTER(&pq->pl, m, &marker, pageq);

		if (m->flags & PG_MARKER) {
			vm_page_and_queue_spin_unlock(m);
			continue;
		}
		if (vm_page_busy_try(m, TRUE)) {
			vm_page_and_queue_spin_unlock(m);
			continue;
		}
		vm_page_and_queue_spin_unlock(m);

		/*
		 * We've successfully busied the page
		 */
		if (m->queue - m->pc != queue) {
			vm_page_wakeup(m);
			continue;
		}
		if (m->wire_count || m->hold_count) {
			vm_page_wakeup(m);
			continue;
		}
		if ((object = m->object) == NULL) {
			vm_page_wakeup(m);
			continue;
		}
		vm_page_test_dirty(m);
		if (m->dirty || (m->flags & PG_NEED_COMMIT)) {
			vm_object_hold(object);
			KKASSERT(m->object == object);

			if (object->type == OBJT_VNODE) {
				vm_page_wakeup(m);
				vn_lock(object->handle, LK_EXCLUSIVE|LK_RETRY);
				vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
				vn_unlock(((struct vnode *)object->handle));
			} else if (object->type == OBJT_SWAP ||
				   object->type == OBJT_DEFAULT) {
				m_tmp = m;
				vm_pageout_flush(&m_tmp, 1, 0);
			} else {
				vm_page_wakeup(m);
			}
			vm_object_drop(object);
		} else if (m->hold_count == 0) {
			vm_page_cache(m);
		} else {
			vm_page_wakeup(m);
		}
	}

	/*
	 * Scrap our local marker
	 */
	vm_page_queues_spin_lock(queue);
	TAILQ_REMOVE(&pq->pl, &marker, pageq);
	vm_page_queues_spin_unlock(queue);
}

/*
 * vm_contig_pg_alloc:
 *
 * Allocate contiguous pages from the VM.  This function does not
 * map the allocated pages into the kernel map, otherwise it is
 * impossible to make large allocations (i.e. >2G).
 *
 * Malloc()'s data structures have been used for collection of
 * statistics and for allocations of less than a page.
 */
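/*
 * On success the return value is an index into vm_page_array[] naming
 * the first page of the run; on failure a negative value is returned.
 * For example, the physical base address of a successful allocation
 * can be recovered with VM_PAGE_TO_PHYS(&vm_page_array[index]).
 */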
static int
vm_contig_pg_alloc(unsigned long size, vm_paddr_t low, vm_paddr_t high,
		   unsigned long alignment, unsigned long boundary, int mflags)
{
	int i, q, start, pass;
	vm_offset_t phys;
	vm_page_t pga = vm_page_array;
	vm_page_t m;
	int pqtype;

	size = round_page(size);
	if (size == 0)
		panic("vm_contig_pg_alloc: size must not be 0");
	if ((alignment & (alignment - 1)) != 0)
		panic("vm_contig_pg_alloc: alignment must be a power of 2");
	if ((boundary & (boundary - 1)) != 0)
		panic("vm_contig_pg_alloc: boundary must be a power of 2");

	/*
	 * See if we can get the pages from the contiguous page reserve
	 * alist.  The returned pages will be allocated and wired but not
	 * busied.
	 */
	m = vm_page_alloc_contig(
		low, high, alignment, boundary, size, VM_MEMATTR_DEFAULT);
	if (m)
		return (m - &pga[0]);

	/*
	 * Three passes (0, 1, 2).  Each pass scans the VM page list for
	 * free or cached pages.  After each pass if the entire scan failed
	 * we attempt to flush inactive pages and reset the start index back
	 * to 0.  For passes 1 and 2 we also attempt to flush active pages.
	 */
	start = 0;
	for (pass = 0; pass < 3; pass++) {
		/*
		 * Find first page in array that is free, within range,
		 * aligned, and such that the boundary won't be crossed.
		 */
again:
		for (i = start; i < vmstats.v_page_count; i++) {
			m = &pga[i];
			phys = VM_PAGE_TO_PHYS(m);
			pqtype = m->queue - m->pc;
			if (((pqtype == PQ_FREE) || (pqtype == PQ_CACHE)) &&
			    (phys >= low) && (phys < high) &&
			    ((phys & (alignment - 1)) == 0) &&
			    (((phys ^ (phys + size - 1)) & ~(boundary - 1)) == 0) &&
			    m->busy == 0 && m->wire_count == 0 &&
			    m->hold_count == 0 &&
			    (m->flags & (PG_BUSY | PG_NEED_COMMIT)) == 0)
			{
				break;
			}
		}

		/*
		 * If we cannot find the page in the given range, or we have
		 * crossed the boundary, call the vm_contig_pg_clean() function
		 * for flushing out the queues, and returning it back to
		 * normal state.
		 */
		if ((i == vmstats.v_page_count) ||
		    ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {

			/*
			 * Best effort flush of all inactive pages.
			 * This is quite quick, for now stall all
			 * callers, even if they've specified M_NOWAIT.
			 */
			for (q = 0; q < PQ_L2_SIZE; ++q) {
				vm_contig_pg_clean(PQ_INACTIVE + q,
						   vmstats.v_inactive_count);
			}

			/*
			 * Best effort flush of active pages.
			 *
			 * This is very, very slow.
			 * Only do this if the caller has agreed to M_WAITOK.
			 *
			 * If enough pages are flushed, we may succeed on
			 * next (final) pass, if not the caller, contigmalloc(),
			 * will fail in the index < 0 case.
			 */
			if (pass > 0 && (mflags & M_WAITOK)) {
				for (q = 0; q < PQ_L2_SIZE; ++q) {
					vm_contig_pg_clean(PQ_ACTIVE + q,
							   vmstats.v_active_count);
				}
			}

			/*
			 * We're already too high in the address space
			 * to succeed, reset to 0 for the next iteration.
			 */
			start = 0;
			continue;	/* next pass */
		}
		start = i;

		/*
		 * Check successive pages for contiguous and free.
		 *
		 * (still in critical section)
		 */
		for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
			m = &pga[i];
			pqtype = m->queue - m->pc;
			if ((VM_PAGE_TO_PHYS(&m[0]) !=
			    (VM_PAGE_TO_PHYS(&m[-1]) + PAGE_SIZE)) ||
			    ((pqtype != PQ_FREE) && (pqtype != PQ_CACHE)) ||
			    m->busy || m->wire_count ||
			    m->hold_count ||
			    (m->flags & (PG_BUSY | PG_NEED_COMMIT)))
			{
				start++;
				goto again;
			}
		}

		/*
		 * Try to allocate the pages, wiring them as we go.
		 *
		 * (still in critical section)
		 */
		for (i = start; i < (start + size / PAGE_SIZE); i++) {
			m = &pga[i];

			if (vm_page_busy_try(m, TRUE)) {
				vm_contig_pg_free(start,
						  (i - start) * PAGE_SIZE);
				start++;
				goto again;
			}

			pqtype = m->queue - m->pc;
			if (pqtype == PQ_CACHE &&
			    m->hold_count == 0 &&
			    m->wire_count == 0 &&
			    (m->flags & (PG_UNMANAGED | PG_NEED_COMMIT)) == 0) {
				vm_page_protect(m, VM_PROT_NONE);
				KKASSERT((m->flags & PG_MAPPED) == 0);
				KKASSERT(m->dirty == 0);
				vm_page_free(m);
				--i;
				continue;	/* retry the page */
			}

			if (pqtype != PQ_FREE || m->hold_count) {
				vm_page_wakeup(m);
				vm_contig_pg_free(start,
						  (i - start) * PAGE_SIZE);
				start++;
				goto again;
			}

			KKASSERT((m->valid & m->dirty) == 0);
			KKASSERT(m->wire_count == 0);
			KKASSERT(m->object == NULL);
			vm_page_unqueue_nowakeup(m);
			m->valid = VM_PAGE_BITS_ALL;
			if (m->flags & PG_ZERO)
				vm_page_zero_count--;
			KASSERT(m->dirty == 0,
				("vm_contig_pg_alloc: page %p was dirty", m));
			KKASSERT(m->wire_count == 0);
			KKASSERT(m->busy == 0);

			/*
			 * Clear all flags except PG_BUSY, PG_ZERO, and
			 * PG_WANTED, then unbusy the now allocated page.
			 */
			vm_page_flag_clear(m, ~(PG_BUSY | PG_SBUSY |
						PG_ZERO | PG_WANTED));
			vm_page_wire(m);
			vm_page_wakeup(m);
		}

		/*
		 * Our job is done, return the index page of vm_page_array.
		 */
		return (start);	/* aka &pga[start] */
	}

	/*
	 * Failed.
	 */
	return (-1);
}

/*
 * vm_contig_pg_free:
 *
 * Remove pages previously allocated by vm_contig_pg_alloc, and
 * assume all references to the pages have been removed, and that
 * it is OK to add them back to the free list.
 *
 * Caller must ensure no races on the page range in question.
 * No other requirements.
 */
static void
vm_contig_pg_free(int start, u_long size)
{
	vm_page_t pga = vm_page_array;

	size = round_page(size);
	if (size == 0)
		panic("vm_contig_pg_free: size must not be 0");

	/*
	 * The pages are wired, vm_page_free_contig() determines whether they
	 * belong to the contig space or not and either frees them to that
	 * space (leaving them wired), or unwires the page and frees it to the
	 * normal PQ_FREE queue.
	 */
	vm_page_free_contig(&pga[start], size);
}

/*
 * vm_contig_pg_kmap:
 *
 * Map previously allocated (vm_contig_pg_alloc) range of pages from
 * vm_page_array[] into the KVA.  Once mapped, the pages are part of
 * the kernel, and are to be freed with kmem_free(&kernel_map, addr, size).
 *
 * No requirements.
 */
static vm_offset_t
vm_contig_pg_kmap(int start, u_long size, vm_map_t map, int flags)
{
	vm_offset_t addr;
	vm_paddr_t pa;
	vm_page_t pga = vm_page_array;
	u_long offset;

	if (size == 0)
		panic("vm_contig_pg_kmap: size must not be 0");
	size = round_page(size);
	addr = kmem_alloc_pageable(&kernel_map, size);
	if (addr) {
		pa = VM_PAGE_TO_PHYS(&pga[start]);
		for (offset = 0; offset < size; offset += PAGE_SIZE)
			pmap_kenter_quick(addr + offset, pa + offset);
		if (flags & M_ZERO)
			bzero((void *)addr, size);
	}
	return (addr);
}

void *
contigmalloc(
	unsigned long size,	/* should be size_t here and for malloc() */
	struct malloc_type *type,
	int flags,
	vm_paddr_t low,
	vm_paddr_t high,
	unsigned long alignment,
	unsigned long boundary)
{
	return contigmalloc_map(size, type, flags, low, high, alignment,
				boundary, &kernel_map);
}

void *
contigmalloc_map(unsigned long size, struct malloc_type *type,
		 int flags, vm_paddr_t low, vm_paddr_t high,
		 unsigned long alignment, unsigned long boundary,
		 vm_map_t map)
{
	int index;
	void *rv;

	index = vm_contig_pg_alloc(size, low, high, alignment, boundary, flags);
	if (index < 0) {
		kprintf("contigmalloc_map: failed size %lu low=%llx "
			"high=%llx align=%lu boundary=%lu flags=%08x\n",
			size, (long long)low, (long long)high,
			alignment, boundary, flags);
		return (NULL);
	}

	rv = (void *)vm_contig_pg_kmap(index, size, map, flags);
	if (rv == NULL)
		vm_contig_pg_free(index, size);

	return (rv);
}

void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{
	vm_paddr_t pa;
	vm_page_t m;

	if (size == 0)
		panic("contigfree: size must not be 0");
	size = round_page(size);

	pa = pmap_extract(&kernel_pmap, (vm_offset_t)addr);
	pmap_qremove((vm_offset_t)addr, size / PAGE_SIZE);
	kmem_free(&kernel_map, (vm_offset_t)addr, size);

	m = PHYS_TO_VM_PAGE(pa);
	vm_page_free_contig(m, size);
}

vm_offset_t
kmem_alloc_contig(vm_offset_t size, vm_paddr_t low, vm_paddr_t high,
		  vm_offset_t alignment)
{
	return ((vm_offset_t)contigmalloc_map(size, M_DEVBUF, M_NOWAIT, low,
					      high, alignment, 0ul, &kernel_map));
}