/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#ifndef	_VM_PAGE_
#define	_VM_PAGE_

#include <vm/pmap.h>

/*
 *	Resident memory system definitions.
 */
/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several collections:
 *
 *		A radix tree used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	In general, operations on this structure's mutable fields are
 *	synchronized using one of, or a combination of, the lock on the
 *	object that the page belongs to (O), the pool lock for the page (P),
 *	and the lock for either the free or paging queue (Q).  If a field is
 *	annotated below with two of these locks, then holding either lock is
 *	sufficient for read access, but both locks are required for write
 *	access.
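 *
 *	For example (an illustrative sketch, not part of the original
 *	annotations): for a field annotated (P,Q), such as "queue", either
 *	lock alone suffices to read it, but a writer must hold both:
 *
 *		vm_page_lock(m);		(P) page lock
 *		vm_pagequeue_lock(pq);		(Q) queue lock
 *		m->queue = PQ_INACTIVE;
 *		vm_pagequeue_unlock(pq);
 *		vm_page_unlock(m);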
 *
 *	In contrast, the synchronization of accesses to the page's
 *	dirty field is machine dependent (M).  In the
 *	machine-independent layer, the lock on the object that the
 *	page belongs to must be held in order to operate on the field.
 *	However, the pmap layer is permitted to set all bits within
 *	the field without holding that lock.  If the underlying
 *	architecture does not support atomic read-modify-write
 *	operations on the field's type, then the machine-independent
 *	layer uses a 32-bit atomic on the aligned 32-bit word that
 *	contains the dirty field.  In the machine-independent layer,
 *	the implementation of read-modify-write operations on the
 *	field is encapsulated in vm_page_clear_dirty_mask().
 */
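/*
 * A sketch of the 32-bit fallback described above (illustrative only;
 * "shifted_mask" is a hypothetical name for the dirty mask shifted into
 * the field's byte lane within the containing word):
 *
 *	addr = (uint32_t *)((uintptr_t)&m->dirty & ~(uintptr_t)3);
 *	atomic_clear_32(addr, shifted_mask);
 *
 * vm_page_aflag_clear() and vm_page_aflag_set() below use the same
 * word-sized-atomic technique for the aflags field.
 */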
#if PAGE_SIZE == 4096
#define	VM_PAGE_BITS_ALL	0xffu
typedef uint8_t vm_page_bits_t;
#elif PAGE_SIZE == 8192
#define	VM_PAGE_BITS_ALL	0xffffu
typedef uint16_t vm_page_bits_t;
#elif PAGE_SIZE == 16384
#define	VM_PAGE_BITS_ALL	0xffffffffu
typedef uint32_t vm_page_bits_t;
#elif PAGE_SIZE == 32768
#define	VM_PAGE_BITS_ALL	0xfffffffffffffffflu
typedef uint64_t vm_page_bits_t;
#endif
struct vm_page {
	union {
		TAILQ_ENTRY(vm_page) q;	/* page queue or free list (Q) */
		struct {
			SLIST_ENTRY(vm_page) ss; /* private slists */
			void *pv;
		} s;
		struct {
			u_long p;
			u_long v;
		} memguard;
	} plinks;
	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
	vm_object_t object;		/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	u_int wire_count;		/* wired down maps refs (P) */
	volatile u_int busy_lock;	/* busy owners lock */
	uint16_t hold_count;		/* page hold count (P) */
	uint16_t flags;			/* page PG_* flags (P) */
	uint8_t aflags;			/* access is atomic */
	uint8_t oflags;			/* page VPO_* flags (O) */
	uint8_t queue;			/* page queue index (P,Q) */
	int8_t psind;			/* pagesizes[] index (O) */
	int8_t segind;			/* vm_phys segment index */
	uint8_t order;			/* index of the buddy queue */
	uint8_t pool;			/* vm_phys free pool index */
	u_char act_count;		/* page usage count (P) */
	/* NOTE that these must support one bit per DEV_BSIZE in a page */
	/* so, on normal X86 kernels, they must be at least 8 bits wide */
	vm_page_bits_t valid;		/* map of valid DEV_BSIZE chunks (O) */
	vm_page_bits_t dirty;		/* map of dirty DEV_BSIZE chunks (M) */
};
/*
 * Page flags stored in oflags:
 *
 * Access to these page flags is synchronized by the lock on the object
 * containing the page (O).
 *
 * Note: VPO_UNMANAGED (used by OBJT_DEVICE, OBJT_PHYS and OBJT_SG)
 *	 indicates that the page is not under PV management but
 *	 otherwise should be treated as a normal page.  Pages not
 *	 under PV management cannot be paged out via the
 *	 object/vm_page_t because there is no knowledge of their pte
 *	 mappings, and such pages are also not on any PQ queue.
 */
#define	VPO_UNUSED01	0x01		/* --available-- */
#define	VPO_SWAPSLEEP	0x02		/* waiting for swap to finish */
#define	VPO_UNMANAGED	0x04		/* no PV management for page */
#define	VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */
#define	VPO_NOSYNC	0x10		/* do not collect for syncer */
/*
 * Busy page implementation details.
 * The algorithm is taken mostly from the rwlock(9) and sx(9) lock
 * implementations, although support for owner identity has been removed
 * because of size constraints.  Lock recursion checks are therefore not
 * possible, and the effectiveness of the lock assertions is somewhat
 * reduced.
 */
#define	VPB_BIT_SHARED		0x01
#define	VPB_BIT_EXCLUSIVE	0x02
#define	VPB_BIT_WAITERS		0x04
#define	VPB_BIT_FLAGMASK						\
	(VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)

#define	VPB_SHARERS_SHIFT	3
#define	VPB_SHARERS(x)							\
	(((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
#define	VPB_SHARERS_WORD(x)	((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
#define	VPB_ONE_SHARER		(1 << VPB_SHARERS_SHIFT)

#define	VPB_SINGLE_EXCLUSIVER	VPB_BIT_EXCLUSIVE

#define	VPB_UNBUSIED		VPB_SHARERS_WORD(0)
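/*
 * Worked example of the encoding (illustrative): with three shared
 * holders, busy_lock is VPB_SHARERS_WORD(3) == (3 << 3) | VPB_BIT_SHARED
 * == 0x19, and VPB_SHARERS(0x19) == (0x19 & ~0x07) >> 3 == 3 recovers
 * the holder count.  VPB_UNBUSIED is thus 0x01: zero sharers with the
 * shared bit set.
 */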
#define	PQ_NONE		255
#define	PQ_INACTIVE	0
#define	PQ_ACTIVE	1
#define	PQ_COUNT	2
TAILQ_HEAD(pglist, vm_page);
SLIST_HEAD(spglist, vm_page);
struct vm_pagequeue {
	struct mtx	pq_mutex;
	struct pglist	pq_pl;
	int		pq_cnt;
	u_int		* const pq_vcnt;
	const char	* const pq_name;
} __aligned(CACHE_LINE_SIZE);
struct vm_domain {
	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
	u_int vmd_page_count;
	u_int vmd_free_count;
	long vmd_segs;			/* bitmask of the segments */
	int vmd_pass;			/* local pagedaemon pass */
	int vmd_last_active_scan;
	struct vm_page vmd_marker;	/* marker for pagedaemon private use */
	struct vm_page vmd_inacthead;	/* marker for LRU-defeating insertions */
};
extern struct vm_domain vm_dom[MAXMEMDOM];

#define	vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
#define	vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
#define	vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)
static __inline void
vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
{

#ifdef notyet
	vm_pagequeue_assert_locked(pq);
#endif
	pq->pq_cnt += addend;
	atomic_add_int(pq->pq_vcnt, addend);
}
#define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
#define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)
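/*
 * Usage sketch (hypothetical caller, assuming the queue lock is held):
 * a page is unlinked from a queue and the cached counters are adjusted
 * in one critical section:
 *
 *	vm_pagequeue_lock(pq);
 *	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
 *	vm_pagequeue_cnt_dec(pq);
 *	vm_pagequeue_unlock(pq);
 */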
extern struct mtx_padalign vm_page_queue_free_mtx;
extern struct mtx_padalign pa_lock[];
#if defined(__arm__)
#define	PDRSHIFT	PDR_SHIFT
#elif !defined(PDRSHIFT)
#define	PDRSHIFT	21
#endif

#define	pa_index(pa)	((pa) >> PDRSHIFT)
#define	PA_LOCKPTR(pa)	((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
#define	PA_LOCKOBJPTR(pa)	((struct lock_object *)PA_LOCKPTR((pa)))
#define	PA_LOCK(pa)	mtx_lock(PA_LOCKPTR(pa))
#define	PA_TRYLOCK(pa)	mtx_trylock(PA_LOCKPTR(pa))
#define	PA_UNLOCK(pa)	mtx_unlock(PA_LOCKPTR(pa))
#define	PA_UNLOCK_COND(pa)			\
	do {					\
		if ((pa) != 0) {		\
			PA_UNLOCK(pa);		\
			(pa) = 0;		\
		}				\
	} while (0)

#define	PA_LOCK_ASSERT(pa, a)	mtx_assert(PA_LOCKPTR(pa), (a))
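/*
 * Illustrative example: the "page lock" is really one of PA_LOCK_COUNT
 * striped mutexes, selected by the page's physical address, so two pages
 * whose addresses map to the same stripe share a mutex:
 *
 *	PA_LOCK(VM_PAGE_TO_PHYS(m));
 *	... m is now page-locked ...
 *	PA_UNLOCK(VM_PAGE_TO_PHYS(m));
 */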
#ifdef KLD_MODULE
#define	vm_page_lock(m)		vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_unlock(m)	vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
#define	vm_page_trylock(m)	vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
#else	/* !KLD_MODULE */
#define	vm_page_lockptr(m)	(PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
#define	vm_page_lock(m)		mtx_lock(vm_page_lockptr((m)))
#define	vm_page_unlock(m)	mtx_unlock(vm_page_lockptr((m)))
#define	vm_page_trylock(m)	mtx_trylock(vm_page_lockptr((m)))
#endif
#if defined(INVARIANTS)
#define	vm_page_assert_locked(m)		\
    vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
#define	vm_page_lock_assert(m, a)		\
    vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
#else
#define	vm_page_assert_locked(m)
#define	vm_page_lock_assert(m, a)
#endif
/*
 * The vm_page's aflags are updated using atomic operations.  To set or clear
 * these flags, the functions vm_page_aflag_set() and vm_page_aflag_clear()
 * must be used.  Neither these flags nor these functions are part of the KBI.
 *
 * PGA_REFERENCED may be cleared only if the page is locked.  It is set by
 * both the MI and MD VM layers.  However, kernel loadable modules should not
 * directly set this flag.  They should call vm_page_reference() instead.
 *
 * PGA_WRITEABLE is set exclusively on managed pages by pmap_enter().
 * When it does so, the object must be locked, or the page must be
 * exclusive busied.  The MI VM layer must never access this flag
 * directly.  Instead, it should call pmap_page_is_write_mapped().
 *
 * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
 * at least one executable mapping.  It is not consumed by the MI VM layer.
 */
#define	PGA_WRITEABLE	0x01		/* page may be mapped writeable */
#define	PGA_REFERENCED	0x02		/* page has been referenced */
#define	PGA_EXECUTABLE	0x04		/* page may be mapped executable */
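/*
 * Illustrative sketch (hypothetical pmap-side code): a pmap that installs
 * a writeable mapping records the fact via PGA_WRITEABLE, and the MI layer
 * later consults it only through pmap_page_is_write_mapped():
 *
 *	vm_page_aflag_set(m, PGA_WRITEABLE);
 *	...
 *	if (pmap_page_is_write_mapped(m))
 *		vm_page_dirty(m);
 */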
/*
 * Page flags.  If changed at any other time than page allocation or
 * freeing, the modification must be protected by the vm_page lock.
 */
#define	PG_CACHED	0x0001		/* page is cached */
#define	PG_FICTITIOUS	0x0004		/* physical page doesn't exist */
#define	PG_ZERO		0x0008		/* page is zeroed */
#define	PG_MARKER	0x0010		/* special queue marker page */
#define	PG_WINATCFLS	0x0040		/* flush dirty page on inactive q */
#define	PG_NODUMP	0x0080		/* don't include this page in a dump */
#define	PG_UNHOLDFREE	0x0100		/* delayed free of a held page */
#define	ACT_DECLINE	1
#define	ACT_ADVANCE	3

#ifdef _KERNEL

#include <sys/systm.h>

#include <machine/atomic.h>
/*
 *	Each pageable resident page falls into one of four lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	cache
 *		Almost available for allocation.  Still associated with
 *		an object, but clean and immediately freeable.
 *
 * The following lists are LRU sorted:
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e., they have been
 *		recently referenced.
 */
extern int vm_page_zero_count;

extern vm_page_t vm_page_array;		/* First resident page in table */
extern long vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define	VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)
/*
 * PHYS_TO_VM_PAGE() returns the vm_page_t object that represents a memory
 * page to which the given physical address belongs.  The correct vm_page_t
 * object is returned for addresses that are not page-aligned.
 */
vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
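/*
 * Illustrative property (follows from the comment above): the sub-page
 * offset is ignored, so for any off < PAGE_SIZE,
 *
 *	PHYS_TO_VM_PAGE(VM_PAGE_TO_PHYS(m) + off) == m
 */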
/*
 * Page allocation parameters for vm_page for the functions
 * vm_page_alloc(), vm_page_grab(), vm_page_alloc_contig() and
 * vm_page_alloc_freelist().  Some functions support only a subset
 * of the flags and ignore others; see the flags legend.
 *
 * Bits 0 - 1 define class.
 * Bits 2 - 15 are dedicated to flags.
 * (a) - vm_page_alloc() supports the flag.
 * (c) - vm_page_alloc_contig() supports the flag.
 * (f) - vm_page_alloc_freelist() supports the flag.
 * (g) - vm_page_grab() supports the flag.
 * Bits above 15 define the count of additional pages that the caller
 * intends to allocate.
 */
#define	VM_ALLOC_NORMAL		0
#define	VM_ALLOC_INTERRUPT	1
#define	VM_ALLOC_SYSTEM		2
#define	VM_ALLOC_CLASS_MASK	3
#define	VM_ALLOC_WIRED		0x0020	/* (acfg) Allocate non pageable page */
#define	VM_ALLOC_ZERO		0x0040	/* (acfg) Try to obtain a zeroed page */
#define	VM_ALLOC_NOOBJ		0x0100	/* (acg) No associated object */
#define	VM_ALLOC_NOBUSY		0x0200	/* (acg) Do not busy the page */
#define	VM_ALLOC_IFCACHED	0x0400	/* (ag) Fail if page is not cached */
#define	VM_ALLOC_IFNOTCACHED	0x0800	/* (ag) Fail if page is cached */
#define	VM_ALLOC_IGN_SBUSY	0x1000	/* (g) Ignore shared busy flag */
#define	VM_ALLOC_NODUMP		0x2000	/* (ag) don't include in dump */
#define	VM_ALLOC_SBUSY		0x4000	/* (acg) Shared busy the page */
#define	VM_ALLOC_NOWAIT		0x8000	/* (g) Do not sleep, return NULL */
#define	VM_ALLOC_COUNT_SHIFT	16
#define	VM_ALLOC_COUNT(count)	((count) << VM_ALLOC_COUNT_SHIFT)
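/*
 * Example request (illustrative): allocate a wired, preferably zeroed
 * page, hinting that two more allocations for the same object will
 * follow:
 *
 *	m = vm_page_alloc(obj, pindex, VM_ALLOC_SYSTEM | VM_ALLOC_WIRED |
 *	    VM_ALLOC_ZERO | VM_ALLOC_COUNT(2));
 */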
static inline int
malloc2vm_flags(int malloc_flags)
{
	int pflags;

	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
	    (malloc_flags & M_NOWAIT) != 0,
	    ("M_USE_RESERVE requires M_NOWAIT"));
	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
	    VM_ALLOC_SYSTEM;
	if ((malloc_flags & M_ZERO) != 0)
		pflags |= VM_ALLOC_ZERO;
	if ((malloc_flags & M_NODUMP) != 0)
		pflags |= VM_ALLOC_NODUMP;
	return (pflags);
}
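/*
 * Example (illustrative): malloc2vm_flags(M_NOWAIT | M_ZERO) yields
 * VM_ALLOC_SYSTEM | VM_ALLOC_ZERO, while malloc2vm_flags(M_NOWAIT |
 * M_USE_RESERVE) yields VM_ALLOC_INTERRUPT, the class permitted to dig
 * deepest into the free-page reserve.
 */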
void vm_page_busy_downgrade(vm_page_t m);
void vm_page_busy_sleep(vm_page_t m, const char *msg);
void vm_page_flash(vm_page_t m);
void vm_page_hold(vm_page_t mem);
void vm_page_unhold(vm_page_t mem);
void vm_page_free(vm_page_t m);
void vm_page_free_zero(vm_page_t m);

void vm_page_activate (vm_page_t);
void vm_page_advise(vm_page_t m, int advice);
vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
vm_page_t vm_page_alloc_freelist(int, int);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
void vm_page_cache(vm_page_t);
void vm_page_cache_free(vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_page_cache_transfer(vm_object_t, vm_pindex_t, vm_object_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_deactivate_noreuse(vm_page_t);
void vm_page_dequeue(vm_page_t m);
void vm_page_dequeue_locked(vm_page_t m);
vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
boolean_t vm_page_is_cached(vm_object_t object, vm_pindex_t pindex);
vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
vm_page_t vm_page_next(vm_page_t m);
int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
struct vm_pagequeue *vm_page_pagequeue(vm_page_t m);
vm_page_t vm_page_prev(vm_page_t m);
boolean_t vm_page_ps_is_valid(vm_page_t m);
void vm_page_putfake(vm_page_t m);
void vm_page_readahead_finish(vm_page_t m);
bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
void vm_page_reference(vm_page_t m);
void vm_page_remove (vm_page_t);
int vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
vm_page_t vm_page_replace(vm_page_t mnew, vm_object_t object,
    vm_pindex_t pindex);
void vm_page_requeue(vm_page_t m);
void vm_page_requeue_locked(vm_page_t m);
int vm_page_sbusied(vm_page_t m);
vm_page_t vm_page_scan_contig(u_long npages, vm_page_t m_start,
    vm_page_t m_end, u_long alignment, vm_paddr_t boundary, int options);
void vm_page_set_valid_range(vm_page_t m, int base, int size);
int vm_page_sleep_if_busy(vm_page_t m, const char *msg);
vm_offset_t vm_page_startup(vm_offset_t vaddr);
void vm_page_sunbusy(vm_page_t m);
int vm_page_trysbusy(vm_page_t m);
void vm_page_unhold_pages(vm_page_t *ma, int count);
boolean_t vm_page_unwire(vm_page_t m, uint8_t queue);
void vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_wire (vm_page_t);
void vm_page_xunbusy_hard(vm_page_t m);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
vm_page_bits_t vm_page_bits(int base, int size);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_zero_idle_wakeup(void);

void vm_page_dirty_KBI(vm_page_t m);
void vm_page_lock_KBI(vm_page_t m, const char *file, int line);
void vm_page_unlock_KBI(vm_page_t m, const char *file, int line);
int vm_page_trylock_KBI(vm_page_t m, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#endif
#define	vm_page_assert_sbusied(m)					\
	KASSERT(vm_page_sbusied(m),					\
	    ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_unbusied(m)					\
	KASSERT(!vm_page_busied(m),					\
	    ("vm_page_assert_unbusied: page %p busy @ %s:%d",		\
	    (m), __FILE__, __LINE__))

#define	vm_page_assert_xbusied(m)					\
	KASSERT(vm_page_xbusied(m),					\
	    ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
	    (m), __FILE__, __LINE__))

#define	vm_page_busied(m)						\
	((m)->busy_lock != VPB_UNBUSIED)
#define	vm_page_sbusy(m) do {						\
	if (!vm_page_trysbusy(m))					\
		panic("%s: page %p failed shared busying", __func__,	\
		    (m));						\
} while (0)

#define	vm_page_tryxbusy(m)						\
	(atomic_cmpset_acq_int(&(m)->busy_lock, VPB_UNBUSIED,		\
	    VPB_SINGLE_EXCLUSIVER))

#define	vm_page_xbusied(m)						\
	(((m)->busy_lock & VPB_SINGLE_EXCLUSIVER) != 0)

#define	vm_page_xbusy(m) do {						\
	if (!vm_page_tryxbusy(m))					\
		panic("%s: page %p failed exclusive busying", __func__, \
		    (m));						\
} while (0)
/* Note: page m's lock must not be owned by the caller. */
#define	vm_page_xunbusy(m) do {						\
	if (!atomic_cmpset_rel_int(&(m)->busy_lock,			\
	    VPB_SINGLE_EXCLUSIVER, VPB_UNBUSIED))			\
		vm_page_xunbusy_hard(m);				\
} while (0)
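/*
 * Typical busy protocol (an illustrative sketch): exclusive-busy a page
 * around an update; the release path wakes any waiters via
 * vm_page_xunbusy_hard() when the single compare-and-set fails:
 *
 *	vm_page_xbusy(m);
 *	... modify the page's contents or identity ...
 *	vm_page_xunbusy(m);
 */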
#ifdef INVARIANTS
void vm_page_object_lock_assert(vm_page_t m);
#define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	vm_page_object_lock_assert(m)
void vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits);
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)				\
	vm_page_assert_pga_writeable(m, bits)
#else
#define	VM_PAGE_OBJECT_LOCK_ASSERT(m)	(void)0
#define	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits)	(void)0
#endif
/*
 * We want to use atomic updates for the aflags field, which is 8 bits wide.
 * However, not all architectures support atomic operations on 8-bit
 * destinations.  In order that we can easily use a 32-bit operation, we
 * require that the aflags field be 32-bit aligned.
 */
CTASSERT(offsetof(struct vm_page, aflags) % sizeof(uint32_t) == 0);
/*
 *	Clear the given bits in the specified page.
 */
static inline void
vm_page_aflag_clear(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	/*
	 * The PGA_REFERENCED flag can only be cleared if the page is locked.
	 */
	if ((bits & PGA_REFERENCED) != 0)
		vm_page_assert_locked(m);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
	    ("vm_page_aflag_clear: aflags is misaligned"));
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= 24;
#endif
	atomic_clear_32(addr, val);
}
/*
 *	Set the given bits in the specified page.
 */
static inline void
vm_page_aflag_set(vm_page_t m, uint8_t bits)
{
	uint32_t *addr, val;

	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);

	/*
	 * Access the whole 32-bit word containing the aflags field with an
	 * atomic update.  Parallel non-atomic updates to the other fields
	 * within this word are handled properly by the atomic update.
	 */
	addr = (void *)&m->aflags;
	KASSERT(((uintptr_t)addr & (sizeof(uint32_t) - 1)) == 0,
	    ("vm_page_aflag_set: aflags is misaligned"));
	val = bits;
#if BYTE_ORDER == BIG_ENDIAN
	val <<= 24;
#endif
	atomic_set_32(addr, val);
}
/*
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 */
static __inline void
vm_page_dirty(vm_page_t m)
{

	/* Use vm_page_dirty_KBI() under INVARIANTS to save memory. */
#if defined(KLD_MODULE) || defined(INVARIANTS)
	vm_page_dirty_KBI(m);
#else
	m->dirty = VM_PAGE_BITS_ALL;
#endif
}
/*
 *	If the given page is in a page queue, then remove it from that page
 *	queue.
 *
 *	The page must be locked.
 */
static inline void
vm_page_remque(vm_page_t m)
{

	if (m->queue != PQ_NONE)
		vm_page_dequeue(m);
}
/*
 *	Set page to not be dirty.  Note: does not clear pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{

	VM_PAGE_OBJECT_LOCK_ASSERT(m);
	m->dirty = 0;
}
static inline void
vm_page_replace_checked(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex,
    vm_page_t mold)
{
	vm_page_t mret;

	mret = vm_page_replace(mnew, object, pindex);
	KASSERT(mret == mold,
	    ("invalid page replacement, mold=%p, mret=%p", mold, mret));

	/* Unused if !INVARIANTS. */
	(void)mret;
}

#endif /* _KERNEL */
#endif /* !_VM_PAGE_ */