/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_page.h,v 1.75.2.8 2002/03/06 01:07:09 dillon Exp $
 */

/*
 *	Resident memory system definitions.
 */

#ifndef	_VM_VM_PAGE_H_
#define	_VM_VM_PAGE_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_PMAP_H_
#include <vm/pmap.h>
#endif
#include <machine/atomic.h>

#ifdef _KERNEL

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif

#ifdef __x86_64__
#include <machine/vmparam.h>
#endif

#endif

typedef enum vm_page_event { VMEVENT_NONE, VMEVENT_COW } vm_page_event_t;

struct vm_page_action {
	LIST_ENTRY(vm_page_action) entry;
	struct vm_page	*m;
	vm_page_event_t	event;
	void		(*func)(struct vm_page *,
				struct vm_page_action *);
	void		*data;
};

typedef struct vm_page_action *vm_page_action_t;
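
/*
 * Illustrative usage (a sketch, not part of the original header; the
 * callback and argument names are hypothetical):
 *
 *	struct vm_page_action act;
 *
 *	act.m = m;
 *	act.event = VMEVENT_COW;
 *	act.func = my_cow_callback;
 *	act.data = my_arg;
 *	vm_page_register_action(&act, VMEVENT_COW);
 *
 * The structure is linked onto a lookaside list, so it must remain
 * valid until vm_page_unregister_action() is called on it.
 */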

/*
 *	Management of resident (logical) pages.
 *
 *	A small structure is kept for each resident
 *	page, indexed by page number.  Each structure
 *	is an element of several lists:
 *
 *		A hash table bucket used to quickly
 *		perform object/offset lookups
 *
 *		A list of all pages for a given object,
 *		so they can be quickly deactivated at
 *		time of deallocation.
 *
 *		An ordered list of pages due for pageout.
 *
 *	In addition, the structure contains the object
 *	and offset to which this page belongs (for pageout),
 *	and sundry status bits.
 *
 *	Fields in this structure are locked either by the lock on the
 *	object that the page belongs to (O) or by the lock on the page
 *	queues (P).
 *
 *	The 'valid' and 'dirty' fields are distinct.  A page may have dirty
 *	bits set without having associated valid bits set.  This is used by
 *	NFS to implement piecemeal writes.
 */
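
/*
 * Illustrative example (not part of the original header): with PAGE_SIZE
 * 4096 and DEV_BSIZE 512, each of the 8 bits in 'valid' and 'dirty'
 * covers one 512-byte chunk of the page.  valid == 0x0f with dirty ==
 * 0xf0 thus describes a page whose first 2KB is valid and whose last
 * 2KB is dirty but not valid (the NFS piecemeal-write case above).
 */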

TAILQ_HEAD(pglist, vm_page);

struct vm_object;

int rb_vm_page_compare(struct vm_page *, struct vm_page *);

struct vm_page_rb_tree;
RB_PROTOTYPE2(vm_page_rb_tree, vm_page, rb_entry, rb_vm_page_compare, vm_pindex_t);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* vm_page_queues[] list (P)	*/
	RB_ENTRY(vm_page) rb_entry;	/* Red-Black tree based at object */

	struct vm_object *object;	/* which object am I in (O,P)	*/
	vm_pindex_t pindex;		/* offset into object (O,P)	*/
	vm_paddr_t phys_addr;		/* physical address of page	*/
	struct md_page md;		/* machine dependent stuff	*/
	u_short	queue;			/* page queue index		*/
	u_short	pc;			/* page color			*/
	u_char	act_count;		/* page usage count		*/
	u_char	busy;			/* page busy count		*/
	u_char	unused01;
	u_char	unused02;
	u_int32_t flags;		/* see below			*/
	u_int	wire_count;		/* wired down maps refs (P)	*/
	int	hold_count;		/* page hold count		*/

	/*
	 * NOTE that these must support one bit per DEV_BSIZE in a page!!!
	 * so, on normal X86 kernels, they must be at least 8 bits wide.
	 */
	u_char	valid;			/* map of valid DEV_BSIZE chunks */
	u_char	dirty;			/* map of dirty DEV_BSIZE chunks */

	int	ku_pagecnt;		/* kmalloc helper */
#ifdef VM_PAGE_DEBUG
	const char *busy_func;
	int	busy_line;
#endif
};

#ifdef VM_PAGE_DEBUG
#define VM_PAGE_DEBUG_EXT(name)	name ## _debug
#define VM_PAGE_DEBUG_ARGS	, const char *func, int lineno
#else
#define VM_PAGE_DEBUG_EXT(name)	name
#define VM_PAGE_DEBUG_ARGS
#endif

#ifndef __VM_PAGE_T_DEFINED__
#define __VM_PAGE_T_DEFINED__
typedef struct vm_page *vm_page_t;
#endif

/*
 * Page coloring parameters.  We use generous parameters designed to
 * statistically spread pages over available cpu cache space.  This has
 * become less important over time as cache associativity is higher
 * in modern times but we still use the core algorithm to help reduce
 * lock contention between cpus.
 *
 * Page coloring cannot be disabled.
 */

#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 256	/* A number of colors opt for 1M cache */

#if 0
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 128	/* A number of colors opt for 512K cache */

#define PQ_PRIME1 13	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 7	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 64	/* A number of colors opt for 256K cache */

#define PQ_PRIME1 9	/* Produces a good PQ_L2_SIZE/3 + PQ_PRIME1 */
#define PQ_PRIME2 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 32	/* A number of colors opt for 128k cache */

#define PQ_PRIME1 5	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 3	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 16	/* A reasonable number of colors (opt for 64K cache) */
#endif

#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

#define PQ_NONE		0
#define PQ_FREE		(1 + 0*PQ_L2_SIZE)
#define PQ_INACTIVE	(1 + 1*PQ_L2_SIZE)
#define PQ_ACTIVE	(1 + 2*PQ_L2_SIZE)
#define PQ_CACHE	(1 + 3*PQ_L2_SIZE)
#define PQ_HOLD		(1 + 4*PQ_L2_SIZE)
#define PQ_COUNT	(1 + 5*PQ_L2_SIZE)
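
/*
 * Illustrative note (an assumption, not from the original header): a
 * page's queue index combines the base queue with the page's color, so
 * a free page of color m->pc lives on
 * vm_page_queues[PQ_FREE + (m->pc & PQ_L2_MASK)].
 */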

/*
 * Scan support
 */
struct vm_map;

struct rb_vm_page_scan_info {
	vm_pindex_t	start_pindex;
	vm_pindex_t	end_pindex;
	int		limit;
	int		desired;
	int		error;
	int		pagerflags;
	vm_offset_t	addr;
	vm_pindex_t	backing_offset_index;
	struct vm_object *object;
	struct vm_object *backing_object;
	struct vm_page	*mpte;
	struct pmap	*pmap;
	struct vm_map	*map;
};

int rb_vm_page_scancmp(struct vm_page *, void *);

struct vpgqueues {
	struct pglist pl;
	int	*cnt;
	int	lcnt;
	int	flipflop;	/* probably not the best place */
	struct spinlock spin;
	char	unused[64 - sizeof(struct pglist) -
			sizeof(int *) - sizeof(int) * 2];
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];

/*
 * These are the flags defined for vm_page.
 *
 * PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 * not under PV management but otherwise should be treated as a
 * normal page.  Pages not under PV management cannot be paged out
 * via the object/vm_page_t because there is no knowledge of their
 * pte mappings, nor can they be removed from their objects via
 * the object, and such pages are also not on any PQ queue.  The
 * PG_MAPPED and PG_WRITEABLE flags are not applicable.
 *
 * PG_MAPPED only applies to managed pages, indicating whether the page
 * is mapped onto one or more pmaps.  A page might still be mapped to
 * special pmaps in an unmanaged fashion, for example when mapped into a
 * buffer cache buffer, without setting PG_MAPPED.
 *
 * PG_WRITEABLE indicates that there may be a writeable managed pmap entry
 * somewhere, and that the page can be dirtied by hardware at any time
 * and may have to be tested for that.  The modified bit in unmanaged
 * mappings or in the special clean map is not tested.
 *
 * PG_SWAPPED indicates that the page is backed by a swap block.  Any
 * VM object type other than OBJT_DEFAULT can have swap-backed pages now.
 *
 * PG_SBUSY is set when m->busy != 0.  PG_SBUSY and m->busy are only
 * modified when the page is PG_BUSY.
 */
#define	PG_BUSY		0x00000001	/* page is in transit (O) */
#define	PG_WANTED	0x00000002	/* someone is waiting for page (O) */
#define	PG_WINATCFLS	0x00000004	/* flush dirty page on inactive q */
#define	PG_FICTITIOUS	0x00000008	/* physical page doesn't exist (O) */
#define	PG_WRITEABLE	0x00000010	/* page is writeable */
#define	PG_MAPPED	0x00000020	/* page is mapped (managed) */
#define	PG_ZERO		0x00000040	/* page is zeroed */
#define	PG_REFERENCED	0x00000080	/* page has been referenced */
#define	PG_CLEANCHK	0x00000100	/* page will be checked for cleaning */
#define	PG_SWAPINPROG	0x00000200	/* swap I/O in progress on page */
#define	PG_NOSYNC	0x00000400	/* do not collect for syncer */
#define	PG_UNMANAGED	0x00000800	/* no PV management for page */
#define	PG_MARKER	0x00001000	/* special queue marker page */
#define	PG_RAM		0x00002000	/* read ahead mark */
#define	PG_SWAPPED	0x00004000	/* backed by swap */
#define	PG_NOTMETA	0x00008000	/* do not back with swap */
#define	PG_ACTIONLIST	0x00010000	/* lookaside action list present */
#define	PG_SBUSY	0x00020000	/* soft-busy also set */
#define	PG_NEED_COMMIT	0x00040000	/* clean page requires commit */
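
/*
 * Illustrative busy protocol (a sketch, not part of the original
 * header): the usual hard-busy cycle looks like
 *
 *	vm_page_busy_wait(m, FALSE, "pgbsy");	sets PG_BUSY, may block
 *	... operate on the page ...
 *	vm_page_wakeup(m);			clears PG_BUSY, wakes
 *						PG_WANTED waiters
 *
 * where "pgbsy" is just an example wait message.
 */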

/*
 * Misc constants.
 */
#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

#ifdef _KERNEL

/*
 *	Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation.  Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been
 *		recently referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 */

extern int vm_page_zero_count;
extern struct vm_page *vm_page_array;	/* First resident page in table */
extern int vm_page_array_size;		/* number of vm_page_t's */
extern long first_page;			/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	\
	((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa)	\
	(&vm_page_array[atop(pa) - first_page])
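
/*
 * Illustrative note (an assumption, not from the original header): for a
 * page backed by vm_page_array these macros invert one another, i.e.
 * PHYS_TO_VM_PAGE(VM_PAGE_TO_PHYS(m)) == m.
 */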

/*
 * Functions implemented as macros
 */

static __inline void
vm_page_flag_set(vm_page_t m, unsigned int bits)
{
	atomic_set_int(&(m)->flags, bits);
}

static __inline void
vm_page_flag_clear(vm_page_t m, unsigned int bits)
{
	atomic_clear_int(&(m)->flags, bits);
}

/*
 * Wakeup anyone waiting for the page after potentially unbusying
 * (hard or soft) or doing other work on a page that might make a
 * waiter ready.  The setting of PG_WANTED is integrated into the
 * related flags and it can't be set once the flags are already
 * clear, so there should be no races here.
 */
static __inline void
vm_page_flash(vm_page_t m)
{
	if (m->flags & PG_WANTED) {
		vm_page_flag_clear(m, PG_WANTED);
		wakeup(m);
	}
}

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif

/*
 * Note: the code will always use nominally free pages from the free list
 * before trying other flag-specified sources.
 *
 * At least one of VM_ALLOC_NORMAL|VM_ALLOC_SYSTEM|VM_ALLOC_INTERRUPT
 * must be specified.  VM_ALLOC_RETRY may only be specified if
 * VM_ALLOC_NORMAL is also specified.
 */
#define VM_ALLOC_NORMAL		0x0001	/* ok to use cache pages */
#define VM_ALLOC_SYSTEM		0x0002	/* ok to exhaust most of free list */
#define VM_ALLOC_INTERRUPT	0x0004	/* ok to exhaust entire free list */
#define	VM_ALLOC_ZERO		0x0008	/* req pre-zero'd memory if avail */
#define	VM_ALLOC_QUICK		0x0010	/* like NORMAL but do not use cache */
#define VM_ALLOC_FORCE_ZERO	0x0020	/* zero page even if already valid */
#define VM_ALLOC_NULL_OK	0x0040	/* ok to return NULL on collision */
#define	VM_ALLOC_RETRY		0x0080	/* indefinite block (vm_page_grab()) */
#define VM_ALLOC_USE_GD		0x0100	/* use per-gd cache */
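
/*
 * Illustrative usage (a sketch, not part of the original header):
 *
 *	m = vm_page_grab(object, pindex,
 *			 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 *
 * This obeys the rule above: VM_ALLOC_RETRY (block indefinitely in
 * vm_page_grab()) is paired with VM_ALLOC_NORMAL.
 */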

void vm_page_queue_spin_lock(vm_page_t);
void vm_page_queues_spin_lock(u_short);
void vm_page_and_queue_spin_lock(vm_page_t);

void vm_page_queue_spin_unlock(vm_page_t);
void vm_page_queues_spin_unlock(u_short);
void vm_page_and_queue_spin_unlock(vm_page_t m);

void vm_page_io_finish(vm_page_t m);
void vm_page_io_start(vm_page_t m);
void vm_page_need_commit(vm_page_t m);
void vm_page_clear_commit(vm_page_t m);
void vm_page_wakeup(vm_page_t m);
void vm_page_hold(vm_page_t);
void vm_page_unhold(vm_page_t);
void vm_page_activate (vm_page_t);
void vm_page_pcpu_cache(void);
vm_page_t vm_page_alloc (struct vm_object *, vm_pindex_t, int);
vm_page_t vm_page_alloc_contig(vm_paddr_t low, vm_paddr_t high,
		unsigned long alignment, unsigned long boundary,
		unsigned long size);
vm_page_t vm_page_grab (struct vm_object *, vm_pindex_t, int);
void vm_page_cache (vm_page_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_dontneed (vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_deactivate_locked (vm_page_t);
int vm_page_insert (vm_page_t, struct vm_object *, vm_pindex_t);
vm_page_t vm_page_lookup (struct vm_object *, vm_pindex_t);
vm_page_t VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_wait)(
		struct vm_object *, vm_pindex_t, int, const char *
		VM_PAGE_DEBUG_ARGS);
vm_page_t VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_try)(
		struct vm_object *, vm_pindex_t, int, int *
		VM_PAGE_DEBUG_ARGS);
void vm_page_remove (vm_page_t);
void vm_page_rename (vm_page_t, struct vm_object *, vm_pindex_t);
void vm_page_startup (void);
void vm_page_unmanage (vm_page_t);
void vm_page_unwire (vm_page_t, int);
void vm_page_wire (vm_page_t);
void vm_page_unqueue (vm_page_t);
void vm_page_unqueue_nowakeup (vm_page_t);
vm_page_t vm_page_next (vm_page_t);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_set_validdirty (vm_page_t, int, int);
void vm_page_set_valid (vm_page_t, int, int);
void vm_page_set_dirty (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
int vm_page_bits (int, int);
vm_page_t vm_page_list_find(int basequeue, int index, boolean_t prefer_zero);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_free_contig(vm_page_t m, unsigned long size);
vm_page_t vm_page_free_fromq_fast(void);
void vm_page_event_internal(vm_page_t, vm_page_event_t);
void vm_page_dirty(vm_page_t m);
void vm_page_register_action(vm_page_action_t action, vm_page_event_t event);
void vm_page_unregister_action(vm_page_action_t action);
void vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg);
void VM_PAGE_DEBUG_EXT(vm_page_busy_wait)(vm_page_t m, int also_m_busy, const char *wmsg VM_PAGE_DEBUG_ARGS);
int VM_PAGE_DEBUG_EXT(vm_page_busy_try)(vm_page_t m, int also_m_busy VM_PAGE_DEBUG_ARGS);

#ifdef VM_PAGE_DEBUG

#define vm_page_lookup_busy_wait(object, pindex, alsob, msg)		\
	vm_page_lookup_busy_wait_debug(object, pindex, alsob, msg,	\
					__func__, __LINE__)

#define vm_page_lookup_busy_try(object, pindex, alsob, errorp)		\
	vm_page_lookup_busy_try_debug(object, pindex, alsob, errorp,	\
					__func__, __LINE__)

#define vm_page_busy_wait(m, alsob, msg)				\
	vm_page_busy_wait_debug(m, alsob, msg, __func__, __LINE__)

#define vm_page_busy_try(m, alsob)					\
	vm_page_busy_try_debug(m, alsob, __func__, __LINE__)

#endif

/*
 * Reduce the protection of a page.  This routine never raises the
 * protection and therefore can be safely called if the page is already
 * at VM_PROT_NONE (it will effectively be a NOP).
 *
 * VM_PROT_NONE will remove all user mappings of a page.  This is often
 * necessary when a page changes state (for example, turns into a
 * copy-on-write page or needs to be frozen for write I/O) in order to
 * force a fault, or to force a page's dirty bits to be synchronized and
 * avoid hardware (modified/accessed) bit update races with pmap changes.
 *
 * Since 'prot' is usually a constant, this inline usually winds up
 * optimizing out the primary conditional.
 *
 * WARNING: VM_PROT_NONE can block, but will loop until all mappings have
 * been cleared.  Callers should be aware that other page related
 * elements might have changed, however.
 */
static __inline void
vm_page_protect(vm_page_t m, int prot)
{
	KKASSERT(m->flags & PG_BUSY);
	if (prot == VM_PROT_NONE) {
		if (m->flags & (PG_WRITEABLE|PG_MAPPED)) {
			pmap_page_protect(m, VM_PROT_NONE);
			/* PG_WRITEABLE & PG_MAPPED cleared by call */
		}
	} else if ((prot == VM_PROT_READ) && (m->flags & PG_WRITEABLE)) {
		pmap_page_protect(m, VM_PROT_READ);
		/* PG_WRITEABLE cleared by call */
	}
}
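
/*
 * Illustrative usage (a sketch, not part of the original header): before
 * freezing a page for a clean-to-disk write, downgrade it with
 * vm_page_protect(m, VM_PROT_READ) so further writes fault and the
 * page's dirty bits stay synchronized while the I/O is in flight.
 */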

/*
 * Zero-fill the specified page.  The entire contents of the page will
 * be zeroed out.
 */
static __inline boolean_t
vm_page_zero_fill(vm_page_t m)
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 * Copy the contents of src_m to dest_m.  The pages must be stable but
 * spl and other protections depend on context.
 */
static __inline void
vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
	dest_m->dirty = VM_PAGE_BITS_ALL;
}

/*
 * Free a page.  The page must be marked BUSY.
 *
 * Always clear PG_ZERO when freeing a page, which ensures the flag is not
 * set unless we are absolutely certain the page is zeroed.  This is
 * particularly important when the vm_page_alloc*() code moves pages from
 * PQ_CACHE to PQ_FREE.
 */
static __inline void
vm_page_free(vm_page_t m)
{
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Free a page to the zeroed-pages queue.  The caller must ensure that
 * the page has been zeroed.
 */
static __inline void
vm_page_free_zero(vm_page_t m)
{
#ifdef PMAP_DEBUG
#ifdef PHYS_TO_DMAP
	char *p = (char *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	int i;

	for (i = 0; i < PAGE_SIZE; i++) {
		if (p[i] != 0) {
			panic("non-zero page in vm_page_free_zero()");
		}
	}
#endif
#endif
	vm_page_flag_set(m, PG_ZERO);
	vm_page_free_toq(m);
}

/*
 * Set page to not be dirty.  Note: does not clear pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{
	m->dirty = 0;
}

#endif				/* _KERNEL */
#endif				/* !_VM_VM_PAGE_H_ */