[dragonfly.git] / sys / vm / vm_page.h
/*
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      from: @(#)vm_page.h     8.2 (Berkeley) 12/13/93
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_page.h,v 1.75.2.8 2002/03/06 01:07:09 dillon Exp $
 * $DragonFly: src/sys/vm/vm_page.h,v 1.24 2006/05/21 03:43:47 dillon Exp $
 */

/*
 *      Resident memory system definitions.
 */

#ifndef _VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_

#if !defined(KLD_MODULE) && defined(_KERNEL)
#include "opt_vmpage.h"
#endif

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_PMAP_H_
#include <vm/pmap.h>
#endif
#ifndef _MACHINE_ATOMIC_H_
#include <machine/atomic.h>
#endif

#ifdef _KERNEL

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif

#endif

/*
 *      Management of resident (logical) pages.
 *
 *      A small structure is kept for each resident
 *      page, indexed by page number.  Each structure
 *      is an element of several lists:
 *
 *              A hash table bucket used to quickly
 *              perform object/offset lookups
 *
 *              A list of all pages for a given object,
 *              so they can be quickly deactivated at
 *              time of deallocation.
 *
 *              An ordered list of pages due for pageout.
 *
 *      In addition, the structure contains the object
 *      and offset to which this page belongs (for pageout),
 *      and sundry status bits.
 *
 *      Fields in this structure are locked either by the lock on the
 *      object that the page belongs to (O) or by the lock on the page
 *      queues (P).
 *
 *      The 'valid' and 'dirty' fields are distinct.  A page may have dirty
 *      bits set without having associated valid bits set.  This is used by
 *      NFS to implement piecemeal writes.
 */

TAILQ_HEAD(pglist, vm_page);

struct msf_buf;
struct vm_object;

struct vm_page {
        TAILQ_ENTRY(vm_page) pageq;     /* vm_page_queues[] list (P) */
        struct vm_page *hnext;          /* hash table link (O,P) */
        TAILQ_ENTRY(vm_page) listq;     /* pages in same object (O) */

        struct vm_object *object;       /* which object am I in (O,P) */
        vm_pindex_t pindex;             /* offset into object (O,P) */
        vm_paddr_t phys_addr;           /* physical address of page */
        struct md_page md;              /* machine dependent stuff */
        u_short queue;                  /* page queue index */
        u_short flags;                  /* see below */
        u_short pc;                     /* page color */
        u_short wire_count;             /* wired down maps refs (P) */
        short hold_count;               /* page hold count */
        u_char act_count;               /* page usage count */
        u_char busy;                    /* page busy count */

        /*
         * NOTE that these must support one bit per DEV_BSIZE in a page!!!
         * so, on normal X86 kernels, they must be at least 8 bits wide.
         */
#if PAGE_SIZE == 4096
        u_char valid;                   /* map of valid DEV_BSIZE chunks */
        u_char dirty;                   /* map of dirty DEV_BSIZE chunks */
        u_char unused1;
        u_char unused2;
#elif PAGE_SIZE == 8192
        u_short valid;                  /* map of valid DEV_BSIZE chunks */
        u_short dirty;                  /* map of dirty DEV_BSIZE chunks */
#endif
        struct msf_buf *msf_hint;       /* first page of an msfbuf map */
};

#ifndef __VM_PAGE_T_DEFINED__
#define __VM_PAGE_T_DEFINED__
typedef struct vm_page *vm_page_t;
#endif

/*
 * note: SWAPBLK_NONE is currently used as an absolute value rather than
 * a flag bit.
 */

#define SWAPBLK_MASK    ((daddr_t)((u_daddr_t)-1 >> 1))         /* mask */
#define SWAPBLK_NONE    ((daddr_t)((u_daddr_t)SWAPBLK_MASK + 1))/* flag */

/*
 * Page coloring parameters.  We default to a middle of the road optimization.
 * Larger selections would not really hurt us but if a machine does not have
 * a lot of memory it could cause vm_page_alloc() to eat more cpu cycles
 * looking for free pages.
 *
 * Page coloring cannot be disabled.  Modules do not have access to most PQ
 * constants because they can change between builds.
 */
#if defined(_KERNEL) && !defined(KLD_MODULE)

#if !defined(PQ_CACHESIZE)
#define PQ_CACHESIZE 256        /* max is 1024 (MB) */
#endif

#if PQ_CACHESIZE >= 1024
#define PQ_PRIME1 31    /* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23    /* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 256  /* A number of colors opt for 1M cache */

#elif PQ_CACHESIZE >= 512
#define PQ_PRIME1 31    /* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23    /* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 128  /* A number of colors opt for 512K cache */

#elif PQ_CACHESIZE >= 256
#define PQ_PRIME1 13    /* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 7     /* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 64   /* A number of colors opt for 256K cache */

#elif PQ_CACHESIZE >= 128
#define PQ_PRIME1 9     /* Produces a good PQ_L2_SIZE/3 + PQ_PRIME1 */
#define PQ_PRIME2 5     /* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 32   /* A number of colors opt for 128k cache */

#else
#define PQ_PRIME1 5     /* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 3     /* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 16   /* A reasonable number of colors (opt for 64K cache) */

#endif

#define PQ_L2_MASK      (PQ_L2_SIZE - 1)

#endif /* KERNEL && !KLD_MODULE */

/*
 * The queue array is always based on PQ_MAXL2_SIZE regardless of the actual
 * cache size chosen in order to present a uniform interface for modules.
 */
#define PQ_MAXL2_SIZE   256     /* fixed maximum (in pages) / module compat */

#if PQ_L2_SIZE > PQ_MAXL2_SIZE
#error "Illegal PQ_L2_SIZE"
#endif

#define PQ_NONE         0
#define PQ_FREE         1
#define PQ_INACTIVE     (1 + 1*PQ_MAXL2_SIZE)
#define PQ_ACTIVE       (2 + 1*PQ_MAXL2_SIZE)
#define PQ_CACHE        (3 + 1*PQ_MAXL2_SIZE)
#define PQ_HOLD         (3 + 2*PQ_MAXL2_SIZE)
#define PQ_COUNT        (4 + 2*PQ_MAXL2_SIZE)

struct vpgqueues {
        struct pglist pl;
        int     *cnt;
        int     lcnt;
        int     flipflop;       /* probably not the best place */
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];

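/*
 * A minimal sketch of how the per-color queues are indexed, kept under
 * "#if 0" so it is not compiled.  It assumes a kernel (non-module) build
 * where PQ_L2_MASK is visible; the function name is purely illustrative.
 */
#if 0
static struct vpgqueues *
example_free_queue_for(vm_page_t m)
{
        /*
         * PQ_FREE and PQ_CACHE each cover PQ_MAXL2_SIZE consecutive queue
         * slots, one per page color; the mask keeps the index within the
         * number of colors selected via PQ_CACHESIZE.
         */
        int color = m->pc & PQ_L2_MASK;

        return (&vm_page_queues[PQ_FREE + color]);
}
#endif
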
/*
 * These are the flags defined for vm_page.
 *
 * Note: PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 *       not under PV management but otherwise should be treated as a
 *       normal page.  Pages not under PV management cannot be paged out
 *       via the object/vm_page_t because there is no knowledge of their
 *       pte mappings, nor can they be removed from their objects via
 *       the object, and such pages are also not on any PQ queue.
 */
#define PG_BUSY         0x0001          /* page is in transit (O) */
#define PG_WANTED       0x0002          /* someone is waiting for page (O) */
#define PG_WINATCFLS    0x0004          /* flush dirty page on inactive q */
#define PG_FICTITIOUS   0x0008          /* physical page doesn't exist (O) */
#define PG_WRITEABLE    0x0010          /* page is mapped writeable */
#define PG_MAPPED       0x0020          /* page is mapped */
#define PG_ZERO         0x0040          /* page is zeroed */
#define PG_REFERENCED   0x0080          /* page has been referenced */
#define PG_CLEANCHK     0x0100          /* page will be checked for cleaning */
#define PG_SWAPINPROG   0x0200          /* swap I/O in progress on page */
#define PG_NOSYNC       0x0400          /* do not collect for syncer */
#define PG_UNMANAGED    0x0800          /* No PV management for page */
#define PG_MARKER       0x1000          /* special queue marker page */

/*
 * Misc constants.
 */

#define ACT_DECLINE     1
#define ACT_ADVANCE     3
#define ACT_INIT        5
#define ACT_MAX         64

#ifdef _KERNEL
/*
 * Each pageable resident page falls into one of four lists:
 *
 *      free
 *              Available for allocation now.
 *
 *      The following are all LRU sorted:
 *
 *      cache
 *              Almost available for allocation. Still in an
 *              object, but clean and immediately freeable at
 *              non-interrupt times.
 *
 *      inactive
 *              Low activity, candidates for reclamation.
 *              This is the list of pages that should be
 *              paged out next.
 *
 *      active
 *              Pages that are "active" i.e. they have been
 *              recently referenced.
 *
 *      zero
 *              Pages that are really free and have been pre-zeroed
 *
 */

extern int vm_page_zero_count;
extern struct vm_page *vm_page_array;   /* First resident page in table */
extern int vm_page_array_size;          /* number of vm_page_t's */
extern long first_page;                 /* first physical page number */

#define VM_PAGE_TO_PHYS(entry)  \
        ((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa)     \
        (&vm_page_array[atop(pa) - first_page])

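/*
 * A minimal sketch (not compiled) of the round trip between the two macros
 * above.  It assumes 'pa' lies within managed physical memory, i.e. it has
 * an entry in vm_page_array; the function name is illustrative.
 */
#if 0
static vm_paddr_t
example_phys_round_trip(vm_paddr_t pa)
{
        vm_page_t m = PHYS_TO_VM_PAGE(pa);

        /* phys_addr holds the page-aligned frame address containing 'pa' */
        return (VM_PAGE_TO_PHYS(m));
}
#endif
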
/*
 * Functions implemented as macros
 */

static __inline void
vm_page_flag_set(vm_page_t m, unsigned int bits)
{
        atomic_set_short(&(m)->flags, bits);
}

static __inline void
vm_page_flag_clear(vm_page_t m, unsigned int bits)
{
        atomic_clear_short(&(m)->flags, bits);
}

static __inline void
vm_page_busy(vm_page_t m)
{
        KASSERT((m->flags & PG_BUSY) == 0,
                ("vm_page_busy: page already busy!!!"));
        vm_page_flag_set(m, PG_BUSY);
}

/*
 * vm_page_flash:
 *
 *      wakeup anyone waiting for the page.
 */
static __inline void
vm_page_flash(vm_page_t m)
{
        if (m->flags & PG_WANTED) {
                vm_page_flag_clear(m, PG_WANTED);
                wakeup(m);
        }
}

/*
 * Clear the PG_BUSY flag and wakeup anyone waiting for the page.  This
 * is typically the last call you make on a page before moving onto
 * other things.
 */
static __inline void
vm_page_wakeup(vm_page_t m)
{
        KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
        vm_page_flag_clear(m, PG_BUSY);
        vm_page_flash(m);
}

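/*
 * A minimal sketch (not compiled) of the hard-busy bracket built from the
 * inlines above.  The page is assumed to have been looked up and found not
 * busy by the caller; the function name is illustrative.
 */
#if 0
static void
example_busy_bracket(vm_page_t m)
{
        vm_page_busy(m);        /* set PG_BUSY; asserts it was clear */
        /* ... manipulate the page while other threads wait on it ... */
        vm_page_wakeup(m);      /* clear PG_BUSY and wake any waiters */
}
#endif
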
/*
 * These routines manipulate the 'soft busy' count for a page.  A soft busy
 * is almost like PG_BUSY except that it allows certain compatible operations
 * to occur on the page while it is busy.  For example, a page undergoing a
 * write can still be mapped read-only.
 */
static __inline void
vm_page_io_start(vm_page_t m)
{
        atomic_add_char(&(m)->busy, 1);
}

static __inline void
vm_page_io_finish(vm_page_t m)
{
        atomic_subtract_char(&m->busy, 1);
        if (m->busy == 0)
                vm_page_flash(m);
}

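/*
 * A minimal sketch (not compiled) of the soft-busy bracket around I/O.
 * While m->busy is non-zero the page may still be mapped read-only, and
 * vm_page_sleep_busy(m, TRUE, ...) callers block until it drops to zero.
 */
#if 0
static void
example_io_bracket(vm_page_t m)
{
        vm_page_io_start(m);    /* m->busy++ */
        /* ... issue the write and wait for it to complete ... */
        vm_page_io_finish(m);   /* m->busy--, wakes waiters when it hits 0 */
}
#endif
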
#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif

#if PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffff
#endif

/*
 * Note: the code will always use nominally free pages from the free list
 * before trying other flag-specified sources.
 *
 * At least one of VM_ALLOC_NORMAL|VM_ALLOC_SYSTEM|VM_ALLOC_INTERRUPT
 * must be specified.  VM_ALLOC_RETRY may only be specified if VM_ALLOC_NORMAL
 * is also specified.
 */
#define VM_ALLOC_NORMAL         0x01    /* ok to use cache pages */
#define VM_ALLOC_SYSTEM         0x02    /* ok to exhaust most of free list */
#define VM_ALLOC_INTERRUPT      0x04    /* ok to exhaust entire free list */
#define VM_ALLOC_ZERO           0x08    /* req pre-zero'd memory if avail */
#define VM_ALLOC_RETRY          0x80    /* indefinite block (vm_page_grab()) */

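/*
 * A minimal sketch (not compiled) of a typical flag combination, using
 * vm_page_grab() as declared below.  VM_ALLOC_NORMAL is required whenever
 * VM_ALLOC_RETRY is used; with VM_ALLOC_RETRY the call blocks until a page
 * can be returned instead of failing when memory is tight.  The function
 * name is illustrative.
 */
#if 0
static vm_page_t
example_grab(struct vm_object *object, vm_pindex_t pindex)
{
        return (vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY));
}
#endif
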
void vm_page_unhold(vm_page_t mem);
void vm_page_activate (vm_page_t);
vm_page_t vm_page_alloc (struct vm_object *, vm_pindex_t, int);
vm_page_t vm_page_grab (struct vm_object *, vm_pindex_t, int);
void vm_page_cache (vm_page_t);
int vm_page_try_to_cache (vm_page_t);
int vm_page_try_to_free (vm_page_t);
void vm_page_dontneed (vm_page_t);
void vm_page_deactivate (vm_page_t);
void vm_page_insert (vm_page_t, struct vm_object *, vm_pindex_t);
vm_page_t vm_page_lookup (struct vm_object *, vm_pindex_t);
void vm_page_remove (vm_page_t);
void vm_page_rename (vm_page_t, struct vm_object *, vm_pindex_t);
vm_offset_t vm_page_startup (vm_offset_t);
vm_page_t vm_add_new_page (vm_paddr_t pa);
void vm_page_unmanage (vm_page_t);
void vm_page_unwire (vm_page_t, int);
void vm_page_wire (vm_page_t);
void vm_page_unqueue (vm_page_t);
void vm_page_unqueue_nowakeup (vm_page_t);
void vm_page_set_validclean (vm_page_t, int, int);
void vm_page_set_dirty (vm_page_t, int, int);
void vm_page_clear_dirty (vm_page_t, int, int);
void vm_page_set_invalid (vm_page_t, int, int);
int vm_page_is_valid (vm_page_t, int, int);
void vm_page_test_dirty (vm_page_t);
int vm_page_bits (int, int);
vm_page_t vm_page_list_find(int basequeue, int index, boolean_t prefer_zero);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
vm_offset_t vm_contig_pg_kmap(int, u_long, vm_map_t, int);
void vm_contig_pg_free(int, u_long);

/*
 * Holding a page keeps it from being reused.  Other parts of the system
 * can still disassociate the page from its current object and free it, or
 * perform read or write I/O on it and/or otherwise manipulate the page,
 * but if the page is held the VM system will leave the page and its data
 * intact and not reuse the page for other purposes until the last hold
 * reference is released.  (see vm_page_wire() if you want to prevent the
 * page from being disassociated from its object too).
 *
 * This routine must be called while at splvm() or better.
 *
 * The caller must still validate the contents of the page and, if necessary,
 * wait for any pending I/O (e.g. vm_page_sleep_busy() loop) to complete
 * before manipulating the page.
 */
static __inline void
vm_page_hold(vm_page_t mem)
{
        mem->hold_count++;
}

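/*
 * A minimal sketch (not compiled) of a hold bracket.  It assumes the caller
 * satisfies the splvm()-or-better requirement noted above; the hold only
 * preserves the page and its data, so validity and busy state must still be
 * rechecked afterwards.  The function name is illustrative.
 */
#if 0
static void
example_hold_bracket(vm_page_t m)
{
        vm_page_hold(m);
        /* ... potentially blocking work that must not lose the page ... */
        vm_page_unhold(m);
}
#endif
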
/*
 * Reduce the protection of a page.  This routine never raises the
 * protection and therefore can be safely called if the page is already
 * at VM_PROT_NONE (it will be a NOP effectively).
 *
 * VM_PROT_NONE will remove all user mappings of a page.  This is often
 * necessary when a page changes state (for example, turns into a copy-on-write
 * page or needs to be frozen for write I/O) in order to force a fault, or
 * to force a page's dirty bits to be synchronized and avoid hardware
 * (modified/accessed) bit update races with pmap changes.
 *
 * Since 'prot' is usually a constant, this inline usually winds up optimizing
 * out the primary conditional.
 */
static __inline void
vm_page_protect(vm_page_t mem, int prot)
{
        if (prot == VM_PROT_NONE) {
                if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
                        pmap_page_protect(mem, VM_PROT_NONE);
                        vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
                }
        } else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
                pmap_page_protect(mem, VM_PROT_READ);
                vm_page_flag_clear(mem, PG_WRITEABLE);
        }
}

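/*
 * A minimal sketch (not compiled) of one use mentioned in the comment above:
 * downgrading to read-only before sampling the dirty state, so a racing
 * hardware modified-bit update is not missed.  Pairing this with
 * vm_page_test_dirty() is an assumption made for illustration only.
 */
#if 0
static void
example_sync_dirty(vm_page_t m)
{
        vm_page_protect(m, VM_PROT_READ);       /* remove write mappings */
        vm_page_test_dirty(m);                  /* fold pmap modified bit into m->dirty */
}
#endif
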
/*
 * Zero-fill the specified page.  The entire contents of the page will be
 * zero'd out.
 */
static __inline boolean_t
vm_page_zero_fill(vm_page_t m)
{
        pmap_zero_page(VM_PAGE_TO_PHYS(m));
        return (TRUE);
}

/*
 * Copy the contents of src_m to dest_m.  The pages must be stable but spl
 * and other protections depend on context.
 */
static __inline void
vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
{
        pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
        dest_m->valid = VM_PAGE_BITS_ALL;
}

/*
 * Free a page.  The page must be marked BUSY.
 *
 * The clearing of PG_ZERO is a temporary safety until the code can be
 * reviewed to determine that PG_ZERO is being properly cleared on
 * write faults or maps.  PG_ZERO was previously cleared in
 * vm_page_alloc().
 */
static __inline void
vm_page_free(vm_page_t m)
{
        vm_page_flag_clear(m, PG_ZERO);
        vm_page_free_toq(m);
}

/*
 * Free a page to the zeroed-pages queue
 */
static __inline void
vm_page_free_zero(vm_page_t m)
{
        vm_page_flag_set(m, PG_ZERO);
        vm_page_free_toq(m);
}

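/*
 * A minimal sketch (not compiled) of choosing between the two frees.  It
 * assumes 'm' is already PG_BUSY, as the comment above requires, and that
 * the caller has just zeroed it; the function name is illustrative.
 */
#if 0
static void
example_free_zeroed(vm_page_t m)
{
        vm_page_zero_fill(m);   /* contents are now known to be zero */
        vm_page_free_zero(m);   /* keep it on the pre-zeroed free queue */
}
#endif
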
/*
 * Wait until page is no longer PG_BUSY or (if also_m_busy is TRUE)
 * m->busy is zero.  Returns TRUE if it had to sleep (including if
 * it almost had to sleep and made temporary spl*() mods), FALSE
 * otherwise.
 *
 * This routine assumes that interrupts can only remove the busy
 * status from a page, not set the busy status or change it from
 * PG_BUSY to m->busy or vice versa (which would create a timing
 * window).
 *
 * Note: as an inline, 'also_m_busy' is usually a constant and well
 * optimized.
 */
static __inline int
vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
{
        if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
                crit_enter();
                if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
                        /*
                         * Page is busy.  Wait and retry.
                         */
                        vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
                        tsleep(m, 0, msg, 0);
                }
                crit_exit();
                return(TRUE);
                /* not reached */
        }
        return(FALSE);
}

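/*
 * A minimal sketch (not compiled) of the usual lookup-and-busy retry loop
 * built on vm_page_sleep_busy().  The lookup is repeated after every sleep
 * because the page can change identity in the meantime; the function name
 * and the "pgxlk" wait message are illustrative.
 */
#if 0
static vm_page_t
example_lookup_busy(struct vm_object *object, vm_pindex_t pindex)
{
        vm_page_t m;

        for (;;) {
                m = vm_page_lookup(object, pindex);
                if (m == NULL)
                        return (NULL);
                if (vm_page_sleep_busy(m, TRUE, "pgxlk") == FALSE)
                        break;          /* did not sleep; page can be busied */
        }
        vm_page_busy(m);
        return (m);
}
#endif
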
/*
 * Make page all dirty
 */
static __inline void
_vm_page_dirty(vm_page_t m, const char *info)
{
#ifdef INVARIANTS
        int pqtype = m->queue - m->pc;
#endif
        KASSERT(pqtype != PQ_CACHE && pqtype != PQ_FREE,
                ("vm_page_dirty: page in free/cache queue!"));
        m->dirty = VM_PAGE_BITS_ALL;
}

#define vm_page_dirty(m)        _vm_page_dirty(m, __FUNCTION__)

/*
 * Set page to not be dirty.  Note: does not clear pmap modify bits.
 */
static __inline void
vm_page_undirty(vm_page_t m)
{
        m->dirty = 0;
}

#endif                          /* _KERNEL */
#endif                          /* !_VM_VM_PAGE_H_ */