/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2003-2019 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory system definitions.
 */
#ifndef _VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_PMAP_H_
#include <vm/pmap.h>
#endif
#include <machine/atomic.h>

#ifdef _KERNEL

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif

#ifdef __x86_64__
#include <machine/vmparam.h>
#endif

#endif
/*
 * The vm_page structure is the heart of the entire system.  It's fairly
 * bulky, eating 3.125% of available memory (128 bytes vs 4K page size).
 * Most normal uses of the structure, representing physical memory, use
 * the type-stable vm_page_array[].  Device mappings exposed to mmap()
 * (such as GPUs) generally use temporary vm_page's outside of this array
 * and will be flagged FICTITIOUS.  Devices which use the kernel's contig
 * memory allocator get normal pages, but for convenience the pages will
 * be temporarily flagged as FICTITIOUS.
 *
 * Soft-busying or hard-busying guarantees a stable m->object, m->pindex,
 * and m->valid field.  A page cannot be validated or invalidated unless
 * hard-busied.
 *
 * The page must be hard-busied to make the following changes:
 *
 *	(1) Any change to m->object or m->pindex (also requires the
 *	    related object to be exclusively locked).
 *
 *	(2) Any transition of m->wire_count to 0 or from 0.  Other
 *	    transitions (e.g. 2->1, 1->2, etc) are allowed without
 *	    locks.
 *
 *	(3) Any change to m->valid.
 *
 *	(4) Clearing PG_MAPPED or PG_WRITEABLE (note that because of
 *	    this, these bits may be left lazily set until they can
 *	    be cleared later on).
 *
 * Most other fields of the vm_page can change at any time with certain
 * restrictions.
 *
 *	(1) PG_WRITEABLE and PG_MAPPED may be set with the page soft-busied
 *	    or hard-busied.
 *
 *	(2) m->dirty may be set to VM_PAGE_BITS_ALL by a page fault at
 *	    any time if PG_WRITEABLE is flagged.  Tests of m->dirty are
 *	    only tentative until all writeable mappings of the page are
 *	    removed.  This may occur unlocked.  A hard-busy is required
 *	    if modifying m->dirty under other conditions.
 *
 *	(3) PG_REFERENCED may be set at any time by the pmap code to
 *	    synchronize the [A]ccessed bit, if PG_MAPPED is flagged,
 *	    unlocked.  A hard-busy is required at any other time.
 *
 *	(4) hold_count can be incremented or decremented at any time,
 *	    including transitions to or from 0.  Holding a page via
 *	    vm_page_hold() does NOT stop major changes from being made
 *	    to the page, but WILL prevent the page from being freed
 *	    or reallocated.  If the hold is emplaced with the page in
 *	    a known state it can prevent the underlying data from being
 *	    destroyed.
 *
 *	(5) Each individual flag may have a different behavior.  Some flags
 *	    can be set or cleared at any time, some require hard-busying,
 *	    etc.
 *
 * Moving the page between queues (aka m->pageq and m->queue) requires
 * m->spin to be exclusively locked first, and then also the spinlock
 * related to the queue.
 *
 *	(1) This is the only use that requires m->spin any more.
 *
 *	(2) There is one special case: the pageout daemon is allowed to
 *	    reorder the page within the same queue while holding only the
 *	    queue's spin-lock.
 *
 * Please see the flags section below for flag documentation.
 */
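
/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): a minimal hard-busy sequence for validating a page, using
 * functions declared later in this file ("pgvld" is just a hypothetical
 * wmsg string).  m->valid may only change while the page is hard-busied:
 *
 *	vm_page_busy_wait(m, FALSE, "pgvld");	(acquire hard-busy; may sleep)
 *	m->valid = VM_PAGE_BITS_ALL;		(legal only while hard-busied)
 *	vm_page_wakeup(m);			(release busy, wake waiters)
 */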

TAILQ_HEAD(pglist, vm_page);

struct vm_object;

int rb_vm_page_compare(struct vm_page *, struct vm_page *);

struct vm_page_rb_tree;
RB_PROTOTYPE2(vm_page_rb_tree, vm_page, rb_entry,
	      rb_vm_page_compare, vm_pindex_t);
RB_HEAD(vm_page_rb_tree, vm_page);

struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* vm_page_queues[] list */
	RB_ENTRY(vm_page) rb_entry;	/* Red-Black tree based at object */
	struct spinlock	spin;
	struct md_page md;		/* machine dependent stuff */
	uint32_t wire_count;		/* wired down maps refs (P) */
	uint32_t busy_count;		/* soft-busy and hard-busy */
	int	hold_count;		/* page hold count */
	int	ku_pagecnt;		/* help kmalloc() w/oversized allocs */
	struct vm_object *object;	/* which object am I in */
	vm_pindex_t pindex;		/* offset into object */
	vm_paddr_t phys_addr;		/* physical address of page */
	uint16_t queue;			/* page queue index */
	uint16_t pc;			/* page color */
	uint8_t	act_count;		/* page usage count */
	uint8_t	pat_mode;		/* hardware page attribute */
	uint8_t	valid;			/* map of valid DEV_BSIZE chunks */
	uint8_t	dirty;			/* map of dirty DEV_BSIZE chunks */
	uint32_t flags;			/* see below */
	int	unused01;		/* available */
	/* 128 bytes */
#ifdef VM_PAGE_DEBUG
	const char *busy_func;
	int	busy_line;
#endif
};

#define PBUSY_LOCKED		0x80000000U
#define PBUSY_WANTED		0x40000000U
#define PBUSY_SWAPINPROG	0x20000000U
#define PBUSY_MASK		0x1FFFFFFFU

#ifndef __VM_PAGE_T_DEFINED__
#define __VM_PAGE_T_DEFINED__
typedef struct vm_page *vm_page_t;
#endif
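
/*
 * Editorial note (not part of the original header): busy_count packs the
 * hard-busy lock, wanted, and swap-in-progress bits into the top three
 * bits, leaving PBUSY_MASK as the soft-busy reference count.  A sketch of
 * an "is the page busy at all?" test under that assumption:
 *
 *	if (m->busy_count & (PBUSY_LOCKED | PBUSY_MASK))
 *		return (EBUSY);		(hard-busied or soft-busy refs held)
 */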

/*
 * Page coloring parameters.  We use generous parameters designed to
 * statistically spread pages over available cpu cache space.  This has
 * become less important over time as cache associativity is higher
 * in modern times but we still use the core algorithm to help reduce
 * lock contention between cpus.
 *
 * Page coloring cannot be disabled.
 *
 * In today's world of many-core systems, we must be able to provide enough VM
 * page queues for each logical cpu thread to cover the L1/L2/L3 cache set
 * associativity.  If we don't, the cpu caches will not be properly utilized.
 *
 * Using 2048 allows 8-way set-assoc with 256 logical cpus, but seems to
 * have a number of downsides when queues are asymmetrically starved.
 *
 * Using 1024 allows 4-way set-assoc with 256 logical cpus, and more with
 * fewer cpus.
 */
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 1024	/* Must be enough for maximal ncpus x hw set-assoc */
#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

#define PQ_NONE		0
#define PQ_FREE		(1 + 0*PQ_L2_SIZE)
#define PQ_INACTIVE	(1 + 1*PQ_L2_SIZE)
#define PQ_ACTIVE	(1 + 2*PQ_L2_SIZE)
#define PQ_CACHE	(1 + 3*PQ_L2_SIZE)
#define PQ_HOLD		(1 + 4*PQ_L2_SIZE)
#define PQ_COUNT	(1 + 5*PQ_L2_SIZE)
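
/*
 * Illustrative sketch (editorial addition): a page's slot in the global
 * queue array is its base queue plus its color masked to the queue group
 * size, e.g. for a page on the active queue:
 *
 *	idx = PQ_ACTIVE + (m->pc & PQ_L2_MASK);	(index into vm_page_queues[])
 */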

/*
 * Scan support
 */
struct vm_map;

struct rb_vm_page_scan_info {
	vm_pindex_t	start_pindex;
	vm_pindex_t	end_pindex;
	int		limit;
	int		desired;
	int		error;
	int		pagerflags;
	int		count;
	int		unused01;
	vm_offset_t	addr;
	struct vm_map_entry *entry;
	struct vm_object *object;
	struct vm_object *dest_object;
	struct vm_page	*mpte;
	struct pmap	*pmap;
	struct vm_map	*map;
};

int rb_vm_page_scancmp(struct vm_page *, void *);

struct vpgqueues {
	struct spinlock spin;
	struct pglist pl;
	long	lcnt;
	long	adds;		/* heuristic, add operations */
	int	cnt_offset;	/* offset into vmstats structure (int) */
	int	lastq;		/* heuristic, skip empty queues */
} __aligned(64);

extern struct vpgqueues vm_page_queues[PQ_COUNT];
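
/*
 * Illustrative sketch (editorial addition): walking one page queue under
 * its spinlock.  This is an assumption-laden outline only; real scan code
 * must also cope with busied pages and concurrent queue changes.
 *
 *	struct vpgqueues *vpq = &vm_page_queues[idx];
 *	vm_page_t m;
 *
 *	spin_lock(&vpq->spin);
 *	TAILQ_FOREACH(m, &vpq->pl, pageq) {
 *		if (m->flags & PG_MARKER)
 *			continue;
 *		...
 *	}
 *	spin_unlock(&vpq->spin);
 */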

/*
 * The m->flags field is generally categorized as follows.  Unless otherwise
 * noted, a flag may only be updated while the page is hard-busied.
 *
 * PG_UNQUEUED	   - This prevents the page from being placed on any queue.
 *
 * PG_FICTITIOUS   - This indicates to the pmap subsystem that the
 *		     page might not be reverse-addressable via
 *		     PHYS_TO_VM_PAGE().  The vm_page_t might be
 *		     temporary and not exist in the vm_page_array[].
 *
 *		     This also generally means that the pmap subsystem
 *		     cannot synchronize the [M]odified and [A]ccessed
 *		     bits with the related vm_page_t, and in fact that
 *		     there might not even BE a related vm_page_t.
 *
 *		     Unlike the old system, the new pmap subsystem is
 *		     able to do bulk operations on virtual address ranges
 *		     containing fictitious pages, and can also pick out
 *		     specific fictitious pages by matching m->phys_addr
 *		     if you supply a fake vm_page to it.
 *
 *		     Fictitious pages can still be organized into vm_objects
 *		     if desired.
 *
 * PG_MAPPED	   - Indicates that the page MIGHT be mapped into a pmap.
 *		     If not set, guarantees that the page is not mapped.
 *
 *		     This bit can be set unlocked but may only be cleared
 *		     while the vm_page is hard-busied.
 *
 *		     For FICTITIOUS pages, this bit will be set automatically
 *		     via a page fault (aka pmap_enter()), but must be cleared
 *		     manually.
 *
 * PG_MAPPEDMULTI  - Possibly mapped to multiple pmaps or to multiple
 *		     locations in one pmap.
 *
 * PG_WRITEABLE	   - Indicates that the page MIGHT be writeable via a pte.
 *		     If not set, guarantees that the page is not writeable.
 *
 *		     This bit can be set unlocked but may only be cleared
 *		     while the vm_page is hard-busied.
 *
 *		     For FICTITIOUS pages, this bit will be set automatically
 *		     via a page fault (aka pmap_enter()), but must be cleared
 *		     manually.
 *
 * PG_SWAPPED	   - Indicates that the page is backed by a swap block.
 *		     Any VM object type other than OBJT_DEFAULT can contain
 *		     swap-backed pages now.  The bit may only be adjusted
 *		     while the page is hard-busied.
 *
 * PG_RAM	   - Heuristic read-ahead marker.  When I/O brings pages in,
 *		     this bit is set on one of them to force a page fault on
 *		     it to proactively read-ahead additional pages.
 *
 *		     Can be set or cleared at any time unlocked.
 *
 * PG_WINATCFLS	   - This is used to give dirty pages a second chance
 *		     on the inactive queue before getting flushed by
 *		     the pageout daemon.
 *
 * PG_REFERENCED   - Indicates that the page has been accessed.  If the
 *		     page is PG_MAPPED, this bit might not reflect the
 *		     actual state of the page.  The pmap code synchronizes
 *		     the [A]ccessed bit to this flag and then clears the
 *		     [A]ccessed bit.
 *
 * PG_MARKER	   - Used by any queue-scanning code to recognize a fake
 *		     vm_page being used only as a scan marker.
 *
 * PG_NOTMETA	   - Distinguishes pages representing content from pages
 *		     representing meta-data.
 *
 * PG_NEED_COMMIT  - May only be modified while the page is hard-busied.
 *		     Indicates that even if the page might not appear to
 *		     be dirty, it must still be validated against some
 *		     remote entity (e.g. NFS) before it can be thrown away.
 *
 * PG_CLEANCHK	   - Used by the vm_object subsystem to detect pages that
 *		     might have been inserted during a scan.  May be changed
 *		     at any time by the VM system (usually while holding the
 *		     related vm_object's lock).
 */
#define PG_UNUSED0001	0x00000001
#define PG_UNUSED0002	0x00000002
#define PG_WINATCFLS	0x00000004	/* flush dirty page on inactive q */
#define PG_FICTITIOUS	0x00000008	/* No reverse-map or tracking */
#define PG_WRITEABLE	0x00000010	/* page may be writeable */
#define PG_MAPPED	0x00000020	/* page may be mapped (managed) */
#define PG_MAPPEDMULTI	0x00000040	/* multiple mappings */
#define PG_REFERENCED	0x00000080	/* page has been referenced */
#define PG_CLEANCHK	0x00000100	/* page will be checked for cleaning */
#define PG_UNUSED0200	0x00000200
#define PG_NOSYNC	0x00000400	/* do not collect for syncer */
#define PG_UNQUEUED	0x00000800	/* No queue management for page */
#define PG_MARKER	0x00001000	/* special queue marker page */
#define PG_RAM		0x00002000	/* read ahead mark */
#define PG_SWAPPED	0x00004000	/* backed by swap */
#define PG_NOTMETA	0x00008000	/* do not back with swap */
#define PG_UNUSED10000	0x00010000
#define PG_UNUSED20000	0x00020000
#define PG_NEED_COMMIT	0x00040000	/* clean page requires commit */

#define PG_KEEP_NEWPAGE_MASK	(0)
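
/*
 * Editorial note (an assumption, not from the original source): since
 * m->flags can be updated unlocked in the PG_REFERENCED case described
 * above, such updates presumably go through the atomic ops pulled in via
 * <machine/atomic.h>, along the lines of:
 *
 *	atomic_set_int(&m->flags, PG_REFERENCED);  (pmap [A]ccessed sync)
 *	atomic_clear_int(&m->flags, PG_CLEANCHK);  (usually under obj lock)
 */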

/*
 * Misc constants.
 */
#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64

#ifdef VM_PAGE_DEBUG
#define VM_PAGE_DEBUG_EXT(name)	name ## _debug
#define VM_PAGE_DEBUG_ARGS	, const char *func, int lineno
#else
#define VM_PAGE_DEBUG_EXT(name)	name
#define VM_PAGE_DEBUG_ARGS
#endif

#ifdef _KERNEL

/*
 *	Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation.  Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been
 *		recently referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 */

extern struct vm_page *vm_page_array;	/* First resident page in table */
extern vm_pindex_t vm_page_array_size;	/* number of vm_page_t's */
extern vm_pindex_t first_page;		/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	\
	((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa)	\
	(&vm_page_array[atop(pa) - first_page])
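
/*
 * Illustrative sketch (editorial addition): for normal (non-FICTITIOUS)
 * pages the two macros above are inverses, since vm_page_array[] is
 * indexed by physical page number relative to first_page:
 *
 *	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
 *	KKASSERT(PHYS_TO_VM_PAGE(pa) == m);	(holds for managed pages)
 */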

#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif
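
/*
 * Editorial note (not from the original source): m->valid and m->dirty
 * carry one bit per DEV_BSIZE (512 byte) chunk, so a 4K page needs eight
 * bits and VM_PAGE_BITS_ALL is 0xff.  vm_page_bits(), declared below,
 * maps a byte range to such a chunk mask; conceptually:
 *
 *	vm_page_bits(0, 1024) == 0x03	(the first two 512-byte chunks)
 */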

/*
 * Note: the code will always use nominally free pages from the free list
 * before trying other flag-specified sources.
 *
 * At least one of VM_ALLOC_NORMAL|VM_ALLOC_SYSTEM|VM_ALLOC_INTERRUPT
 * must be specified.  VM_ALLOC_RETRY may only be specified if VM_ALLOC_NORMAL
 * is also specified.
 */
#define VM_ALLOC_NORMAL		0x0001	/* ok to use cache pages */
#define VM_ALLOC_SYSTEM		0x0002	/* ok to exhaust most of free list */
#define VM_ALLOC_INTERRUPT	0x0004	/* ok to exhaust entire free list */
#define VM_ALLOC_ZERO		0x0008	/* req pre-zero'd memory if avail */
#define VM_ALLOC_QUICK		0x0010	/* like NORMAL but do not use cache */
#define VM_ALLOC_FORCE_ZERO	0x0020	/* zero page even if already valid */
#define VM_ALLOC_NULL_OK	0x0040	/* ok to return NULL on collision */
#define VM_ALLOC_RETRY		0x0080	/* indefinite block (vm_page_grab()) */
#define VM_ALLOC_USE_GD		0x0100	/* use per-gd cache */
#define VM_ALLOC_CPU_SPEC	0x0200

#define VM_ALLOC_CPU_SHIFT	16
#define VM_ALLOC_CPU(n)		(((n) << VM_ALLOC_CPU_SHIFT) | \
				 VM_ALLOC_CPU_SPEC)
#define VM_ALLOC_GETCPU(flags)	((flags) >> VM_ALLOC_CPU_SHIFT)
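
/*
 * Illustrative sketch (editorial addition): a hypothetical caller grabbing
 * a zeroed, hard-busied page while honoring the flag rules above (one of
 * NORMAL/SYSTEM/INTERRUPT required, RETRY only together with NORMAL):
 *
 *	m = vm_page_grab(object, pindex,
 *			 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 *	...use the page...
 *	vm_page_wakeup(m);
 */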

void vm_page_queue_spin_lock(vm_page_t);
void vm_page_queues_spin_lock(u_short);
void vm_page_and_queue_spin_lock(vm_page_t);

void vm_page_queue_spin_unlock(vm_page_t);
void vm_page_queues_spin_unlock(u_short);
void vm_page_and_queue_spin_unlock(vm_page_t m);

void vm_page_init(vm_page_t m);
void vm_page_io_finish(vm_page_t m);
void vm_page_io_start(vm_page_t m);
void vm_page_need_commit(vm_page_t m);
void vm_page_clear_commit(vm_page_t m);
void vm_page_wakeup(vm_page_t m);
void vm_page_hold(vm_page_t);
void vm_page_unhold(vm_page_t);
void vm_page_activate(vm_page_t);
void vm_page_soft_activate(vm_page_t);

vm_size_t vm_contig_avail_pages(void);
vm_page_t vm_page_alloc(struct vm_object *, vm_pindex_t, int);
vm_page_t vm_page_alloczwq(vm_pindex_t, int);
void vm_page_freezwq(vm_page_t m);
vm_page_t vm_page_alloc_contig(vm_paddr_t low, vm_paddr_t high,
			unsigned long alignment, unsigned long boundary,
			unsigned long size, vm_memattr_t memattr);

vm_page_t vm_page_grab(struct vm_object *, vm_pindex_t, int);
void vm_page_cache(vm_page_t);
int vm_page_try_to_cache(vm_page_t);
int vm_page_try_to_free(vm_page_t);
void vm_page_dontneed(vm_page_t);
void vm_page_deactivate(vm_page_t);
void vm_page_deactivate_locked(vm_page_t);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
int vm_page_insert(vm_page_t, struct vm_object *, vm_pindex_t);

vm_page_t vm_page_hash_get(vm_object_t object, vm_pindex_t pindex);

vm_page_t vm_page_lookup(struct vm_object *, vm_pindex_t);
vm_page_t vm_page_lookup_sbusy_try(struct vm_object *object,
			vm_pindex_t pindex, int pgoff, int pgbytes);
vm_page_t VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_wait)(
			struct vm_object *, vm_pindex_t, int, const char *
			VM_PAGE_DEBUG_ARGS);
vm_page_t VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_try)(
			struct vm_object *, vm_pindex_t, int, int *
			VM_PAGE_DEBUG_ARGS);
void vm_page_remove(vm_page_t);
void vm_page_rename(vm_page_t, struct vm_object *, vm_pindex_t);
void vm_page_startup(void);
void vm_numa_organize(vm_paddr_t ran_beg, vm_paddr_t bytes, int physid);
void vm_numa_organize_finalize(void);
void vm_page_unwire(vm_page_t, int);
void vm_page_wire(vm_page_t);
void vm_page_unqueue(vm_page_t);
void vm_page_unqueue_nowakeup(vm_page_t);
vm_page_t vm_page_next(vm_page_t);
void vm_page_set_validclean(vm_page_t, int, int);
void vm_page_set_validdirty(vm_page_t, int, int);
void vm_page_set_valid(vm_page_t, int, int);
void vm_page_set_dirty(vm_page_t, int, int);
void vm_page_clear_dirty(vm_page_t, int, int);
void vm_page_set_invalid(vm_page_t, int, int);
int vm_page_is_valid(vm_page_t, int, int);
void vm_page_test_dirty(vm_page_t);
int vm_page_bits(int, int);
vm_page_t vm_page_list_find(int basequeue, int index);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_free_contig(vm_page_t m, unsigned long size);
vm_page_t vm_page_free_fromq_fast(void);
void vm_page_dirty(vm_page_t m);
void vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg);
int vm_page_sbusy_try(vm_page_t m);
void VM_PAGE_DEBUG_EXT(vm_page_busy_wait)(vm_page_t m,
			int also_m_busy, const char *wmsg VM_PAGE_DEBUG_ARGS);
int VM_PAGE_DEBUG_EXT(vm_page_busy_try)(vm_page_t m,
			int also_m_busy VM_PAGE_DEBUG_ARGS);
u_short vm_get_pg_color(int cpuid, vm_object_t object, vm_pindex_t pindex);
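
/*
 * Illustrative sketch (editorial addition): the lookup+busy entry points
 * combine the object lookup with the hard-busy handshake.  A hypothetical
 * non-blocking probe might look like:
 *
 *	int error;
 *	vm_page_t m;
 *
 *	m = vm_page_lookup_busy_try(object, pindex, FALSE, &error);
 *	if (m && error == 0) {
 *		...page is hard-busied here...
 *		vm_page_wakeup(m);
 *	}
 */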

#ifdef VM_PAGE_DEBUG

#define vm_page_lookup_busy_wait(object, pindex, alsob, msg)		\
	vm_page_lookup_busy_wait_debug(object, pindex, alsob, msg,	\
					__func__, __LINE__)

#define vm_page_lookup_busy_try(object, pindex, alsob, errorp)		\
	vm_page_lookup_busy_try_debug(object, pindex, alsob, errorp,	\
					__func__, __LINE__)

#define vm_page_busy_wait(m, alsob, msg)				\
	vm_page_busy_wait_debug(m, alsob, msg, __func__, __LINE__)

#define vm_page_busy_try(m, alsob)					\
	vm_page_busy_try_debug(m, alsob, __func__, __LINE__)

#endif

#endif				/* _KERNEL */
#endif				/* !_VM_VM_PAGE_H_ */