kernel - Fix NUMA contention due to asymmetric memory
[dragonfly.git] / sys / vm / vm_page.h
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2003-2017 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * Resident memory system definitions.
 */
#ifndef _VM_VM_PAGE_H_
#define _VM_VM_PAGE_H_

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _MACHINE_PMAP_H_
#include <machine/pmap.h>
#endif
#ifndef _VM_PMAP_H_
#include <vm/pmap.h>
#endif
#include <machine/atomic.h>

#ifdef _KERNEL

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif

#ifdef __x86_64__
#include <machine/vmparam.h>
#endif

#endif	/* _KERNEL */
/*
 * vm_page structure
 */
TAILQ_HEAD(pglist, vm_page);

struct vm_object;

int rb_vm_page_compare(struct vm_page *, struct vm_page *);

struct vm_page_rb_tree;
RB_PROTOTYPE2(vm_page_rb_tree, vm_page, rb_entry,
	      rb_vm_page_compare, vm_pindex_t);
struct vm_page {
	TAILQ_ENTRY(vm_page) pageq;	/* vm_page_queues[] list (P) */
	RB_ENTRY(vm_page) rb_entry;	/* Red-Black tree based at object */
	struct spinlock	spin;
	struct vm_object *object;	/* which object am I in (O,P) */
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_paddr_t phys_addr;		/* physical address of page */
	struct md_page md;		/* machine dependent stuff */
	uint16_t queue;			/* page queue index */
	uint16_t pc;			/* page color */
	uint8_t	act_count;		/* page usage count */
	uint8_t	pat_mode;		/* hardware page attribute */
	uint8_t	valid;			/* map of valid DEV_BSIZE chunks */
	uint8_t	dirty;			/* map of dirty DEV_BSIZE chunks */
	uint32_t flags;			/* see below */
	uint32_t wire_count;		/* wired down maps refs (P) */
	uint32_t busy_count;		/* soft-busy and hard-busy */
	int	hold_count;		/* page hold count */
	int	ku_pagecnt;		/* kmalloc helper */
#ifdef VM_PAGE_DEBUG
	const char *busy_func;
	int	busy_line;
#endif
};
#define PBUSY_LOCKED		0x80000000U
#define PBUSY_WANTED		0x40000000U
#define PBUSY_SWAPINPROG	0x20000000U
#define PBUSY_MASK		0x1FFFFFFFU
#ifndef __VM_PAGE_T_DEFINED__
#define __VM_PAGE_T_DEFINED__
typedef struct vm_page *vm_page_t;
#endif
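
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * busy_count packs the hard-busy lock bit (PBUSY_LOCKED), a wakeup-wanted
 * bit, a swap-in-progress bit, and a 29-bit soft-busy count (PBUSY_MASK).
 * A typical hard-busy cycle, using functions declared later in this header,
 * looks like:
 *
 *	vm_page_busy_wait(m, FALSE, "pgbusy");	// acquire PBUSY_LOCKED
 *	// ... operate on the page ...
 *	vm_page_wakeup(m);			// release, wake PBUSY_WANTED
 */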
/*
 * Page coloring parameters.  We use generous parameters designed to
 * statistically spread pages over available cpu cache space.  This has
 * become less important over time as cache associativity is higher
 * in modern times but we still use the core algorithm to help reduce
 * lock contention between cpus.
 *
 * Page coloring cannot be disabled.
 *
 * In today's world of many-core systems, we must be able to provide enough VM
 * page queues for each logical cpu thread to cover the L1/L2/L3 cache set
 * associativity.  If we don't, the cpu caches will not be properly utilized.
 * Using 2048 queues allows 8-way set associativity with 256 logical cpus.
 */
#define PQ_PRIME1 31	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_PRIME2 23	/* Prime number somewhat less than PQ_HASH_SIZE */
#define PQ_L2_SIZE 2048	/* Must be enough for maximal ncpus x hw set-assoc */
#define PQ_L2_MASK	(PQ_L2_SIZE - 1)

#define PQ_NONE		0
#define PQ_FREE		(1 + 0*PQ_L2_SIZE)
#define PQ_INACTIVE	(1 + 1*PQ_L2_SIZE)
#define PQ_ACTIVE	(1 + 2*PQ_L2_SIZE)
#define PQ_CACHE	(1 + 3*PQ_L2_SIZE)
#define PQ_HOLD		(1 + 4*PQ_L2_SIZE)
#define PQ_COUNT	(1 + 5*PQ_L2_SIZE)
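
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * each base queue (PQ_FREE, PQ_INACTIVE, ...) is really an array of
 * PQ_L2_SIZE queues selected by the page color, so the vm_page_queues[]
 * slot for a page m sitting on, say, the free queue is:
 *
 *	struct vpgqueues *vpq;
 *
 *	vpq = &vm_page_queues[PQ_FREE + (m->pc & PQ_L2_MASK)];
 *
 * Spreading pages across colors both improves cache utilization and gives
 * each cpu mostly-private queue spinlocks, reducing contention.
 */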
/*
 * Scan support
 */
struct vm_map;
struct rb_vm_page_scan_info {
	vm_pindex_t	start_pindex;
	vm_pindex_t	end_pindex;
	int		limit;
	int		desired;
	int		error;
	int		pagerflags;
	int		count;
	int		unused01;
	vm_offset_t	addr;
	vm_pindex_t	backing_offset_index;
	struct vm_object *object;
	struct vm_object *backing_object;
	struct vm_page	*mpte;
	struct pmap	*pmap;
	struct vm_map	*map;
};

int rb_vm_page_scancmp(struct vm_page *, void *);
struct vpgqueues {
	struct spinlock spin;
	struct pglist pl;
	int	cnt_offset;	/* offset into vmstats structure (int) */
	int	lcnt;
	int	flipflop;	/* probably not the best place */
	int	unused00;
	int	unused01;
	char	unused[64 - sizeof(struct pglist) -
			sizeof(int *) - sizeof(int) * 4];
};

extern struct vpgqueues vm_page_queues[PQ_COUNT];
/*
 * These are the flags defined for vm_page.
 *
 * PG_UNMANAGED (used by OBJT_PHYS) indicates that the page is
 * not under PV management but otherwise should be treated as a
 * normal page.  Pages not under PV management cannot be paged out
 * via the object/vm_page_t because there is no knowledge of their
 * pte mappings, nor can they be removed from their objects via
 * the object, and such pages are also not on any PQ queue.  The
 * PG_MAPPED and PG_WRITEABLE flags are not applicable.
 *
 * PG_MAPPED only applies to managed pages, indicating whether the page
 * is mapped onto one or more pmaps.  A page might still be mapped to
 * special pmaps in an unmanaged fashion, for example when mapped into a
 * buffer cache buffer, without setting PG_MAPPED.
 *
 * PG_WRITEABLE indicates that there may be a writeable managed pmap entry
 * somewhere, and that the page can be dirtied by hardware at any time
 * and may have to be tested for that.  The modified bit in unmanaged
 * mappings or in the special clean map is not tested.
 *
 * PG_SWAPPED indicates that the page is backed by a swap block.  Any
 * VM object type other than OBJT_DEFAULT can have swap-backed pages now.
 */
#define PG_UNUSED0001	0x00000001
#define PG_UNUSED0002	0x00000002
#define PG_WINATCFLS	0x00000004	/* flush dirty page on inactive q */
#define PG_FICTITIOUS	0x00000008	/* physical page doesn't exist (O) */
#define PG_WRITEABLE	0x00000010	/* page is writeable */
#define PG_MAPPED	0x00000020	/* page is mapped (managed) */
#define PG_UNUSED0040	0x00000040
#define PG_REFERENCED	0x00000080	/* page has been referenced */
#define PG_CLEANCHK	0x00000100	/* page will be checked for cleaning */
#define PG_UNUSED0200	0x00000200
#define PG_NOSYNC	0x00000400	/* do not collect for syncer */
#define PG_UNMANAGED	0x00000800	/* No PV management for page */
#define PG_MARKER	0x00001000	/* special queue marker page */
#define PG_RAM		0x00002000	/* read ahead mark */
#define PG_SWAPPED	0x00004000	/* backed by swap */
#define PG_NOTMETA	0x00008000	/* do not back with swap */
#define PG_UNUSED10000	0x00010000
#define PG_UNUSED20000	0x00020000
#define PG_NEED_COMMIT	0x00040000	/* clean page requires commit */

#define PG_KEEP_NEWPAGE_MASK	(0)
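
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * the flags are tested with simple mask operations on m->flags.  For
 * instance, a page that is mapped and possibly writeable via some pmap may
 * need its hardware-modified state folded into the dirty map before it can
 * be considered clean:
 *
 *	if ((m->flags & (PG_MAPPED | PG_WRITEABLE)) ==
 *	    (PG_MAPPED | PG_WRITEABLE)) {
 *		vm_page_test_dirty(m);	// declared later in this header
 *	}
 */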
/*
 * Misc constants.
 */
#define ACT_DECLINE		1
#define ACT_ADVANCE		3
#define ACT_INIT		5
#define ACT_MAX			64
#ifdef VM_PAGE_DEBUG
#define VM_PAGE_DEBUG_EXT(name)	name ## _debug
#define VM_PAGE_DEBUG_ARGS	, const char *func, int lineno
#else
#define VM_PAGE_DEBUG_EXT(name)	name
#define VM_PAGE_DEBUG_ARGS
#endif
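
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * with VM_PAGE_DEBUG defined, a prototype written as
 *
 *	void VM_PAGE_DEBUG_EXT(vm_page_busy_wait)(vm_page_t m,
 *		int also_m_busy, const char *wmsg VM_PAGE_DEBUG_ARGS);
 *
 * expands to
 *
 *	void vm_page_busy_wait_debug(vm_page_t m,
 *		int also_m_busy, const char *wmsg,
 *		const char *func, int lineno);
 *
 * and the wrapper macros near the end of this header supply __func__ and
 * __LINE__ at each call site.  Without VM_PAGE_DEBUG both macros expand to
 * nothing extra and the plain function names are used.
 */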
#ifdef _KERNEL

/*
 * Each pageable resident page falls into one of five lists:
 *
 *	free
 *		Available for allocation now.
 *
 *	The following are all LRU sorted:
 *
 *	cache
 *		Almost available for allocation.  Still in an
 *		object, but clean and immediately freeable at
 *		non-interrupt times.
 *
 *	inactive
 *		Low activity, candidates for reclamation.
 *		This is the list of pages that should be
 *		paged out next.
 *
 *	active
 *		Pages that are "active", i.e. they have been
 *		recently referenced.
 *
 *	zero
 *		Pages that are really free and have been pre-zeroed.
 */
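
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * these lists are walked per-color via vm_page_list_find(), declared below;
 * e.g. locating a free page in the color queue matching an index:
 *
 *	vm_page_t m = vm_page_list_find(PQ_FREE, index & PQ_L2_MASK);
 */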
extern struct vm_page *vm_page_array;	/* First resident page in table */
extern vm_pindex_t vm_page_array_size;	/* number of vm_page_t's */
extern vm_pindex_t first_page;		/* first physical page number */

#define VM_PAGE_TO_PHYS(entry)	\
	((entry)->phys_addr)

#define PHYS_TO_VM_PAGE(pa)	\
	(&vm_page_array[atop(pa) - first_page])
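
/*
 * Illustrative sketch (editor's example; the helper name is hypothetical
 * and not part of this header): the two macros above are inverses for any
 * physical address covered by the resident page array.
 */
static __inline vm_page_t
example_pa_to_page(vm_paddr_t pa)
{
	vm_page_t m = PHYS_TO_VM_PAGE(pa);

	/* phys_addr is page-aligned, so compare at page granularity */
	KKASSERT(atop(VM_PAGE_TO_PHYS(m)) == atop(pa));
	return (m);
}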
#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xff
#endif
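
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * with PAGE_SIZE 4096 and DEV_BSIZE 512 the per-page valid/dirty fields
 * carry one bit per 512-byte chunk, so eight bits cover the whole page
 * (VM_PAGE_BITS_ALL == 0xff).  vm_page_bits(), declared below, converts a
 * byte range within the page to such a mask; e.g. the first 1024 bytes map
 * to the low two bits:
 *
 *	vm_page_bits(0, 1024) == 0x03
 */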
/*
 * Note: the code will always use nominally free pages from the free list
 * before trying other flag-specified sources.
 *
 * At least one of VM_ALLOC_NORMAL|VM_ALLOC_SYSTEM|VM_ALLOC_INTERRUPT
 * must be specified.  VM_ALLOC_RETRY may only be specified if VM_ALLOC_NORMAL
 * is also specified.
 */
#define VM_ALLOC_NORMAL		0x0001	/* ok to use cache pages */
#define VM_ALLOC_SYSTEM		0x0002	/* ok to exhaust most of free list */
#define VM_ALLOC_INTERRUPT	0x0004	/* ok to exhaust entire free list */
#define VM_ALLOC_ZERO		0x0008	/* req pre-zero'd memory if avail */
#define VM_ALLOC_QUICK		0x0010	/* like NORMAL but do not use cache */
#define VM_ALLOC_FORCE_ZERO	0x0020	/* zero page even if already valid */
#define VM_ALLOC_NULL_OK	0x0040	/* ok to return NULL on collision */
#define VM_ALLOC_RETRY		0x0080	/* indefinite block (vm_page_grab()) */
#define VM_ALLOC_USE_GD		0x0100	/* use per-gd cache */
#define VM_ALLOC_CPU_SPEC	0x0200

#define VM_ALLOC_CPU_SHIFT	16
#define VM_ALLOC_CPU(n)		(((n) << VM_ALLOC_CPU_SHIFT) | \
				 VM_ALLOC_CPU_SPEC)
#define VM_ALLOC_GETCPU(flags)	((flags) >> VM_ALLOC_CPU_SHIFT)
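
/*
 * Illustrative sketch (editor's example, not part of the original header):
 * grab a busied, zeroed page at pindex, sleeping until one is available.
 * VM_ALLOC_RETRY is legal here only because VM_ALLOC_NORMAL is also given,
 * per the rules above:
 *
 *	m = vm_page_grab(object, pindex,
 *			 VM_ALLOC_NORMAL | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
 *
 * VM_ALLOC_CPU(n) encodes a preferred cpu in the high bits, so
 * VM_ALLOC_GETCPU(VM_ALLOC_CPU(2)) recovers 2.
 */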
void vm_page_queue_spin_lock(vm_page_t);
void vm_page_queues_spin_lock(u_short);
void vm_page_and_queue_spin_lock(vm_page_t);

void vm_page_queue_spin_unlock(vm_page_t);
void vm_page_queues_spin_unlock(u_short);
void vm_page_and_queue_spin_unlock(vm_page_t m);

void vm_page_init(vm_page_t m);
void vm_page_io_finish(vm_page_t m);
void vm_page_io_start(vm_page_t m);
void vm_page_need_commit(vm_page_t m);
void vm_page_clear_commit(vm_page_t m);
void vm_page_wakeup(vm_page_t m);
void vm_page_hold(vm_page_t);
void vm_page_unhold(vm_page_t);
void vm_page_activate(vm_page_t);

vm_size_t vm_contig_avail_pages(void);
vm_page_t vm_page_alloc(struct vm_object *, vm_pindex_t, int);
vm_page_t vm_page_alloc_contig(vm_paddr_t low, vm_paddr_t high,
		unsigned long alignment, unsigned long boundary,
		unsigned long size, vm_memattr_t memattr);

vm_page_t vm_page_grab(struct vm_object *, vm_pindex_t, int);
void vm_page_cache(vm_page_t);
int vm_page_try_to_cache(vm_page_t);
int vm_page_try_to_free(vm_page_t);
void vm_page_dontneed(vm_page_t);
void vm_page_deactivate(vm_page_t);
void vm_page_deactivate_locked(vm_page_t);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
int vm_page_insert(vm_page_t, struct vm_object *, vm_pindex_t);
vm_page_t vm_page_lookup(struct vm_object *, vm_pindex_t);
vm_page_t vm_page_lookup_sbusy_try(struct vm_object *object,
		vm_pindex_t pindex, int pgoff, int pgbytes);
vm_page_t VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_wait)(
		struct vm_object *, vm_pindex_t, int, const char *
		VM_PAGE_DEBUG_ARGS);
vm_page_t VM_PAGE_DEBUG_EXT(vm_page_lookup_busy_try)(
		struct vm_object *, vm_pindex_t, int, int *
		VM_PAGE_DEBUG_ARGS);
void vm_page_remove(vm_page_t);
void vm_page_rename(vm_page_t, struct vm_object *, vm_pindex_t);
void vm_page_startup(void);
void vm_numa_organize(vm_paddr_t ran_beg, vm_paddr_t bytes, int physid);
void vm_numa_organize_finalize(void);
void vm_page_unmanage(vm_page_t);
void vm_page_unwire(vm_page_t, int);
void vm_page_wire(vm_page_t);
void vm_page_unqueue(vm_page_t);
void vm_page_unqueue_nowakeup(vm_page_t);
vm_page_t vm_page_next(vm_page_t);
void vm_page_set_validclean(vm_page_t, int, int);
void vm_page_set_validdirty(vm_page_t, int, int);
void vm_page_set_valid(vm_page_t, int, int);
void vm_page_set_dirty(vm_page_t, int, int);
void vm_page_clear_dirty(vm_page_t, int, int);
void vm_page_set_invalid(vm_page_t, int, int);
int vm_page_is_valid(vm_page_t, int, int);
void vm_page_test_dirty(vm_page_t);
int vm_page_bits(int, int);
vm_page_t vm_page_list_find(int basequeue, int index);
void vm_page_zero_invalid(vm_page_t m, boolean_t setvalid);
void vm_page_free_toq(vm_page_t m);
void vm_page_free_contig(vm_page_t m, unsigned long size);
vm_page_t vm_page_free_fromq_fast(void);
void vm_page_dirty(vm_page_t m);
void vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg);
int vm_page_sbusy_try(vm_page_t m);
void VM_PAGE_DEBUG_EXT(vm_page_busy_wait)(vm_page_t m,
		int also_m_busy, const char *wmsg VM_PAGE_DEBUG_ARGS);
int VM_PAGE_DEBUG_EXT(vm_page_busy_try)(vm_page_t m,
		int also_m_busy VM_PAGE_DEBUG_ARGS);
u_short vm_get_pg_color(int cpuid, vm_object_t object, vm_pindex_t pindex);
#ifdef VM_PAGE_DEBUG

#define vm_page_lookup_busy_wait(object, pindex, alsob, msg)		\
	vm_page_lookup_busy_wait_debug(object, pindex, alsob, msg,	\
					__func__, __LINE__)

#define vm_page_lookup_busy_try(object, pindex, alsob, errorp)		\
	vm_page_lookup_busy_try_debug(object, pindex, alsob, errorp,	\
					__func__, __LINE__)

#define vm_page_busy_wait(m, alsob, msg)				\
	vm_page_busy_wait_debug(m, alsob, msg, __func__, __LINE__)

#define vm_page_busy_try(m, alsob)					\
	vm_page_busy_try_debug(m, alsob, __func__, __LINE__)

#endif
#endif	/* _KERNEL */
#endif	/* !_VM_VM_PAGE_H_ */