/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright 2012 Joyent, Inc. All rights reserved.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Portions of this source code were derived from Berkeley 4.3 BSD
 * under license from the Regents of the University of California.
 */
/*
 * This file contains common functions to access and manage the page lists.
 * Many of these routines originated from platform dependent modules
 * (sun4/vm/vm_dep.c, i86pc/vm/vm_machdep.c) and modified to function in
 * a platform independent manner.
 *
 * vm/vm_dep.h provides for platform specific support.
 */
#include <sys/types.h>
#include <sys/debug.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/sysmacros.h>
#include <vm/seg_kmem.h>
#include <vm/seg_vn.h>
#include <sys/vmsystm.h>
#include <sys/memnode.h>
#include <vm/vm_dep.h>
#include <sys/mem_config.h>
#include <sys/callb.h>
#include <sys/mem_cage.h>
#include <sys/dumphdr.h>
extern uint_t	vac_colors;

#define	MAX_PRAGMA_ALIGN	128

/* vm_cpu_data0 for the boot cpu before kmem is initialized */

#if L2CACHE_ALIGN_MAX <= MAX_PRAGMA_ALIGN
#pragma align	L2CACHE_ALIGN_MAX(vm_cpu_data0)
#else
#pragma align	MAX_PRAGMA_ALIGN(vm_cpu_data0)
#endif
char		vm_cpu_data0[VM_CPU_DATA_PADSIZE];
/*
 * number of page colors equivalent to requested color in page_get routines.
 * If set, keeps large pages intact longer and keeps MPO allocation
 * from the local mnode in favor of acquiring the 'correct' page color from
 * a demoted large page or from a remote mnode.
 */
uint_t	colorequiv;

/*
 * color equivalency mask for each page size.
 * Mask is computed based on cpu L2$ way sizes and colorequiv global.
 * High 4 bits determine the number of high order bits of the color to ignore.
 * Low 4 bits determine the number of low order bits of color to ignore (it's
 * only relevant for hashed index based page coloring).
 */
uchar_t colorequivszc[MMU_PAGE_SIZES];
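/*
 * Illustrative example (not from the original source): a colorequivszc[]
 * entry of 0x21 would mean "ignore the top 2 bits and the bottom 1 bit" of
 * the color when testing equivalency for that page size, since the high
 * nibble holds the count of high order color bits to ignore and the low
 * nibble the count of low order bits.
 */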
/*
 * if set, specifies the percentage of large pages that are free from within
 * a large page region before attempting to lock those pages for
 * page_get_contig_pages processing.
 *
 * Should be turned on when kpr is available when page_trylock_contig_pages
 * can be more selective.
 */

/*
 * Limit page get contig page search based on failure cnts in pgcpfailcnt[].
 * Enabled by default via pgcplimitsearch.
 *
 * pgcpfailcnt[] is bounded by PGCPFAILMAX (>= 1/2 of installed
 * memory). When reached, pgcpfailcnt[] is reset to 1/2 of this upper
 * bound. This upper bound range guarantees:
 *	- all large page 'slots' will be searched over time
 *	- the minimum (1) large page candidates considered on each pgcp call
 *	- count doesn't wrap around to 0
 */
pgcnt_t	pgcpfailcnt[MMU_PAGE_SIZES];
int	pgcplimitsearch = 1;
#define	PGCPFAILMAX		(1 << (highbit(physinstalled) - 1))
#define	SETPGCPFAILCNT(szc)						\
	if (++pgcpfailcnt[szc] >= PGCPFAILMAX)				\
		pgcpfailcnt[szc] = PGCPFAILMAX / 2;
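/*
 * Worked example (illustrative numbers, not from the original source): with
 * physinstalled at 0x300000 pages, highbit() returns 22, so PGCPFAILMAX is
 * 1 << 21 == 0x200000 (at least half of installed memory), and a
 * pgcpfailcnt[] entry that climbs to that bound is reset to 0x100000 by
 * SETPGCPFAILCNT().
 */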
#ifdef VM_STATS
struct vmm_vmstats_str vmm_vmstats;
#endif	/* VM_STATS */

/* enable page_get_contig_pages */
int	pg_contig_disable;
int	pg_lpgcreate_nocage = LPGCREATE;
/*
 * page_freelist_split pfn flag to signify no lo or hi pfn requirement.
 */
#define	PFNNULL		0

/* Flags involved in promotion and demotion routines */
#define	PC_FREE		0x1	/* put page on freelist */
#define	PC_ALLOC	0x2	/* return page for allocation */

/*
 * Flag for page_demote to be used with PC_FREE to denote that we don't care
 * what the color is as the color parameter to the function is ignored.
 */
#define	PC_NO_COLOR	(-1)

/* mtype value for page_promote to use when mtype does not matter */
#define	PC_MTYPE_ANY	(-1)
/*
 * page counters candidates info
 * See page_ctrs_cands comment below for more details.
 * fields are as follows:
 *	pcc_pages_free:		# pages which freelist coalesce can create
 *	pcc_color_free:		pointer to page free counts per color
 */
typedef struct pcc_info {
	pgcnt_t	pcc_pages_free;
	pgcnt_t	*pcc_color_free;
} pcc_info_t;
/*
 * On big machines it can take a long time to check page_counters
 * arrays. page_ctrs_cands is a summary array whose elements are a dynamically
 * updated sum of all elements of the corresponding page_counters arrays.
 * page_freelist_coalesce() searches page_counters only if an appropriate
 * element of page_ctrs_cands array is greater than 0.
 *
 * page_ctrs_cands is indexed by mutex (i), region (r), mnode (m), mrange (g)
 */
pcc_info_t **page_ctrs_cands[NPC_MUTEX][MMU_PAGE_SIZES];
/*
 * Return in val the total number of free pages which can be created
 * for the given mnode (m), mrange (g), and region size (r)
 */
#define	PGCTRS_CANDS_GETVALUE(m, g, r, val) {				\
	int i;								\
	val = 0;							\
	for (i = 0; i < NPC_MUTEX; i++) {				\
	    val += page_ctrs_cands[i][(r)][(m)][(g)].pcc_pages_free;	\
	}								\
}

/*
 * Return in val the total number of free pages which can be created
 * for the given mnode (m), mrange (g), region size (r), and color (c)
 */
#define	PGCTRS_CANDS_GETVALUECOLOR(m, g, r, c, val) {			\
	int i;								\
	val = 0;							\
	ASSERT((c) < PAGE_GET_PAGECOLORS(r));				\
	for (i = 0; i < NPC_MUTEX; i++) {				\
	    val +=							\
		page_ctrs_cands[i][(r)][(m)][(g)].pcc_color_free[(c)];	\
	}								\
}
/*
 * We can only allow a single thread to update a counter within the physical
 * range of the largest supported page size. That is the finest granularity
 * possible since the counter values are dependent on each other
 * as you move across region sizes. PP_CTR_LOCK_INDX is used to determine the
 * ctr_mutex lock index for a particular physical range.
 */
static kmutex_t	*ctr_mutex[NPC_MUTEX];

#define	PP_CTR_LOCK_INDX(pp)						\
	(((pp)->p_pagenum >>						\
	    (PAGE_BSZS_SHIFT(mmu_page_sizes - 1))) & (NPC_MUTEX - 1))
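/*
 * Worked example (illustrative values, not from the original source): with
 * NPC_MUTEX == 4 and PAGE_BSZS_SHIFT(mmu_page_sizes - 1) == 9 (largest page
 * spans 512 base pages), p_pagenum 0..511 maps to lock index 0, 512..1023 to
 * index 1, and so on, wrapping around every four largest-page regions.
 */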
#define	INVALID_COLOR	0xffffffff
#define	INVALID_MASK	0xffffffff
/*
 * Local functions prototypes.
 */

void page_ctr_add(int, int, page_t *, int);
void page_ctr_add_internal(int, int, page_t *, int);
void page_ctr_sub(int, int, page_t *, int);
void page_ctr_sub_internal(int, int, page_t *, int);
void page_freelist_lock(int);
void page_freelist_unlock(int);
page_t *page_promote(int, pfn_t, uchar_t, int, int);
page_t *page_demote(int, pfn_t, pfn_t, uchar_t, uchar_t, int, int);
page_t *page_freelist_split(uchar_t,
    uint_t, int, int, pfn_t, pfn_t, page_list_walker_t *);
page_t *page_get_mnode_cachelist(uint_t, uint_t, int, int);
static int page_trylock_cons(page_t *pp, se_t se);
/*
 * The page_counters array below is used to keep track of free contiguous
 * physical memory. A hw_page_map_t will be allocated per mnode per szc.
 * This contains an array of counters, the size of the array, a shift value
 * used to convert a pagenum into a counter array index or vice versa, as
 * well as a cache of the last successful index to be promoted to a larger
 * page size. As an optimization, we keep track of the last successful index
 * to be promoted per page color for the given size region, and this is
 * allocated dynamically based upon the number of colors for a given
 * region size.
 *
 * Conceptually, the page counters are represented as:
 *
 *	page_counters[region_size][mnode]
 *
 *	region_size:	size code of a candidate larger page made up
 *			of contiguous free smaller pages.
 *
 *	page_counters[region_size][mnode].hpm_counters[index]:
 *		represents how many (region_size - 1) pages either
 *		exist or can be created within the given index range.
 *
 * Let's look at a sparc example:
 *	If we want to create a free 512k page, we look at region_size 2
 *	for the mnode we want. We calculate the index and look at a specific
 *	hpm_counters location. If we see 8 (FULL_REGION_CNT on sparc) at
 *	this location, it means that 8 64k pages either exist or can be created
 *	from 8K pages in order to make a single free 512k page at the given
 *	index. Note that when a region is full, it will contribute to the
 *	counts in the region above it. Thus we will not know what page
 *	size the free pages will be which can be promoted to this new free
 *	page unless we look at all regions below the current region.
 */

/*
 * Note: hpmctr_t is defined in platform vm_dep.h
 * hw_page_map_t contains all the information needed for the page_counters
 * logic. The fields are as follows:
 *
 *	hpm_counters:	dynamically allocated array to hold counter data
 *	hpm_entries:	entries in hpm_counters
 *	hpm_shift:	shift for pnum/array index conv
 *	hpm_base:	PFN mapped to counter index 0
 *	hpm_color_current:	last index in counter array for this color at
 *				which we successfully created a large page
 */
typedef struct hw_page_map {
	hpmctr_t	*hpm_counters;
	size_t		hpm_entries;
	uint_t		hpm_shift;
	pfn_t		hpm_base;
	size_t		*hpm_color_current[MAX_MNODE_MRANGES];
} hw_page_map_t;
/*
 * Element zero is not used, but is allocated for convenience.
 */
static hw_page_map_t *page_counters[MMU_PAGE_SIZES];
/*
 * Cached value of MNODE_RANGE_CNT(mnode).
 * This is a function call in x86.
 */
static int mnode_nranges[MAX_MEM_NODES];
static int mnode_maxmrange[MAX_MEM_NODES];
/*
 * The following macros are convenient ways to get access to the individual
 * elements of the page_counters arrays. They can be used on both
 * the left side and right side of equations.
 */
#define	PAGE_COUNTERS(mnode, rg_szc, idx)			\
	(page_counters[(rg_szc)][(mnode)].hpm_counters[(idx)])

#define	PAGE_COUNTERS_COUNTERS(mnode, rg_szc)			\
	(page_counters[(rg_szc)][(mnode)].hpm_counters)

#define	PAGE_COUNTERS_SHIFT(mnode, rg_szc)			\
	(page_counters[(rg_szc)][(mnode)].hpm_shift)

#define	PAGE_COUNTERS_ENTRIES(mnode, rg_szc)			\
	(page_counters[(rg_szc)][(mnode)].hpm_entries)

#define	PAGE_COUNTERS_BASE(mnode, rg_szc)			\
	(page_counters[(rg_szc)][(mnode)].hpm_base)

#define	PAGE_COUNTERS_CURRENT_COLOR_ARRAY(mnode, rg_szc, g)	\
	(page_counters[(rg_szc)][(mnode)].hpm_color_current[(g)])

#define	PAGE_COUNTERS_CURRENT_COLOR(mnode, rg_szc, color, mrange)	\
	(page_counters[(rg_szc)][(mnode)].				\
	hpm_color_current[(mrange)][(color)])

#define	PNUM_TO_IDX(mnode, rg_szc, pnum)			\
	(((pnum) - PAGE_COUNTERS_BASE((mnode), (rg_szc))) >>	\
	    PAGE_COUNTERS_SHIFT((mnode), (rg_szc)))

#define	IDX_TO_PNUM(mnode, rg_szc, index)			\
	(PAGE_COUNTERS_BASE((mnode), (rg_szc)) +		\
	    ((index) << PAGE_COUNTERS_SHIFT((mnode), (rg_szc))))
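/*
 * Worked example (illustrative values, not from the original source): if
 * hpm_base is pfn 0x80000 and hpm_shift is 3, then pfn 0x80010 maps to
 * counter index (0x80010 - 0x80000) >> 3 == 2, and IDX_TO_PNUM() maps index
 * 2 back to 0x80000 + (2 << 3) == 0x80010, which is why the two macros are
 * asserted to be inverses of each other elsewhere in this file.
 */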
/*
 * Protects the hpm_counters and hpm_color_current memory from changing while
 * looking at page counters information.
 * Grab the write lock to modify what these fields point at.
 * Grab the read lock to prevent any pointers from changing.
 * The write lock can not be held during memory allocation due to a possible
 * recursion deadlock with trying to grab the read lock while the
 * write lock is already held.
 */
krwlock_t page_ctrs_rwlock[MAX_MEM_NODES];
/*
 * initialize cpu_vm_data to point at cache aligned vm_cpu_data_t.
 */
void
cpu_vm_data_init(struct cpu *cp)
{
	if (cp == CPU0) {
		cp->cpu_vm_data = (void *)&vm_cpu_data0;
	} else {
		void	*kmptr;
		int	align;
		size_t	sz;

		align = (L2CACHE_ALIGN) ? L2CACHE_ALIGN : L2CACHE_ALIGN_MAX;
		sz = P2ROUNDUP(sizeof (vm_cpu_data_t), align) + align;
		kmptr = kmem_zalloc(sz, KM_SLEEP);
		cp->cpu_vm_data = (void *) P2ROUNDUP((uintptr_t)kmptr, align);
		((vm_cpu_data_t *)cp->cpu_vm_data)->vc_kmptr = kmptr;
		((vm_cpu_data_t *)cp->cpu_vm_data)->vc_kmsize = sz;
	}
}
void
cpu_vm_data_destroy(struct cpu *cp)
{
	if (cp->cpu_seqid && cp->cpu_vm_data) {
		kmem_free(((vm_cpu_data_t *)cp->cpu_vm_data)->vc_kmptr,
		    ((vm_cpu_data_t *)cp->cpu_vm_data)->vc_kmsize);
	}
	cp->cpu_vm_data = NULL;
}
/*
 * page size to page size code
 */
int
page_szc(size_t pagesize)
{
	int	i = 0;

	while (hw_page_array[i].hp_size) {
		if (pagesize == hw_page_array[i].hp_size)
			return (i);
		i++;
	}
	return (-1);
}

/*
 * page size to page size code with the restriction that it be a supported
 * user page size. If it's not a supported user page size, -1 will be returned.
 */
int
page_szc_user_filtered(size_t pagesize)
{
	int szc = page_szc(pagesize);

	if ((szc != -1) && (SZC_2_USERSZC(szc) != -1)) {
		return (szc);
	}
	return (-1);
}
/*
 * Return how many page sizes are available for the user to use. This is
 * what the hardware supports and not based upon how the OS implements the
 * support of different page sizes.
 *
 * If legacy is non-zero, return the number of pagesizes available to legacy
 * applications. The number of legacy page sizes might be less than the
 * exported user page sizes. This is to prevent legacy applications that
 * use the largest page size returned from getpagesizes(3c) from inadvertently
 * using the 'new' large pagesizes.
 */
uint_t
page_num_user_pagesizes(int legacy)
{
	if (legacy)
		return (mmu_legacy_page_sizes);
	return (mmu_exported_page_sizes);
}

uint_t
page_num_pagesizes(void)
{
	return (mmu_page_sizes);
}
/*
 * returns the count of the number of base pagesize pages associated with szc
 */
pgcnt_t
page_get_pagecnt(uint_t szc)
{
	if (szc >= mmu_page_sizes)
		panic("page_get_pagecnt: out of range %d", szc);
	return (hw_page_array[szc].hp_pgcnt);
}

size_t
page_get_pagesize(uint_t szc)
{
	if (szc >= mmu_page_sizes)
		panic("page_get_pagesize: out of range %d", szc);
	return (hw_page_array[szc].hp_size);
}
/*
 * Return the size of a page based upon the index passed in. An index of
 * zero refers to the smallest page size in the system, and as index increases
 * it refers to the next larger supported page size in the system.
 * Note that szc and userszc may not be the same due to unsupported szc's on
 * some systems.
 */
size_t
page_get_user_pagesize(uint_t userszc)
{
	uint_t szc = USERSZC_2_SZC(userszc);

	if (szc >= mmu_page_sizes)
		panic("page_get_user_pagesize: out of range %d", szc);
	return (hw_page_array[szc].hp_size);
}

uint_t
page_get_shift(uint_t szc)
{
	if (szc >= mmu_page_sizes)
		panic("page_get_shift: out of range %d", szc);
	return (PAGE_GET_SHIFT(szc));
}

uint_t
page_get_pagecolors(uint_t szc)
{
	if (szc >= mmu_page_sizes)
		panic("page_get_pagecolors: out of range %d", szc);
	return (PAGE_GET_PAGECOLORS(szc));
}
/*
 * this assigns the desired equivalent color after a split
 */
uint_t
page_correct_color(uchar_t szc, uchar_t nszc, uint_t color,
    uint_t ncolor, uint_t ceq_mask)
{
	ASSERT(szc < mmu_page_sizes);
	ASSERT(color < PAGE_GET_PAGECOLORS(szc));
	ASSERT(ncolor < PAGE_GET_PAGECOLORS(nszc));

	color &= ceq_mask;
	ncolor = PAGE_CONVERT_COLOR(ncolor, szc, nszc);
	return (color | (ncolor & ~ceq_mask));
}
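/*
 * Illustrative example (values are assumptions, not from the original
 * source): with ceq_mask 0x0f, a requested color of 0x05 and a larger-page
 * color that converts to 0x32, the result is (0x05 & 0x0f) | (0x32 & ~0x0f)
 * == 0x35 -- the equivalency bits come from the caller's request and the
 * remaining bits from the page being split.
 */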
/*
 * The interleaved_mnodes flag is set when mnodes overlap in
 * the physbase..physmax range, but have disjoint slices.
 * In this case hpm_counters is shared by all mnodes.
 * This flag is set dynamically by the platform.
 */
int interleaved_mnodes = 0;
/*
 * Called by startup().
 * Size up the per page size free list counters based on physmax
 * of each node and max_mem_nodes.
 *
 * If interleaved_mnodes is set we need to find the first mnode that
 * exists. hpm_counters for the first mnode will then be shared by
 * all other mnodes. If interleaved_mnodes is not set, just set
 * first=mnode each time. That means there will be no sharing.
 */
542 int r
; /* region size */
544 int firstmn
; /* first mnode that exists */
550 pgcnt_t colors_per_szc
[MMU_PAGE_SIZES
];
553 * We need to determine how many page colors there are for each
554 * page size in order to allocate memory for any color specific
557 for (i
= 0; i
< mmu_page_sizes
; i
++) {
558 colors_per_szc
[i
] = PAGE_GET_PAGECOLORS(i
);
561 for (firstmn
= -1, mnode
= 0; mnode
< max_mem_nodes
; mnode
++) {
567 if (mem_node_config
[mnode
].exists
== 0)
570 HPM_COUNTERS_LIMITS(mnode
, physbase
, physmax
, firstmn
);
571 nranges
= MNODE_RANGE_CNT(mnode
);
572 mnode_nranges
[mnode
] = nranges
;
573 mnode_maxmrange
[mnode
] = MNODE_MAX_MRANGE(mnode
);
576 * determine size needed for page counter arrays with
577 * base aligned to large page size.
579 for (r
= 1; r
< mmu_page_sizes
; r
++) {
580 /* add in space for hpm_color_current */
581 ctrs_sz
+= sizeof (size_t) *
582 colors_per_szc
[r
] * nranges
;
584 if (firstmn
!= mnode
)
587 /* add in space for hpm_counters */
588 r_align
= page_get_pagecnt(r
);
590 r_base
&= ~(r_align
- 1);
591 r_pgcnt
= howmany(physmax
- r_base
+ 1, r_align
);
594 * Round up to always allocate on pointer sized
597 ctrs_sz
+= P2ROUNDUP((r_pgcnt
* sizeof (hpmctr_t
)),
598 sizeof (hpmctr_t
*));
602 for (r
= 1; r
< mmu_page_sizes
; r
++) {
603 ctrs_sz
+= (max_mem_nodes
* sizeof (hw_page_map_t
));
606 /* add in space for page_ctrs_cands and pcc_color_free */
607 ctrs_sz
+= sizeof (pcc_info_t
*) * max_mem_nodes
*
608 mmu_page_sizes
* NPC_MUTEX
;
610 for (mnode
= 0; mnode
< max_mem_nodes
; mnode
++) {
612 if (mem_node_config
[mnode
].exists
== 0)
615 nranges
= mnode_nranges
[mnode
];
616 ctrs_sz
+= sizeof (pcc_info_t
) * nranges
*
617 mmu_page_sizes
* NPC_MUTEX
;
618 for (r
= 1; r
< mmu_page_sizes
; r
++) {
619 ctrs_sz
+= sizeof (pgcnt_t
) * nranges
*
620 colors_per_szc
[r
] * NPC_MUTEX
;
625 ctrs_sz
+= (max_mem_nodes
* NPC_MUTEX
* sizeof (kmutex_t
));
627 /* size for page list counts */
631 * add some slop for roundups. page_ctrs_alloc will roundup the start
632 * address of the counters to ecache_alignsize boundary for every
635 return (ctrs_sz
+ max_mem_nodes
* L2CACHE_ALIGN
);
639 page_ctrs_alloc(caddr_t alloc_base
)
643 int r
; /* region size */
645 int firstmn
; /* first mnode that exists */
648 pgcnt_t colors_per_szc
[MMU_PAGE_SIZES
];
651 * We need to determine how many page colors there are for each
652 * page size in order to allocate memory for any color specific
655 for (i
= 0; i
< mmu_page_sizes
; i
++) {
656 colors_per_szc
[i
] = PAGE_GET_PAGECOLORS(i
);
659 for (r
= 1; r
< mmu_page_sizes
; r
++) {
660 page_counters
[r
] = (hw_page_map_t
*)alloc_base
;
661 alloc_base
+= (max_mem_nodes
* sizeof (hw_page_map_t
));
664 /* page_ctrs_cands and pcc_color_free array */
665 for (i
= 0; i
< NPC_MUTEX
; i
++) {
666 for (r
= 1; r
< mmu_page_sizes
; r
++) {
668 page_ctrs_cands
[i
][r
] = (pcc_info_t
**)alloc_base
;
669 alloc_base
+= sizeof (pcc_info_t
*) * max_mem_nodes
;
671 for (mnode
= 0; mnode
< max_mem_nodes
; mnode
++) {
674 if (mem_node_config
[mnode
].exists
== 0)
677 nranges
= mnode_nranges
[mnode
];
679 pi
= (pcc_info_t
*)alloc_base
;
680 alloc_base
+= sizeof (pcc_info_t
) * nranges
;
681 page_ctrs_cands
[i
][r
][mnode
] = pi
;
683 for (mrange
= 0; mrange
< nranges
; mrange
++) {
685 (pgcnt_t
*)alloc_base
;
686 alloc_base
+= sizeof (pgcnt_t
) *
695 for (i
= 0; i
< NPC_MUTEX
; i
++) {
696 ctr_mutex
[i
] = (kmutex_t
*)alloc_base
;
697 alloc_base
+= (max_mem_nodes
* sizeof (kmutex_t
));
700 /* initialize page list counts */
701 PLCNT_INIT(alloc_base
);
703 for (firstmn
= -1, mnode
= 0; mnode
< max_mem_nodes
; mnode
++) {
709 int nranges
= mnode_nranges
[mnode
];
711 if (mem_node_config
[mnode
].exists
== 0)
714 HPM_COUNTERS_LIMITS(mnode
, physbase
, physmax
, firstmn
);
716 for (r
= 1; r
< mmu_page_sizes
; r
++) {
718 * the page_counters base has to be aligned to the
719 * page count of page size code r otherwise the counts
720 * will cross large page boundaries.
722 r_align
= page_get_pagecnt(r
);
724 /* base needs to be aligned - lower to aligned value */
725 r_base
&= ~(r_align
- 1);
726 r_pgcnt
= howmany(physmax
- r_base
+ 1, r_align
);
727 r_shift
= PAGE_BSZS_SHIFT(r
);
729 PAGE_COUNTERS_SHIFT(mnode
, r
) = r_shift
;
730 PAGE_COUNTERS_ENTRIES(mnode
, r
) = r_pgcnt
;
731 PAGE_COUNTERS_BASE(mnode
, r
) = r_base
;
732 for (mrange
= 0; mrange
< nranges
; mrange
++) {
733 PAGE_COUNTERS_CURRENT_COLOR_ARRAY(mnode
,
734 r
, mrange
) = (size_t *)alloc_base
;
735 alloc_base
+= sizeof (size_t) *
738 for (i
= 0; i
< colors_per_szc
[r
]; i
++) {
739 uint_t color_mask
= colors_per_szc
[r
] - 1;
740 pfn_t pfnum
= r_base
;
743 MEM_NODE_ITERATOR_DECL(it
);
745 MEM_NODE_ITERATOR_INIT(pfnum
, mnode
, r
, &it
);
746 if (pfnum
== (pfn_t
)-1) {
749 PAGE_NEXT_PFN_FOR_COLOR(pfnum
, r
, i
,
750 color_mask
, color_mask
, &it
);
751 idx
= PNUM_TO_IDX(mnode
, r
, pfnum
);
752 idx
= (idx
>= r_pgcnt
) ? 0 : idx
;
754 for (mrange
= 0; mrange
< nranges
; mrange
++) {
755 PAGE_COUNTERS_CURRENT_COLOR(mnode
,
760 /* hpm_counters may be shared by all mnodes */
761 if (firstmn
== mnode
) {
762 PAGE_COUNTERS_COUNTERS(mnode
, r
) =
763 (hpmctr_t
*)alloc_base
;
765 P2ROUNDUP((sizeof (hpmctr_t
) * r_pgcnt
),
766 sizeof (hpmctr_t
*));
768 PAGE_COUNTERS_COUNTERS(mnode
, r
) =
769 PAGE_COUNTERS_COUNTERS(firstmn
, r
);
773 * Verify that PNUM_TO_IDX and IDX_TO_PNUM
774 * satisfy the identity requirement.
775 * We should be able to go from one to the other
776 * and get consistent values.
778 ASSERT(PNUM_TO_IDX(mnode
, r
,
779 (IDX_TO_PNUM(mnode
, r
, 0))) == 0);
780 ASSERT(IDX_TO_PNUM(mnode
, r
,
781 (PNUM_TO_IDX(mnode
, r
, r_base
))) == r_base
);
784 * Roundup the start address of the page_counters to
785 * cache aligned boundary for every memory node.
786 * page_ctrs_sz() has added some slop for these roundups.
788 alloc_base
= (caddr_t
)P2ROUNDUP((uintptr_t)alloc_base
,
792 /* Initialize other page counter specific data structures. */
793 for (mnode
= 0; mnode
< MAX_MEM_NODES
; mnode
++) {
794 rw_init(&page_ctrs_rwlock
[mnode
], NULL
, RW_DEFAULT
, NULL
);
/*
 * Functions to adjust region counters for each size free list.
 * Caller is responsible to acquire the ctr_mutex lock if necessary and
 * thus can be called during startup without locks.
 */
807 page_ctr_add_internal(int mnode
, int mtype
, page_t
*pp
, int flags
)
809 ssize_t r
; /* region size */
814 ASSERT(mnode
== PP_2_MEM_NODE(pp
));
815 ASSERT(mtype
== PP_2_MTYPE(pp
));
817 ASSERT(pp
->p_szc
< mmu_page_sizes
);
819 PLCNT_INCR(pp
, mnode
, mtype
, pp
->p_szc
, flags
);
821 /* no counter update needed for largest page size */
822 if (pp
->p_szc
>= mmu_page_sizes
- 1) {
827 pfnum
= pp
->p_pagenum
;
828 lckidx
= PP_CTR_LOCK_INDX(pp
);
	/*
	 * Increment the count of free pages for the current
	 * region. Continue looping up in region size incrementing
	 * count if the preceding region is full.
	 */
835 while (r
< mmu_page_sizes
) {
836 idx
= PNUM_TO_IDX(mnode
, r
, pfnum
);
838 ASSERT(idx
< PAGE_COUNTERS_ENTRIES(mnode
, r
));
839 ASSERT(PAGE_COUNTERS(mnode
, r
, idx
) < FULL_REGION_CNT(r
));
841 if (++PAGE_COUNTERS(mnode
, r
, idx
) != FULL_REGION_CNT(r
)) {
844 int root_mtype
= PP_2_MTYPE(PP_GROUPLEADER(pp
, r
));
845 pcc_info_t
*cand
= &page_ctrs_cands
[lckidx
][r
][mnode
]
846 [MTYPE_2_MRANGE(mnode
, root_mtype
)];
848 cand
->pcc_pages_free
++;
849 cand
->pcc_color_free
[PP_2_BIN_SZC(pp
, r
)]++;
856 page_ctr_add(int mnode
, int mtype
, page_t
*pp
, int flags
)
858 int lckidx
= PP_CTR_LOCK_INDX(pp
);
859 kmutex_t
*lock
= &ctr_mutex
[lckidx
][mnode
];
862 page_ctr_add_internal(mnode
, mtype
, pp
, flags
);
867 page_ctr_sub_internal(int mnode
, int mtype
, page_t
*pp
, int flags
)
870 ssize_t r
; /* region size */
874 ASSERT(mnode
== PP_2_MEM_NODE(pp
));
875 ASSERT(mtype
== PP_2_MTYPE(pp
));
877 ASSERT(pp
->p_szc
< mmu_page_sizes
);
879 PLCNT_DECR(pp
, mnode
, mtype
, pp
->p_szc
, flags
);
881 /* no counter update needed for largest page size */
882 if (pp
->p_szc
>= mmu_page_sizes
- 1) {
887 pfnum
= pp
->p_pagenum
;
888 lckidx
= PP_CTR_LOCK_INDX(pp
);
	/*
	 * Decrement the count of free pages for the current
	 * region. Continue looping up in region size decrementing
	 * count if the preceding region was full.
	 */
895 while (r
< mmu_page_sizes
) {
896 idx
= PNUM_TO_IDX(mnode
, r
, pfnum
);
898 ASSERT(idx
< PAGE_COUNTERS_ENTRIES(mnode
, r
));
899 ASSERT(PAGE_COUNTERS(mnode
, r
, idx
) > 0);
901 if (--PAGE_COUNTERS(mnode
, r
, idx
) != FULL_REGION_CNT(r
) - 1) {
904 int root_mtype
= PP_2_MTYPE(PP_GROUPLEADER(pp
, r
));
905 pcc_info_t
*cand
= &page_ctrs_cands
[lckidx
][r
][mnode
]
906 [MTYPE_2_MRANGE(mnode
, root_mtype
)];
908 ASSERT(cand
->pcc_pages_free
!= 0);
909 ASSERT(cand
->pcc_color_free
[PP_2_BIN_SZC(pp
, r
)] != 0);
911 cand
->pcc_pages_free
--;
912 cand
->pcc_color_free
[PP_2_BIN_SZC(pp
, r
)]--;
919 page_ctr_sub(int mnode
, int mtype
, page_t
*pp
, int flags
)
921 int lckidx
= PP_CTR_LOCK_INDX(pp
);
922 kmutex_t
*lock
= &ctr_mutex
[lckidx
][mnode
];
925 page_ctr_sub_internal(mnode
, mtype
, pp
, flags
);
/*
 * Adjust page counters following a memory attach, since typically the
 * size of the array needs to change, and the PFN to counter index
 * mapping needs to change.
 *
 * It is possible this mnode did not exist at startup. In that case
 * allocate pcc_info_t and pcc_color_free arrays. Also, allow for nranges
 * to change (a theoretical possibility on x86), which means pcc_color_free
 * arrays must be extended.
 */
940 page_ctrs_adjust(int mnode
)
943 int r
; /* region size */
945 size_t pcsz
, old_csz
;
946 hpmctr_t
*new_ctr
, *old_ctr
;
947 pfn_t oldbase
, newbase
;
948 pfn_t physbase
, physmax
;
950 hpmctr_t
*ctr_cache
[MMU_PAGE_SIZES
];
951 size_t size_cache
[MMU_PAGE_SIZES
];
952 size_t *color_cache
[MMU_PAGE_SIZES
][MAX_MNODE_MRANGES
];
953 size_t *old_color_array
[MAX_MNODE_MRANGES
];
954 pgcnt_t colors_per_szc
[MMU_PAGE_SIZES
];
955 pcc_info_t
**cands_cache
;
956 pcc_info_t
*old_pi
, *pi
;
958 int nr
, old_nranges
, mrange
, nranges
= MNODE_RANGE_CNT(mnode
);
959 int cands_cache_nranges
;
960 int old_maxmrange
, new_maxmrange
;
964 cands_cache
= kmem_zalloc(sizeof (pcc_info_t
*) * NPC_MUTEX
*
965 MMU_PAGE_SIZES
, KM_NOSLEEP
);
966 if (cands_cache
== NULL
)
970 HPM_COUNTERS_LIMITS(mnode
, physbase
, physmax
, i
);
972 newbase
= physbase
& ~PC_BASE_ALIGN_MASK
;
973 npgs
= roundup(physmax
, PC_BASE_ALIGN
) - newbase
;
975 /* prepare to free non-null pointers on the way out */
976 cands_cache_nranges
= nranges
;
977 bzero(ctr_cache
, sizeof (ctr_cache
));
978 bzero(color_cache
, sizeof (color_cache
));
981 * We need to determine how many page colors there are for each
982 * page size in order to allocate memory for any color specific
985 for (r
= 0; r
< mmu_page_sizes
; r
++) {
986 colors_per_szc
[r
] = PAGE_GET_PAGECOLORS(r
);
990 * Preallocate all of the new hpm_counters arrays as we can't
991 * hold the page_ctrs_rwlock as a writer and allocate memory.
992 * If we can't allocate all of the arrays, undo our work so far
993 * and return failure.
995 for (r
= 1; r
< mmu_page_sizes
; r
++) {
996 pcsz
= npgs
>> PAGE_BSZS_SHIFT(r
);
997 size_cache
[r
] = pcsz
;
998 ctr_cache
[r
] = kmem_zalloc(pcsz
*
999 sizeof (hpmctr_t
), KM_NOSLEEP
);
1000 if (ctr_cache
[r
] == NULL
) {
1007 * Preallocate all of the new color current arrays as we can't
1008 * hold the page_ctrs_rwlock as a writer and allocate memory.
1009 * If we can't allocate all of the arrays, undo our work so far
1010 * and return failure.
1012 for (r
= 1; r
< mmu_page_sizes
; r
++) {
1013 for (mrange
= 0; mrange
< nranges
; mrange
++) {
1014 color_cache
[r
][mrange
] = kmem_zalloc(sizeof (size_t) *
1015 colors_per_szc
[r
], KM_NOSLEEP
);
1016 if (color_cache
[r
][mrange
] == NULL
) {
1024 * Preallocate all of the new pcc_info_t arrays as we can't
1025 * hold the page_ctrs_rwlock as a writer and allocate memory.
1026 * If we can't allocate all of the arrays, undo our work so far
1027 * and return failure.
1029 for (r
= 1; r
< mmu_page_sizes
; r
++) {
1030 for (i
= 0; i
< NPC_MUTEX
; i
++) {
1031 pi
= kmem_zalloc(nranges
* sizeof (pcc_info_t
),
1037 cands_cache
[i
* MMU_PAGE_SIZES
+ r
] = pi
;
1039 for (mrange
= 0; mrange
< nranges
; mrange
++, pi
++) {
1040 pgcntp
= kmem_zalloc(colors_per_szc
[r
] *
1041 sizeof (pgcnt_t
), KM_NOSLEEP
);
1042 if (pgcntp
== NULL
) {
1046 pi
->pcc_color_free
= pgcntp
;
1052 * Grab the write lock to prevent others from walking these arrays
1053 * while we are modifying them.
1055 PAGE_CTRS_WRITE_LOCK(mnode
);
1058 * For interleaved mnodes, find the first mnode
1059 * with valid page counters since the current
1060 * mnode may have just been added and not have
1061 * valid page counters.
1063 if (interleaved_mnodes
) {
1064 for (i
= 0; i
< max_mem_nodes
; i
++)
1065 if (PAGE_COUNTERS_COUNTERS(i
, 1) != NULL
)
1067 ASSERT(i
< max_mem_nodes
);
1072 old_nranges
= mnode_nranges
[mnode
];
1073 cands_cache_nranges
= old_nranges
;
1074 mnode_nranges
[mnode
] = nranges
;
1075 old_maxmrange
= mnode_maxmrange
[mnode
];
1076 mnode_maxmrange
[mnode
] = MNODE_MAX_MRANGE(mnode
);
1077 new_maxmrange
= mnode_maxmrange
[mnode
];
1079 for (r
= 1; r
< mmu_page_sizes
; r
++) {
1080 PAGE_COUNTERS_SHIFT(mnode
, r
) = PAGE_BSZS_SHIFT(r
);
1081 old_ctr
= PAGE_COUNTERS_COUNTERS(oldmnode
, r
);
1082 old_csz
= PAGE_COUNTERS_ENTRIES(oldmnode
, r
);
1083 oldbase
= PAGE_COUNTERS_BASE(oldmnode
, r
);
1084 old_npgs
= old_csz
<< PAGE_COUNTERS_SHIFT(oldmnode
, r
);
1085 for (mrange
= 0; mrange
< MAX_MNODE_MRANGES
; mrange
++) {
1086 old_color_array
[mrange
] =
1087 PAGE_COUNTERS_CURRENT_COLOR_ARRAY(mnode
,
1091 pcsz
= npgs
>> PAGE_COUNTERS_SHIFT(mnode
, r
);
1092 new_ctr
= ctr_cache
[r
];
1093 ctr_cache
[r
] = NULL
;
1094 if (old_ctr
!= NULL
&&
1095 (oldbase
+ old_npgs
> newbase
) &&
1096 (newbase
+ npgs
> oldbase
)) {
1098 * Map the intersection of the old and new
1099 * counters into the new array.
1102 if (newbase
> oldbase
) {
1103 offset
= (newbase
- oldbase
) >>
1104 PAGE_COUNTERS_SHIFT(mnode
, r
);
1105 bcopy(old_ctr
+ offset
, new_ctr
,
1106 MIN(pcsz
, (old_csz
- offset
)) *
1109 offset
= (oldbase
- newbase
) >>
1110 PAGE_COUNTERS_SHIFT(mnode
, r
);
1111 bcopy(old_ctr
, new_ctr
+ offset
,
1112 MIN(pcsz
- offset
, old_csz
) *
1117 PAGE_COUNTERS_COUNTERS(mnode
, r
) = new_ctr
;
1118 PAGE_COUNTERS_ENTRIES(mnode
, r
) = pcsz
;
1119 PAGE_COUNTERS_BASE(mnode
, r
) = newbase
;
1121 /* update shared hpm_counters in other mnodes */
1122 if (interleaved_mnodes
) {
1123 for (i
= 0; i
< max_mem_nodes
; i
++) {
1125 (mem_node_config
[i
].exists
== 0))
1128 PAGE_COUNTERS_COUNTERS(i
, r
) == old_ctr
||
1129 PAGE_COUNTERS_COUNTERS(i
, r
) == NULL
);
1130 PAGE_COUNTERS_COUNTERS(i
, r
) = new_ctr
;
1131 PAGE_COUNTERS_ENTRIES(i
, r
) = pcsz
;
1132 PAGE_COUNTERS_BASE(i
, r
) = newbase
;
1136 for (mrange
= 0; mrange
< MAX_MNODE_MRANGES
; mrange
++) {
1137 PAGE_COUNTERS_CURRENT_COLOR_ARRAY(mnode
, r
, mrange
) =
1138 color_cache
[r
][mrange
];
1139 color_cache
[r
][mrange
] = NULL
;
1142 * for now, just reset on these events as it's probably
1143 * not worthwhile to try and optimize this.
1145 for (i
= 0; i
< colors_per_szc
[r
]; i
++) {
1146 uint_t color_mask
= colors_per_szc
[r
] - 1;
1147 int mlo
= interleaved_mnodes
? 0 : mnode
;
1148 int mhi
= interleaved_mnodes
? max_mem_nodes
:
1153 MEM_NODE_ITERATOR_DECL(it
);
1155 for (m
= mlo
; m
< mhi
; m
++) {
1156 if (mem_node_config
[m
].exists
== 0)
1159 MEM_NODE_ITERATOR_INIT(pfnum
, m
, r
, &it
);
1160 if (pfnum
== (pfn_t
)-1) {
1163 PAGE_NEXT_PFN_FOR_COLOR(pfnum
, r
, i
,
1164 color_mask
, color_mask
, &it
);
1165 idx
= PNUM_TO_IDX(m
, r
, pfnum
);
1166 idx
= (idx
< pcsz
) ? idx
: 0;
1168 for (mrange
= 0; mrange
< nranges
; mrange
++) {
1169 if (PAGE_COUNTERS_CURRENT_COLOR_ARRAY(m
,
1171 PAGE_COUNTERS_CURRENT_COLOR(m
,
1172 r
, i
, mrange
) = idx
;
1177 /* cache info for freeing out of the critical path */
1178 if ((caddr_t
)old_ctr
>= kernelheap
&&
1179 (caddr_t
)old_ctr
< ekernelheap
) {
1180 ctr_cache
[r
] = old_ctr
;
1181 size_cache
[r
] = old_csz
;
1183 for (mrange
= 0; mrange
< MAX_MNODE_MRANGES
; mrange
++) {
1184 size_t *tmp
= old_color_array
[mrange
];
1185 if ((caddr_t
)tmp
>= kernelheap
&&
1186 (caddr_t
)tmp
< ekernelheap
) {
1187 color_cache
[r
][mrange
] = tmp
;
1191 * Verify that PNUM_TO_IDX and IDX_TO_PNUM
1192 * satisfy the identity requirement.
1193 * We should be able to go from one to the other
1194 * and get consistent values.
1196 ASSERT(PNUM_TO_IDX(mnode
, r
,
1197 (IDX_TO_PNUM(mnode
, r
, 0))) == 0);
1198 ASSERT(IDX_TO_PNUM(mnode
, r
,
1199 (PNUM_TO_IDX(mnode
, r
, newbase
))) == newbase
);
1201 /* pcc_info_t and pcc_color_free */
1202 for (i
= 0; i
< NPC_MUTEX
; i
++) {
1204 pcc_info_t
*eold_pi
;
1206 pi
= cands_cache
[i
* MMU_PAGE_SIZES
+ r
];
1207 old_pi
= page_ctrs_cands
[i
][r
][mnode
];
1208 page_ctrs_cands
[i
][r
][mnode
] = pi
;
1209 cands_cache
[i
* MMU_PAGE_SIZES
+ r
] = old_pi
;
1211 /* preserve old pcc_color_free values, if any */
1216 * when/if x86 does DR, must account for
1217 * possible change in range index when
1218 * preserving pcc_info
1221 eold_pi
= &old_pi
[old_nranges
];
1222 if (new_maxmrange
> old_maxmrange
) {
1223 pi
+= new_maxmrange
- old_maxmrange
;
1224 } else if (new_maxmrange
< old_maxmrange
) {
1225 old_pi
+= old_maxmrange
- new_maxmrange
;
1227 for (; pi
< epi
&& old_pi
< eold_pi
; pi
++, old_pi
++) {
1228 pcc_info_t tmp
= *pi
;
1234 PAGE_CTRS_WRITE_UNLOCK(mnode
);
1237 * Now that we have dropped the write lock, it is safe to free all
1238 * of the memory we have cached above.
1239 * We come thru here to free memory when pre-alloc fails, and also to
1240 * free old pointers which were recorded while locked.
1243 for (r
= 1; r
< mmu_page_sizes
; r
++) {
1244 if (ctr_cache
[r
] != NULL
) {
1245 kmem_free(ctr_cache
[r
],
1246 size_cache
[r
] * sizeof (hpmctr_t
));
1248 for (mrange
= 0; mrange
< MAX_MNODE_MRANGES
; mrange
++) {
1249 if (color_cache
[r
][mrange
] != NULL
) {
1250 kmem_free(color_cache
[r
][mrange
],
1251 colors_per_szc
[r
] * sizeof (size_t));
1254 for (i
= 0; i
< NPC_MUTEX
; i
++) {
1255 pi
= cands_cache
[i
* MMU_PAGE_SIZES
+ r
];
1258 nr
= cands_cache_nranges
;
1259 for (mrange
= 0; mrange
< nr
; mrange
++, pi
++) {
1260 pgcntp
= pi
->pcc_color_free
;
1263 if ((caddr_t
)pgcntp
>= kernelheap
&&
1264 (caddr_t
)pgcntp
< ekernelheap
) {
1270 pi
= cands_cache
[i
* MMU_PAGE_SIZES
+ r
];
1271 if ((caddr_t
)pi
>= kernelheap
&&
1272 (caddr_t
)pi
< ekernelheap
) {
1273 kmem_free(pi
, nr
* sizeof (pcc_info_t
));
1278 kmem_free(cands_cache
,
1279 sizeof (pcc_info_t
*) * NPC_MUTEX
* MMU_PAGE_SIZES
);
1284 * Cleanup the hpm_counters field in the page counters
1288 page_ctrs_cleanup(void)
1290 int r
; /* region size */
1291 int i
; /* mnode index */
1294 * Get the page counters write lock while we are
1295 * setting the page hpm_counters field to NULL
1296 * for non-existent mnodes.
1298 for (i
= 0; i
< max_mem_nodes
; i
++) {
1299 PAGE_CTRS_WRITE_LOCK(i
);
1300 if (mem_node_config
[i
].exists
) {
1301 PAGE_CTRS_WRITE_UNLOCK(i
);
1304 for (r
= 1; r
< mmu_page_sizes
; r
++) {
1305 PAGE_COUNTERS_COUNTERS(i
, r
) = NULL
;
1307 PAGE_CTRS_WRITE_UNLOCK(i
);
1314 * confirm pp is a large page corresponding to szc
1317 chk_lpg(page_t
*pp
, uchar_t szc
)
1319 spgcnt_t npgs
= page_get_pagecnt(pp
->p_szc
);
1323 ASSERT(pp
->p_szc
== 0);
1324 ASSERT(pp
->p_next
== pp
);
1325 ASSERT(pp
->p_prev
== pp
);
1329 ASSERT(pp
->p_vpnext
== pp
|| pp
->p_vpnext
== NULL
);
1330 ASSERT(pp
->p_vpprev
== pp
|| pp
->p_vpprev
== NULL
);
1332 ASSERT(IS_P2ALIGNED(pp
->p_pagenum
, npgs
));
1333 ASSERT(pp
->p_pagenum
== (pp
->p_next
->p_pagenum
- 1));
1334 ASSERT(pp
->p_prev
->p_pagenum
== (pp
->p_pagenum
+ (npgs
- 1)));
1335 ASSERT(pp
->p_prev
== (pp
+ (npgs
- 1)));
1338 * Check list of pages.
1340 noreloc
= PP_ISNORELOC(pp
);
1343 ASSERT(pp
->p_pagenum
== pp
->p_next
->p_pagenum
- 1);
1344 ASSERT(pp
->p_next
== (pp
+ 1));
1346 ASSERT(pp
->p_szc
== szc
);
1347 ASSERT(PP_ISFREE(pp
));
1348 ASSERT(PP_ISAGED(pp
));
1349 ASSERT(pp
->p_vpnext
== pp
|| pp
->p_vpnext
== NULL
);
1350 ASSERT(pp
->p_vpprev
== pp
|| pp
->p_vpprev
== NULL
);
1351 ASSERT(pp
->p_vnode
== NULL
);
1352 ASSERT(PP_ISNORELOC(pp
) == noreloc
);
void
page_freelist_lock(int mnode)
{
	int i;

	for (i = 0; i < NPC_MUTEX; i++) {
		mutex_enter(FPC_MUTEX(mnode, i));
		mutex_enter(CPC_MUTEX(mnode, i));
	}
}

void
page_freelist_unlock(int mnode)
{
	int i;

	for (i = 0; i < NPC_MUTEX; i++) {
		mutex_exit(FPC_MUTEX(mnode, i));
		mutex_exit(CPC_MUTEX(mnode, i));
	}
}
/*
 * add pp to the specified page list. Defaults to head of the page list
 * unless PG_LIST_TAIL is specified.
 */
void
page_list_add(page_t *pp, int flags)
{
1391 ASSERT(PAGE_EXCL(pp
) || (flags
& PG_LIST_ISINIT
));
1392 ASSERT(PP_ISFREE(pp
));
1393 ASSERT(!hat_page_is_mapped(pp
));
1394 ASSERT(hat_page_getshare(pp
) == 0);
1397 * Large pages should be freed via page_list_add_pages().
1399 ASSERT(pp
->p_szc
== 0);
1402 * Don't need to lock the freelist first here
1403 * because the page isn't on the freelist yet.
1404 * This means p_szc can't change on us.
1408 mnode
= PP_2_MEM_NODE(pp
);
1409 mtype
= PP_2_MTYPE(pp
);
1411 if (flags
& PG_LIST_ISINIT
) {
1413 * PG_LIST_ISINIT is set during system startup (ie. single
1414 * threaded), add a page to the free list and add to the
1415 * the free region counters w/o any locking
1417 ppp
= &PAGE_FREELISTS(mnode
, 0, bin
, mtype
);
1419 /* inline version of page_add() */
1422 pp
->p_prev
= (*ppp
)->p_prev
;
1423 (*ppp
)->p_prev
= pp
;
1424 pp
->p_prev
->p_next
= pp
;
1428 page_ctr_add_internal(mnode
, mtype
, pp
, flags
);
1429 VM_STAT_ADD(vmm_vmstats
.pladd_free
[0]);
1431 pcm
= PC_BIN_MUTEX(mnode
, bin
, flags
);
1433 if (flags
& PG_FREE_LIST
) {
1434 VM_STAT_ADD(vmm_vmstats
.pladd_free
[0]);
1435 ASSERT(PP_ISAGED(pp
));
1436 ppp
= &PAGE_FREELISTS(mnode
, 0, bin
, mtype
);
1439 VM_STAT_ADD(vmm_vmstats
.pladd_cache
);
1440 ASSERT(pp
->p_vnode
);
1441 ASSERT((pp
->p_offset
& PAGEOFFSET
) == 0);
1442 ppp
= &PAGE_CACHELISTS(mnode
, bin
, mtype
);
1447 if (flags
& PG_LIST_TAIL
)
1448 *ppp
= (*ppp
)->p_next
;
1450 * Add counters before releasing pcm mutex to avoid a race with
1451 * page_freelist_coalesce and page_freelist_split.
1453 page_ctr_add(mnode
, mtype
, pp
, flags
);
1458 #if defined(__sparc)
1459 if (PP_ISNORELOC(pp
)) {
1460 kcage_freemem_add(1);
1464 * It is up to the caller to unlock the page!
1466 ASSERT(PAGE_EXCL(pp
) || (flags
& PG_LIST_ISINIT
));
/*
 * This routine is only used by kcage_init during system startup.
 * It performs the function of page_list_sub/PP_SETNORELOC/page_list_add
 * without the overhead of taking locks and updating counters.
 */
1477 page_list_noreloc_startup(page_t
*pp
)
1486 * If this is a large page on the freelist then
1487 * break it up into smaller pages.
1490 page_boot_demote(pp
);
1493 * Get list page is currently on.
1496 mnode
= PP_2_MEM_NODE(pp
);
1497 mtype
= PP_2_MTYPE(pp
);
1498 ASSERT(mtype
== MTYPE_RELOC
);
1499 ASSERT(pp
->p_szc
== 0);
1501 if (PP_ISAGED(pp
)) {
1502 ppp
= &PAGE_FREELISTS(mnode
, 0, bin
, mtype
);
1503 flags
|= PG_FREE_LIST
;
1505 ppp
= &PAGE_CACHELISTS(mnode
, bin
, mtype
);
1506 flags
|= PG_CACHE_LIST
;
1509 ASSERT(*ppp
!= NULL
);
1512 * Delete page from current list.
1515 *ppp
= pp
->p_next
; /* go to next page */
1517 *ppp
= NULL
; /* page list is gone */
1519 pp
->p_prev
->p_next
= pp
->p_next
;
1520 pp
->p_next
->p_prev
= pp
->p_prev
;
1524 * Decrement page counters
1526 page_ctr_sub_internal(mnode
, mtype
, pp
, flags
);
1529 * Set no reloc for cage initted pages.
1533 mtype
= PP_2_MTYPE(pp
);
1534 ASSERT(mtype
== MTYPE_NORELOC
);
1537 * Get new list for page.
1539 if (PP_ISAGED(pp
)) {
1540 ppp
= &PAGE_FREELISTS(mnode
, 0, bin
, mtype
);
1542 ppp
= &PAGE_CACHELISTS(mnode
, bin
, mtype
);
1546 * Insert page on new list.
1550 pp
->p_next
= pp
->p_prev
= pp
;
1553 pp
->p_prev
= (*ppp
)->p_prev
;
1554 (*ppp
)->p_prev
= pp
;
1555 pp
->p_prev
->p_next
= pp
;
1559 * Increment page counters
1561 page_ctr_add_internal(mnode
, mtype
, pp
, flags
);
1564 * Update cage freemem counter
1566 atomic_inc_ulong(&kcage_freemem
);
1572 page_list_noreloc_startup(page_t
*pp
)
1574 panic("page_list_noreloc_startup: should be here only for sparc");
1579 page_list_add_pages(page_t
*pp
, int flags
)
1583 uint_t bin
, mtype
, i
;
1586 /* default to freelist/head */
1587 ASSERT((flags
& (PG_CACHE_LIST
| PG_LIST_TAIL
)) == 0);
1589 CHK_LPG(pp
, pp
->p_szc
);
1590 VM_STAT_ADD(vmm_vmstats
.pladd_free
[pp
->p_szc
]);
1593 mnode
= PP_2_MEM_NODE(pp
);
1594 mtype
= PP_2_MTYPE(pp
);
1596 if (flags
& PG_LIST_ISINIT
) {
1597 ASSERT(pp
->p_szc
== mmu_page_sizes
- 1);
1598 page_vpadd(&PAGE_FREELISTS(mnode
, pp
->p_szc
, bin
, mtype
), pp
);
1599 ASSERT(!PP_ISNORELOC(pp
));
1600 PLCNT_INCR(pp
, mnode
, mtype
, pp
->p_szc
, flags
);
1603 ASSERT(pp
->p_szc
!= 0 && pp
->p_szc
< mmu_page_sizes
);
1605 pcm
= PC_BIN_MUTEX(mnode
, bin
, PG_FREE_LIST
);
1608 page_vpadd(&PAGE_FREELISTS(mnode
, pp
->p_szc
, bin
, mtype
), pp
);
1609 page_ctr_add(mnode
, mtype
, pp
, PG_FREE_LIST
);
1612 pgcnt
= page_get_pagecnt(pp
->p_szc
);
1613 #if defined(__sparc)
1614 if (PP_ISNORELOC(pp
))
1615 kcage_freemem_add(pgcnt
);
1617 for (i
= 0; i
< pgcnt
; i
++, pp
++)
1618 page_unlock_nocapture(pp
);
/*
 * During boot, need to demote a large page to base
 * pagesize pages for seg_kmem for use in boot_alloc()
 */
1627 page_boot_demote(page_t
*pp
)
1629 ASSERT(pp
->p_szc
!= 0);
1630 ASSERT(PP_ISFREE(pp
));
1631 ASSERT(PP_ISAGED(pp
));
1633 (void) page_demote(PP_2_MEM_NODE(pp
),
1634 PFN_BASE(pp
->p_pagenum
, pp
->p_szc
), 0, pp
->p_szc
, 0, PC_NO_COLOR
,
1637 ASSERT(PP_ISFREE(pp
));
1638 ASSERT(PP_ISAGED(pp
));
1639 ASSERT(pp
->p_szc
== 0);
/*
 * Take a particular page off of whatever freelist the page
 * is claimed to be on.
 *
 * NOTE: Only used for PAGESIZE pages.
 */
1649 page_list_sub(page_t
*pp
, int flags
)
1657 ASSERT(PAGE_EXCL(pp
));
1658 ASSERT(PP_ISFREE(pp
));
	/*
	 * The p_szc field can only be changed by page_promote()
	 * and page_demote(). Only free pages can be promoted and
	 * demoted and the free list MUST be locked during these
	 * operations. So to prevent a race in page_list_sub()
	 * between computing which bin of the freelist lock to
	 * grab and actually grabbing the lock we check again that
	 * the bin we locked is still the correct one. Notice that
	 * the p_szc field could have actually changed on us but
	 * if the bin happens to still be the same we are safe.
	 */
1673 mnode
= PP_2_MEM_NODE(pp
);
1674 pcm
= PC_BIN_MUTEX(mnode
, bin
, flags
);
1676 if (PP_2_BIN(pp
) != bin
) {
1680 mtype
= PP_2_MTYPE(pp
);
1682 if (flags
& PG_FREE_LIST
) {
1683 VM_STAT_ADD(vmm_vmstats
.plsub_free
[0]);
1684 ASSERT(PP_ISAGED(pp
));
1685 ppp
= &PAGE_FREELISTS(mnode
, pp
->p_szc
, bin
, mtype
);
1687 VM_STAT_ADD(vmm_vmstats
.plsub_cache
);
1688 ASSERT(!PP_ISAGED(pp
));
1689 ppp
= &PAGE_CACHELISTS(mnode
, bin
, mtype
);
1693 * Common PAGESIZE case.
1695 * Note that we locked the freelist. This prevents
1696 * any page promotion/demotion operations. Therefore
1697 * the p_szc will not change until we drop pcm mutex.
1699 if (pp
->p_szc
== 0) {
1702 * Subtract counters before releasing pcm mutex
1703 * to avoid race with page_freelist_coalesce.
1705 page_ctr_sub(mnode
, mtype
, pp
, flags
);
1708 #if defined(__sparc)
1709 if (PP_ISNORELOC(pp
)) {
1710 kcage_freemem_sub(1);
1717 * Large pages on the cache list are not supported.
1719 if (flags
& PG_CACHE_LIST
)
1720 panic("page_list_sub: large page on cachelist");
1725 * Somebody wants this particular page which is part
1726 * of a large page. In this case we just demote the page
1727 * if it's on the freelist.
1729 * We have to drop pcm before locking the entire freelist.
1730 * Once we have re-locked the freelist check to make sure
1731 * the page hasn't already been demoted or completely
1735 page_freelist_lock(mnode
);
1736 if (pp
->p_szc
!= 0) {
1738 * Large page is on freelist.
1740 (void) page_demote(mnode
, PFN_BASE(pp
->p_pagenum
, pp
->p_szc
),
1741 0, pp
->p_szc
, 0, PC_NO_COLOR
, PC_FREE
);
1743 ASSERT(PP_ISFREE(pp
));
1744 ASSERT(PP_ISAGED(pp
));
1745 ASSERT(pp
->p_szc
== 0);
1748 * Subtract counters before releasing pcm mutex
1749 * to avoid race with page_freelist_coalesce.
1752 mtype
= PP_2_MTYPE(pp
);
1753 ppp
= &PAGE_FREELISTS(mnode
, pp
->p_szc
, bin
, mtype
);
1756 page_ctr_sub(mnode
, mtype
, pp
, flags
);
1757 page_freelist_unlock(mnode
);
1759 #if defined(__sparc)
1760 if (PP_ISNORELOC(pp
)) {
1761 kcage_freemem_sub(1);
1767 page_list_sub_pages(page_t
*pp
, uint_t szc
)
1773 ASSERT(PAGE_EXCL(pp
));
1774 ASSERT(PP_ISFREE(pp
));
1775 ASSERT(PP_ISAGED(pp
));
1778 * See comment in page_list_sub().
1782 mnode
= PP_2_MEM_NODE(pp
);
1783 pcm
= PC_BIN_MUTEX(mnode
, bin
, PG_FREE_LIST
);
1785 if (PP_2_BIN(pp
) != bin
) {
1791 * If we're called with a page larger than szc or it got
1792 * promoted above szc before we locked the freelist then
1793 * drop pcm and re-lock entire freelist. If page still larger
1794 * than szc then demote it.
1796 if (pp
->p_szc
> szc
) {
1799 page_freelist_lock(mnode
);
1800 if (pp
->p_szc
> szc
) {
1801 VM_STAT_ADD(vmm_vmstats
.plsubpages_szcbig
);
1802 (void) page_demote(mnode
,
1803 PFN_BASE(pp
->p_pagenum
, pp
->p_szc
), 0,
1804 pp
->p_szc
, szc
, PC_NO_COLOR
, PC_FREE
);
1808 ASSERT(PP_ISFREE(pp
));
1809 ASSERT(PP_ISAGED(pp
));
1810 ASSERT(pp
->p_szc
<= szc
);
1811 ASSERT(pp
== PP_PAGEROOT(pp
));
1813 VM_STAT_ADD(vmm_vmstats
.plsub_free
[pp
->p_szc
]);
1815 mtype
= PP_2_MTYPE(pp
);
1816 if (pp
->p_szc
!= 0) {
1817 page_vpsub(&PAGE_FREELISTS(mnode
, pp
->p_szc
, bin
, mtype
), pp
);
1818 CHK_LPG(pp
, pp
->p_szc
);
1820 VM_STAT_ADD(vmm_vmstats
.plsubpages_szc0
);
1821 page_sub(&PAGE_FREELISTS(mnode
, pp
->p_szc
, bin
, mtype
), pp
);
1823 page_ctr_sub(mnode
, mtype
, pp
, PG_FREE_LIST
);
1828 page_freelist_unlock(mnode
);
1831 #if defined(__sparc)
1832 if (PP_ISNORELOC(pp
)) {
1835 pgcnt
= page_get_pagecnt(pp
->p_szc
);
1836 kcage_freemem_sub(pgcnt
);
/*
 * Add the page to the front of a linked list of pages
 * using the p_next & p_prev pointers for the list.
 * The caller is responsible for protecting the list pointers.
 */
void
mach_page_add(page_t **ppp, page_t *pp)
{
	if (*ppp == NULL) {
		pp->p_next = pp->p_prev = pp;
	} else {
		pp->p_next = *ppp;
		pp->p_prev = (*ppp)->p_prev;
		(*ppp)->p_prev = pp;
		pp->p_prev->p_next = pp;
	}
	*ppp = pp;
}
/*
 * Remove this page from a linked list of pages
 * using the p_next & p_prev pointers for the list.
 *
 * The caller is responsible for protecting the list pointers.
 */
void
mach_page_sub(page_t **ppp, page_t *pp)
{
	ASSERT(PP_ISFREE(pp));

	if (*ppp == NULL || pp == NULL)
		panic("mach_page_sub");

	if (*ppp == pp)
		*ppp = pp->p_next;		/* go to next page */

	if (*ppp == pp)
		*ppp = NULL;			/* page list is gone */
	else {
		pp->p_prev->p_next = pp->p_next;
		pp->p_next->p_prev = pp->p_prev;
	}
	pp->p_prev = pp->p_next = pp;		/* make pp a list of one */
}
/*
 * Routine fsflush uses to gradually coalesce the free list into larger pages.
 */
void
page_promote_size(page_t *pp, uint_t cur_szc)
{
	pfn_t pfn;
	int mnode;
	int idx;
	int new_szc = cur_szc + 1;
	int full = FULL_REGION_CNT(new_szc);

	pfn = page_pptonum(pp);
	mnode = PFN_2_MEM_NODE(pfn);

	page_freelist_lock(mnode);

	idx = PNUM_TO_IDX(mnode, new_szc, pfn);
	if (PAGE_COUNTERS(mnode, new_szc, idx) == full)
		(void) page_promote(mnode, pfn, new_szc, PC_FREE, PC_MTYPE_ANY);

	page_freelist_unlock(mnode);
}
static uint_t page_promote_err;
static uint_t page_promote_noreloc_err;
/*
 * Create a single larger page (of szc new_szc) from smaller contiguous pages
 * for the given mnode starting at pfnum. Pages involved are on the freelist
 * before the call and may be returned to the caller if requested, otherwise
 * they will be placed back on the freelist.
 * If flags is PC_ALLOC, then the large page will be returned to the user in
 * a state which is consistent with a page being taken off the freelist. If
 * we failed to lock the new large page, then we will return NULL to the
 * caller and put the large page on the freelist instead.
 * If flags is PC_FREE, then the large page will be placed on the freelist,
 * and NULL will be returned.
 * The caller is responsible for locking the freelist as well as any other
 * accounting which needs to be done for a returned page.
 *
 * RFE: For performance pass in pp instead of pfnum so
 *	we can avoid excessive calls to page_numtopp_nolock().
 *	This would depend on an assumption that all contiguous
 *	pages are in the same memseg so we can just add/dec
 *	our pp.
 */
/*
 * There is a potential but rare deadlock situation
 * for page promotion and demotion operations. The problem
 * is there are two paths into the freelist manager and
 * they have different lock orders:
 *
 * page_create()
 *	lock freelist
 *	page_lock(EXCL)
 *	unlock freelist
 *	return
 *	caller drops page_lock
 *
 * page_free() and page_reclaim()
 *	caller grabs page_lock(EXCL)
 *
 *	lock freelist
 *	unlock freelist
 *	drop page_lock
 *
 * What prevents a thread in page_create() from deadlocking
 * with a thread freeing or reclaiming the same page is the
 * page_trylock() in page_get_freelist(). If the trylock fails
 * it skips the page.
 *
 * The lock ordering for promotion and demotion is the same as
 * for page_create(). Since the same deadlock could occur during
 * page promotion and freeing or reclaiming of a page on the
 * cache list we might have to fail the operation and undo what
 * we have done so far. Again this is rare.
 */
1966 page_promote(int mnode
, pfn_t pfnum
, uchar_t new_szc
, int flags
, int mtype
)
1968 page_t
*pp
, *pplist
, *tpp
, *start_pp
;
1969 pgcnt_t new_npgs
, npgs
;
1971 pgcnt_t tmpnpgs
, pages_left
;
1978 * General algorithm:
1979 * Find the starting page
1980 * Walk each page struct removing it from the freelist,
1981 * and linking it to all the other pages removed.
1982 * Once all pages are off the freelist,
1983 * walk the list, modifying p_szc to new_szc and what
1984 * ever other info needs to be done to create a large free page.
1985 * According to the flags, either return the page or put it
1989 start_pp
= page_numtopp_nolock(pfnum
);
1990 ASSERT(start_pp
&& (start_pp
->p_pagenum
== pfnum
));
1991 new_npgs
= page_get_pagecnt(new_szc
);
1992 ASSERT(IS_P2ALIGNED(pfnum
, new_npgs
));
1994 /* don't return page of the wrong mtype */
1995 if (mtype
!= PC_MTYPE_ANY
&& mtype
!= PP_2_MTYPE(start_pp
))
1999 * Loop through smaller pages to confirm that all pages
2000 * give the same result for PP_ISNORELOC().
2001 * We can check this reliably here as the protocol for setting
2002 * P_NORELOC requires pages to be taken off the free list first.
2004 noreloc
= PP_ISNORELOC(start_pp
);
2005 for (pp
= start_pp
+ new_npgs
; --pp
> start_pp
; ) {
2006 if (noreloc
!= PP_ISNORELOC(pp
)) {
2007 page_promote_noreloc_err
++;
2013 pages_left
= new_npgs
;
2017 /* Loop around coalescing the smaller pages into a big page. */
2018 while (pages_left
) {
2020 * Remove from the freelist.
2022 ASSERT(PP_ISFREE(pp
));
2024 ASSERT(mnode
== PP_2_MEM_NODE(pp
));
2025 mtype
= PP_2_MTYPE(pp
);
2026 if (PP_ISAGED(pp
)) {
2032 page_vpsub(&PAGE_FREELISTS(mnode
,
2033 pp
->p_szc
, bin
, mtype
), pp
);
2035 mach_page_sub(&PAGE_FREELISTS(mnode
, 0,
2038 which_list
= PG_FREE_LIST
;
2040 ASSERT(pp
->p_szc
== 0);
2045 * Since this page comes from the
2046 * cachelist, we must destroy the
2047 * vnode association.
2049 if (!page_trylock(pp
, SE_EXCL
)) {
2054 * We need to be careful not to deadlock
2055 * with another thread in page_lookup().
2056 * The page_lookup() thread could be holding
2057 * the same phm that we need if the two
2058 * pages happen to hash to the same phm lock.
2059 * At this point we have locked the entire
2060 * freelist and page_lookup() could be trying
2061 * to grab a freelist lock.
2063 index
= PAGE_HASH_FUNC(pp
->p_vnode
, pp
->p_offset
);
2064 phm
= PAGE_HASH_MUTEX(index
);
2065 if (!mutex_tryenter(phm
)) {
2066 page_unlock_nocapture(pp
);
2070 mach_page_sub(&PAGE_CACHELISTS(mnode
, bin
, mtype
), pp
);
2071 page_hashout(pp
, phm
);
2074 page_unlock_nocapture(pp
);
2075 which_list
= PG_CACHE_LIST
;
2077 page_ctr_sub(mnode
, mtype
, pp
, which_list
);
2080 * Concatenate the smaller page(s) onto
2081 * the large page list.
2083 tmpnpgs
= npgs
= page_get_pagecnt(pp
->p_szc
);
2087 tpp
->p_szc
= new_szc
;
2090 page_list_concat(&pplist
, &pp
);
2093 CHK_LPG(pplist
, new_szc
);
2096 * return the page to the user if requested
2097 * in the properly locked state.
2099 if (flags
== PC_ALLOC
&& (page_trylock_cons(pplist
, SE_EXCL
))) {
2104 * Otherwise place the new large page on the freelist
2106 bin
= PP_2_BIN(pplist
);
2107 mnode
= PP_2_MEM_NODE(pplist
);
2108 mtype
= PP_2_MTYPE(pplist
);
2109 page_vpadd(&PAGE_FREELISTS(mnode
, new_szc
, bin
, mtype
), pplist
);
2111 page_ctr_add(mnode
, mtype
, pplist
, PG_FREE_LIST
);
			/*
			 * A thread must have still been freeing or
			 * reclaiming the page on the cachelist.
			 * To prevent a deadlock undo what we have
			 * done so far and return failure. This
			 * situation can only happen while promoting
			 * pages.
			 */
2126 mach_page_sub(&pplist
, pp
);
2129 mtype
= PP_2_MTYPE(pp
);
2130 mach_page_add(&PAGE_FREELISTS(mnode
, 0, bin
, mtype
), pp
);
2131 page_ctr_add(mnode
, mtype
, pp
, PG_FREE_LIST
);
/*
 * Break up a large page into smaller size pages.
 * Pages involved are on the freelist before the call and may
 * be returned to the caller if requested, otherwise they will
 * be placed back on the freelist.
 * The caller is responsible for locking the freelist as well as any other
 * accounting which needs to be done for a returned page.
 * If flags is not PC_ALLOC, the color argument is ignored, and thus
 * technically, any value may be passed in but PC_NO_COLOR is the standard
 * which should be followed for clarity's sake.
 * Returns a page whose pfn is < pfnmax
 */
2150 page_demote(int mnode
, pfn_t pfnum
, pfn_t pfnmax
, uchar_t cur_szc
,
2151 uchar_t new_szc
, int color
, int flags
)
2153 page_t
*pp
, *pplist
, *npplist
;
2157 page_t
*ret_pp
= NULL
;
2159 ASSERT(cur_szc
!= 0);
2160 ASSERT(new_szc
< cur_szc
);
2162 pplist
= page_numtopp_nolock(pfnum
);
2163 ASSERT(pplist
!= NULL
);
2165 ASSERT(pplist
->p_szc
== cur_szc
);
2167 bin
= PP_2_BIN(pplist
);
2168 ASSERT(mnode
== PP_2_MEM_NODE(pplist
));
2169 mtype
= PP_2_MTYPE(pplist
);
2170 page_vpsub(&PAGE_FREELISTS(mnode
, cur_szc
, bin
, mtype
), pplist
);
2172 CHK_LPG(pplist
, cur_szc
);
2173 page_ctr_sub(mnode
, mtype
, pplist
, PG_FREE_LIST
);
2176 * Number of PAGESIZE pages for smaller new_szc
2179 npgs
= page_get_pagecnt(new_szc
);
2184 ASSERT(pp
->p_szc
== cur_szc
);
2187 * We either break it up into PAGESIZE pages or larger.
2189 if (npgs
== 1) { /* PAGESIZE case */
2190 mach_page_sub(&pplist
, pp
);
2191 ASSERT(pp
->p_szc
== cur_szc
);
2192 ASSERT(new_szc
== 0);
2193 ASSERT(mnode
== PP_2_MEM_NODE(pp
));
2194 pp
->p_szc
= new_szc
;
2196 if ((bin
== color
) && (flags
== PC_ALLOC
) &&
2197 (ret_pp
== NULL
) && (pfnmax
== 0 ||
2198 pp
->p_pagenum
< pfnmax
) &&
2199 page_trylock_cons(pp
, SE_EXCL
)) {
2202 mtype
= PP_2_MTYPE(pp
);
2203 mach_page_add(&PAGE_FREELISTS(mnode
, 0, bin
,
2205 page_ctr_add(mnode
, mtype
, pp
, PG_FREE_LIST
);
2208 page_t
*try_to_return_this_page
= NULL
;
2212 * Break down into smaller lists of pages.
2214 page_list_break(&pplist
, &npplist
, npgs
);
2219 ASSERT(pp
->p_szc
== cur_szc
);
2221 * Check whether all the pages in this list
2222 * fit the request criteria.
2224 if (pfnmax
== 0 || pp
->p_pagenum
< pfnmax
) {
2227 pp
->p_szc
= new_szc
;
2231 if (count
== npgs
&&
2232 (pfnmax
== 0 || pp
->p_pagenum
< pfnmax
)) {
2233 try_to_return_this_page
= pp
;
2236 CHK_LPG(pplist
, new_szc
);
2238 bin
= PP_2_BIN(pplist
);
2239 if (try_to_return_this_page
)
2241 PP_2_MEM_NODE(try_to_return_this_page
));
2242 if ((bin
== color
) && (flags
== PC_ALLOC
) &&
2243 (ret_pp
== NULL
) && try_to_return_this_page
&&
2244 page_trylock_cons(try_to_return_this_page
,
2246 ret_pp
= try_to_return_this_page
;
2248 mtype
= PP_2_MTYPE(pp
);
2249 page_vpadd(&PAGE_FREELISTS(mnode
, new_szc
,
2250 bin
, mtype
), pplist
);
2252 page_ctr_add(mnode
, mtype
, pplist
,
int mpss_coalesce_disable = 0;

/*
 * Coalesce free pages into a page of the given szc and color if possible.
 * Return the pointer to the page created, otherwise, return NULL.
 *
 * If pfnhi is non-zero, search for large page with pfn range less than pfnhi.
 */
page_t *
page_freelist_coalesce(int mnode, uchar_t szc, uint_t color, uint_t ceq_mask,
    int mtype, pfn_t pfnhi)
{
	int	r = szc;		/* region size */
	int	mrange;
	uint_t	full, bin, color_mask, wrap = 0;
	pfn_t	pfnum, lo, hi;
	size_t	len, idx, idx0;
	pgcnt_t	cands = 0, szcpgcnt = page_get_pagecnt(szc);
	page_t	*ret_pp;
	MEM_NODE_ITERATOR_DECL(it);
#if defined(__sparc)
	pfn_t pfnum0, nlo, nhi;
#endif

	if (mpss_coalesce_disable) {
		ASSERT(szc < MMU_PAGE_SIZES);
		VM_STAT_ADD(vmm_vmstats.page_ctrs_coalesce[szc][0]);
		return (NULL);
	}

	ASSERT(szc < mmu_page_sizes);
	color_mask = PAGE_GET_PAGECOLORS(szc) - 1;
	ASSERT(ceq_mask <= color_mask);
	ASSERT(color <= color_mask);

	/* Prevent page_counters dynamic memory from being freed */
	rw_enter(&page_ctrs_rwlock[mnode], RW_READER);

	mrange = MTYPE_2_MRANGE(mnode, mtype);
	ASSERT(mrange < mnode_nranges[mnode]);
	VM_STAT_ADD(vmm_vmstats.page_ctrs_coalesce[r][mrange]);

	/* get pfn range for mtype */
	len = PAGE_COUNTERS_ENTRIES(mnode, r);
	MNODETYPE_2_PFN(mnode, mtype, lo, hi);

	/* use lower limit if given */
	if (pfnhi != PFNNULL && pfnhi < hi)
		hi = pfnhi;

	/* round to szcpgcnt boundaries */
	lo = P2ROUNDUP(lo, szcpgcnt);
	MEM_NODE_ITERATOR_INIT(lo, mnode, szc, &it);
	if (lo == (pfn_t)-1) {
		rw_exit(&page_ctrs_rwlock[mnode]);
		return (NULL);
	}
	hi = hi & ~(szcpgcnt - 1);

	/* set lo to the closest pfn of the right color */
	if (((PFN_2_COLOR(lo, szc, &it) ^ color) & ceq_mask) ||
	    (interleaved_mnodes && PFN_2_MEM_NODE(lo) != mnode)) {
		PAGE_NEXT_PFN_FOR_COLOR(lo, szc, color, ceq_mask, color_mask,
		    &it);
	}

	if (hi <= lo) {
		rw_exit(&page_ctrs_rwlock[mnode]);
		return (NULL);
	}

	full = FULL_REGION_CNT(r);

	/* calculate the number of page candidates and initial search index */
	bin = color;
	idx0 = (size_t)(-1);
	do {
		pgcnt_t acand;

		PGCTRS_CANDS_GETVALUECOLOR(mnode, mrange, r, bin, acand);
		if (acand) {
			idx = PAGE_COUNTERS_CURRENT_COLOR(mnode,
			    r, bin, mrange);
			idx0 = MIN(idx0, idx);
			cands += acand;
		}
		bin = ADD_MASKED(bin, 1, ceq_mask, color_mask);
	} while (bin != color);

	if (cands == 0) {
		VM_STAT_ADD(vmm_vmstats.page_ctrs_cands_skip[r][mrange]);
		rw_exit(&page_ctrs_rwlock[mnode]);
		return (NULL);
	}

	pfnum = IDX_TO_PNUM(mnode, r, idx0);
	if (pfnum < lo || pfnum >= hi) {
		pfnum = lo;
	} else {
		MEM_NODE_ITERATOR_INIT(pfnum, mnode, szc, &it);
		if (pfnum == (pfn_t)-1) {
			pfnum = lo;
			MEM_NODE_ITERATOR_INIT(pfnum, mnode, szc, &it);
			ASSERT(pfnum != (pfn_t)-1);
		} else if ((PFN_2_COLOR(pfnum, szc, &it) ^ color) & ceq_mask ||
		    (interleaved_mnodes && PFN_2_MEM_NODE(pfnum) != mnode)) {
			/* invalid color, get the closest correct pfn */
			PAGE_NEXT_PFN_FOR_COLOR(pfnum, szc, color, ceq_mask,
			    color_mask, &it);
			if (pfnum >= hi) {
				pfnum = lo;
				MEM_NODE_ITERATOR_INIT(pfnum, mnode, szc, &it);
			}
		}
	}

	/* set starting index */
	idx0 = PNUM_TO_IDX(mnode, r, pfnum);

#if defined(__sparc)
	pfnum0 = pfnum;		/* page corresponding to idx0 */
	nhi = 0;		/* search kcage ranges */
#endif

	for (idx = idx0; wrap == 0 || (idx < idx0 && wrap < 2); ) {

#if defined(__sparc)
		/*
		 * Find lowest intersection of kcage ranges and mnode.
		 * MTYPE_NORELOC means look in the cage, otherwise outside.
		 */
		if (kcage_next_range(mtype == MTYPE_NORELOC, pfnum,
		    (wrap == 0 ? hi : pfnum0), &nlo, &nhi))
			goto wrapit;

		/* jump to the next page in the range */
		if (pfnum < nlo) {
			pfnum = P2ROUNDUP(nlo, szcpgcnt);
			MEM_NODE_ITERATOR_INIT(pfnum, mnode, szc, &it);
			idx = PNUM_TO_IDX(mnode, r, pfnum);
			if (idx >= len || pfnum >= hi)
				goto wrapit;
			if ((PFN_2_COLOR(pfnum, szc, &it) ^ color) &
			    ceq_mask)
				goto next;
			if (interleaved_mnodes &&
			    PFN_2_MEM_NODE(pfnum) != mnode)
				goto next;
		}
#endif

		if (PAGE_COUNTERS(mnode, r, idx) != full)
			goto next;

		/*
		 * RFE: For performance maybe we can do something less
		 * brutal than locking the entire freelist. So far
		 * this doesn't seem to be a performance problem?
		 */
		page_freelist_lock(mnode);
		if (PAGE_COUNTERS(mnode, r, idx) == full) {
			ret_pp =
			    page_promote(mnode, pfnum, r, PC_ALLOC, mtype);
			if (ret_pp != NULL) {
				VM_STAT_ADD(vmm_vmstats.pfc_coalok[r][mrange]);
				PAGE_COUNTERS_CURRENT_COLOR(mnode, r,
				    PFN_2_COLOR(pfnum, szc, &it), mrange) = idx;
				page_freelist_unlock(mnode);
				rw_exit(&page_ctrs_rwlock[mnode]);
#if defined(__sparc)
				if (PP_ISNORELOC(ret_pp)) {
					pgcnt_t npgs;

					npgs = page_get_pagecnt(ret_pp->p_szc);
					kcage_freemem_sub(npgs);
				}
#endif
				return (ret_pp);
			}
		} else {
			VM_STAT_ADD(vmm_vmstats.page_ctrs_changed[r][mrange]);
		}

		page_freelist_unlock(mnode);
		/*
		 * No point looking for another page if we've
		 * already tried all of the ones that
		 * page_ctr_cands indicated. Stash off where we left
		 * off.
		 * Note: this is not exact since we don't hold the
		 * page_freelist_locks before we initially get the
		 * value of cands for performance reasons, but should
		 * be a decent approximation.
		 */
		if (--cands == 0) {
			PAGE_COUNTERS_CURRENT_COLOR(mnode, r, color, mrange) =
			    idx;
			break;
		}
next:
		PAGE_NEXT_PFN_FOR_COLOR(pfnum, szc, color, ceq_mask,
		    color_mask, &it);
		idx = PNUM_TO_IDX(mnode, r, pfnum);
		if (idx >= len || pfnum >= hi) {
wrapit:
			pfnum = lo;
			MEM_NODE_ITERATOR_INIT(pfnum, mnode, szc, &it);
			idx = PNUM_TO_IDX(mnode, r, pfnum);
			wrap++;
#if defined(__sparc)
			nhi = 0;	/* search kcage ranges */
#endif
		}
	}

	rw_exit(&page_ctrs_rwlock[mnode]);
	VM_STAT_ADD(vmm_vmstats.page_ctrs_failed[r][mrange]);
	return (NULL);
}
/*
 * For the given mnode, promote as many small pages to large pages as possible.
 * mnode can be -1, which means do them all.
 */
void
page_freelist_coalesce_all(int mnode)
{
	int	r;		/* region size */
	int	full;
	size_t	len, idx;
	int	doall = interleaved_mnodes || mnode < 0;
	int	mlo = doall ? 0 : mnode;
	int	mhi = doall ? max_mem_nodes : (mnode + 1);

	VM_STAT_ADD(vmm_vmstats.page_ctrs_coalesce_all);

	if (mpss_coalesce_disable) {
		return;
	}

	/*
	 * Lock the entire freelist and coalesce what we can.
	 *
	 * Always promote to the largest page possible
	 * first to reduce the number of page promotions.
	 */
	for (mnode = mlo; mnode < mhi; mnode++) {
		rw_enter(&page_ctrs_rwlock[mnode], RW_READER);
		page_freelist_lock(mnode);
	}

	for (r = mmu_page_sizes - 1; r > 0; r--) {
		for (mnode = mlo; mnode < mhi; mnode++) {
			pgcnt_t cands = 0;
			int mrange, nranges = mnode_nranges[mnode];

			for (mrange = 0; mrange < nranges; mrange++) {
				PGCTRS_CANDS_GETVALUE(mnode, mrange, r, cands);
				if (cands != 0)
					break;
			}
			if (cands == 0) {
				VM_STAT_ADD(vmm_vmstats.
				    page_ctrs_cands_skip_all);
				continue;
			}

			full = FULL_REGION_CNT(r);
			len = PAGE_COUNTERS_ENTRIES(mnode, r);

			for (idx = 0; idx < len; idx++) {
				if (PAGE_COUNTERS(mnode, r, idx) == full) {
					pfn_t pfnum =
					    IDX_TO_PNUM(mnode, r, idx);
					int tmnode = interleaved_mnodes ?
					    PFN_2_MEM_NODE(pfnum) : mnode;

					ASSERT(pfnum >=
					    mem_node_config[tmnode].physbase &&
					    pfnum <=
					    mem_node_config[tmnode].physmax);

					(void) page_promote(tmnode,
					    pfnum, r, PC_FREE, PC_MTYPE_ANY);
				}
			}
			/* shared hpm_counters covers all mnodes, so we quit */
			if (interleaved_mnodes)
				break;
		}
	}
	for (mnode = mlo; mnode < mhi; mnode++) {
		page_freelist_unlock(mnode);
		rw_exit(&page_ctrs_rwlock[mnode]);
	}
}
/*
 * This is where all policies for moving pages around
 * to different page size free lists are implemented.
 * Returns a page on success, NULL on failure.
 *
 * So far these are the priorities for this algorithm in descending
 * order:
 *
 *	1) When servicing a request try to do so with a free page
 *	   from next size up. Helps defer fragmentation as long
 *	   as possible.
 *
 *	2) Page coalesce on demand. Only when a freelist
 *	   larger than PAGESIZE is empty and step 1
 *	   will not work since all larger size lists are
 *	   also empty.
 *
 * If pfnhi is non-zero, search for large page with pfn range less than pfnhi.
 */
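/*
 * Illustrative example (added commentary): if a request for, say, a 64K
 * page finds its freelist empty, priority 1 demotes a page from the next
 * size up (e.g. a free 512K page) via page_demote(); only when every
 * larger freelist is also empty does priority 2 coalesce free PAGESIZE
 * pages through page_freelist_coalesce(). The sizes named here are just
 * an example; the actual sizes depend on mmu_page_sizes.
 */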
page_t *
page_freelist_split(uchar_t szc, uint_t color, int mnode, int mtype,
    pfn_t pfnlo, pfn_t pfnhi, page_list_walker_t *plw)
{
	uchar_t nszc = szc + 1;
	uint_t 	bin, sbin, bin_prev;
	page_t	*pp, *firstpp;
	page_t	*ret_pp = NULL;
	uint_t	color_mask;

	if (nszc == mmu_page_sizes)
		return (NULL);

	ASSERT(nszc < mmu_page_sizes);
	color_mask = PAGE_GET_PAGECOLORS(nszc) - 1;
	bin = sbin = PAGE_GET_NSZ_COLOR(szc, color);
	bin_prev = (plw->plw_bin_split_prev == color) ? INVALID_COLOR :
	    PAGE_GET_NSZ_COLOR(szc, plw->plw_bin_split_prev);

	VM_STAT_ADD(vmm_vmstats.pfs_req[szc]);
	/*
	 * First try to break up a larger page to fill current size freelist.
	 */
	while (plw->plw_bins[nszc] != 0) {

		ASSERT(nszc < mmu_page_sizes);

		/*
		 * If page found then demote it.
		 */
		if (PAGE_FREELISTS(mnode, nszc, bin, mtype)) {
			page_freelist_lock(mnode);
			firstpp = pp = PAGE_FREELISTS(mnode, nszc, bin, mtype);

			/*
			 * If pfnhi is not PFNNULL, look for large page below
			 * pfnhi. PFNNULL signifies no pfn requirement.
			 */
			if (pp &&
			    ((pfnhi != PFNNULL && pp->p_pagenum >= pfnhi) ||
			    (pfnlo != PFNNULL && pp->p_pagenum < pfnlo))) {
				do {
					pp = pp->p_vpnext;
					if (pp == firstpp) {
						pp = NULL;
						break;
					}
				} while ((pfnhi != PFNNULL &&
				    pp->p_pagenum >= pfnhi) ||
				    (pfnlo != PFNNULL &&
				    pp->p_pagenum < pfnlo));

				if (pfnhi != PFNNULL && pp != NULL)
					ASSERT(pp->p_pagenum < pfnhi);

				if (pfnlo != PFNNULL && pp != NULL)
					ASSERT(pp->p_pagenum >= pfnlo);
			}
			if (pp) {
				uint_t ccolor = page_correct_color(szc, nszc,
				    color, bin, plw->plw_ceq_mask[szc]);

				ASSERT(pp->p_szc == nszc);
				VM_STAT_ADD(vmm_vmstats.pfs_demote[nszc]);
				ret_pp = page_demote(mnode, pp->p_pagenum,
				    pfnhi, pp->p_szc, szc, ccolor, PC_ALLOC);
				if (ret_pp) {
					page_freelist_unlock(mnode);
#if defined(__sparc)
					if (PP_ISNORELOC(ret_pp)) {
						pgcnt_t npgs;

						npgs = page_get_pagecnt(
						    ret_pp->p_szc);
						kcage_freemem_sub(npgs);
					}
#endif
					return (ret_pp);
				}
			}
			page_freelist_unlock(mnode);
		}

		/* loop through next size bins */
		bin = ADD_MASKED(bin, 1, plw->plw_ceq_mask[nszc], color_mask);
		plw->plw_bins[nszc]--;

		if (bin == sbin) {
			uchar_t nnszc = nszc + 1;

			/* we are done with this page size - check next */
			if (plw->plw_bins[nnszc] == 0)
				/* we have already checked next size bins */
				break;

			bin = sbin = PAGE_GET_NSZ_COLOR(nszc, bin);
			if (bin_prev != INVALID_COLOR) {
				bin_prev = PAGE_GET_NSZ_COLOR(nszc, bin_prev);
				if (!((bin ^ bin_prev) &
				    plw->plw_ceq_mask[nnszc]))
					break;
			}
			ASSERT(nnszc < mmu_page_sizes);
			color_mask = PAGE_GET_PAGECOLORS(nnszc) - 1;
			nszc = nnszc;
			ASSERT(nszc < mmu_page_sizes);
		}
	}

	return (ret_pp);
}
/*
 * Helper routine used only by the freelist code to lock
 * a page. If the page is a large page then it succeeds in
 * locking all the constituent pages or none at all.
 * Returns 1 on success, 0 on failure.
 */
int
page_trylock_cons(page_t *pp, se_t se)
{
	page_t	*tpp, *first_pp = pp;

	/*
	 * Fail if can't lock first or only page.
	 */
	if (!page_trylock(pp, se)) {
		return (0);
	}

	/*
	 * PAGESIZE: common case.
	 */
	if (pp->p_szc == 0) {
		return (1);
	}

	/* large page */
	tpp = pp->p_next;
	while (tpp != pp) {
		if (!page_trylock(tpp, se)) {
			/*
			 * On failure unlock what we have locked so far.
			 * We want to avoid attempting to capture these
			 * pages as the pcm mutex may be held which could
			 * lead to a recursive mutex panic.
			 */
			while (first_pp != tpp) {
				page_unlock_nocapture(first_pp);
				first_pp = first_pp->p_next;
			}
			return (0);
		}
		tpp = tpp->p_next;
	}
	return (1);
}
/*
 * init context for walking page lists
 * Called when a page of the given szc is unavailable. Sets markers
 * for the beginning of the search to detect when search has
 * completed a full cycle. Sets flags for splitting larger pages
 * and coalescing smaller pages. Page walking proceeds until a page
 * of the desired equivalent color is found.
 */
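/*
 * Worked example (added commentary, assuming colorequivszc[szc] == 0x01
 * and 16 page colors): the high nibble (0) is the number of high order
 * color bits to ignore and the low nibble (1) the number of low order
 * bits, so plw_ceq_dif = 16 >> (0 + 1) = 8 equivalence classes and
 * plw_ceq_mask[szc] = (8 - 1) << 1 = 0xE.
 */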
void
page_list_walk_init(uchar_t szc, uint_t flags, uint_t bin, int can_split,
    int use_ceq, page_list_walker_t *plw)
{
	uint_t nszc, ceq_mask, colors;
	uchar_t ceq = use_ceq ? colorequivszc[szc] : 0;

	ASSERT(szc < mmu_page_sizes);
	colors = PAGE_GET_PAGECOLORS(szc);

	plw->plw_colors = colors;
	plw->plw_color_mask = colors - 1;
	plw->plw_bin_marker = plw->plw_bin0 = bin;
	plw->plw_bin_split_prev = bin;
	plw->plw_bin_step = (szc == 0) ? vac_colors : 1;

	/*
	 * if vac aliasing is possible make sure lower order color
	 * bits are never ignored
	 */
	if (vac_colors > 1)
		ceq &= 0xf0;

	/*
	 * calculate the number of non-equivalent colors and
	 * color equivalency mask
	 */
	plw->plw_ceq_dif = colors >> ((ceq >> 4) + (ceq & 0xf));
	ASSERT(szc > 0 || plw->plw_ceq_dif >= vac_colors);
	ASSERT(plw->plw_ceq_dif > 0);
	plw->plw_ceq_mask[szc] = (plw->plw_ceq_dif - 1) << (ceq & 0xf);

	if (flags & PG_MATCH_COLOR) {
		if (cpu_page_colors < 0) {
			/*
			 * this is a heterogeneous machine with different CPUs
			 * having different size e$ (not supported for ni2/rock
			 */
			uint_t cpucolors = CPUSETSIZE() >> PAGE_GET_SHIFT(szc);
			cpucolors = MAX(cpucolors, 1);
			ceq_mask = plw->plw_color_mask & (cpucolors - 1);
			plw->plw_ceq_mask[szc] =
			    MIN(ceq_mask, plw->plw_ceq_mask[szc]);
		}
		plw->plw_ceq_dif = 1;
	}

	/* we can split pages in the freelist, but not the cachelist */
	if (can_split) {
		plw->plw_do_split = (szc + 1 < mmu_page_sizes) ? 1 : 0;

		/* set next szc color masks and number of free list bins */
		for (nszc = szc + 1; nszc < mmu_page_sizes; nszc++, szc++) {
			plw->plw_ceq_mask[nszc] = PAGE_GET_NSZ_MASK(szc,
			    plw->plw_ceq_mask[szc]);
			plw->plw_bins[nszc] = PAGE_GET_PAGECOLORS(nszc);
		}
		plw->plw_ceq_mask[nszc] = INVALID_MASK;
		plw->plw_bins[nszc] = 0;

	} else {
		plw->plw_do_split = 0;
		plw->plw_bins[1] = 0;
		plw->plw_ceq_mask[1] = INVALID_MASK;
	}
}
/*
 * set mark to flag where next split should occur
 */
#define	PAGE_SET_NEXT_SPLIT_MARKER(szc, nszc, bin, plw) {		\
	uint_t bin_nsz = PAGE_GET_NSZ_COLOR(szc, bin);			\
	uint_t bin0_nsz = PAGE_GET_NSZ_COLOR(szc, plw->plw_bin0);	\
	uint_t neq_mask = ~plw->plw_ceq_mask[nszc] & plw->plw_color_mask; \
	plw->plw_split_next =						\
		INC_MASKED(bin_nsz, neq_mask, plw->plw_color_mask);	\
	if (!((plw->plw_split_next ^ bin0_nsz) & plw->plw_ceq_mask[nszc])) { \
		plw->plw_split_next =					\
		    INC_MASKED(plw->plw_split_next,			\
			neq_mask, plw->plw_color_mask);			\
	}								\
}
uint_t
page_list_walk_next_bin(uchar_t szc, uint_t bin, page_list_walker_t *plw)
{
	uint_t	neq_mask = ~plw->plw_ceq_mask[szc] & plw->plw_color_mask;
	uint_t	bin0_nsz, nbin_nsz, nbin0, nbin;
	uchar_t nszc = szc + 1;

	nbin = ADD_MASKED(bin,
	    plw->plw_bin_step, neq_mask, plw->plw_color_mask);

	if (plw->plw_do_split) {
		plw->plw_bin_split_prev = bin;
		PAGE_SET_NEXT_SPLIT_MARKER(szc, nszc, bin, plw);
		plw->plw_do_split = 0;
	}

	if (szc == 0) {
		if (plw->plw_count != 0 || plw->plw_ceq_dif == vac_colors) {
			if (nbin == plw->plw_bin0 &&
			    (vac_colors == 1 || nbin != plw->plw_bin_marker)) {
				nbin = ADD_MASKED(nbin, plw->plw_bin_step,
				    neq_mask, plw->plw_color_mask);
				plw->plw_bin_split_prev = plw->plw_bin0;
			}

			if (vac_colors > 1 && nbin == plw->plw_bin_marker) {
				plw->plw_bin_marker =
				    nbin = INC_MASKED(nbin, neq_mask,
				    plw->plw_color_mask);
				plw->plw_bin_split_prev = plw->plw_bin0;
				/*
				 * large pages all have the same vac color
				 * so by now we should be done with next
				 * size page splitting process
				 */
				ASSERT(plw->plw_bins[1] == 0);
				plw->plw_do_split = 0;
				return (nbin);
			}

		} else {
			uint_t bin_jump = (vac_colors == 1) ?
			    (BIN_STEP & ~3) - (plw->plw_bin0 & 3) : BIN_STEP;

			bin_jump &= ~(vac_colors - 1);

			nbin0 = ADD_MASKED(plw->plw_bin0, bin_jump, neq_mask,
			    plw->plw_color_mask);

			if ((nbin0 ^ plw->plw_bin0) & plw->plw_ceq_mask[szc]) {

				plw->plw_bin_marker = nbin = nbin0;

				if (plw->plw_bins[nszc] != 0) {
					/*
					 * check if next page size bin is the
					 * same as the next page size bin for
					 * bin0
					 */
					nbin_nsz = PAGE_GET_NSZ_COLOR(szc,
					    nbin);
					bin0_nsz = PAGE_GET_NSZ_COLOR(szc,
					    plw->plw_bin0);

					if ((bin0_nsz ^ nbin_nsz) &
					    plw->plw_ceq_mask[nszc])
						plw->plw_do_split = 1;
				}
				return (nbin);
			}
		}
	}

	if (plw->plw_bins[nszc] != 0) {
		nbin_nsz = PAGE_GET_NSZ_COLOR(szc, nbin);
		if (!((plw->plw_split_next ^ nbin_nsz) &
		    plw->plw_ceq_mask[nszc]))
			plw->plw_do_split = 1;
	}

	return (nbin);
}
page_t *
page_get_mnode_freelist(int mnode, uint_t bin, int mtype, uchar_t szc,
    uint_t flags)
{
	kmutex_t	*pcm;
	page_t		*pp, *first_pp;
	uint_t		sbin;
	int		plw_initialized;
	page_list_walker_t plw;

	ASSERT(szc < mmu_page_sizes);

	VM_STAT_ADD(vmm_vmstats.pgmf_alloc[szc]);

	MTYPE_START(mnode, mtype, flags);
	if (mtype < 0) {	/* mnode does not have memory in mtype range */
		VM_STAT_ADD(vmm_vmstats.pgmf_allocempty[szc]);
		return (NULL);
	}
try_again:

	plw_initialized = 0;
	plw.plw_ceq_dif = 1;

	/*
	 * Only hold one freelist lock at a time, that way we
	 * can start anywhere and not have to worry about lock
	 * ordering.
	 */
	for (plw.plw_count = 0;
	    plw.plw_count < plw.plw_ceq_dif; plw.plw_count++) {
		sbin = bin;
		do {
			if (!PAGE_FREELISTS(mnode, szc, bin, mtype))
				goto bin_empty_1;

			pcm = PC_BIN_MUTEX(mnode, bin, PG_FREE_LIST);
			mutex_enter(pcm);
			pp = PAGE_FREELISTS(mnode, szc, bin, mtype);
			if (pp == NULL)
				goto bin_empty_0;

			/*
			 * These were set before the page
			 * was put on the free list,
			 * they must still be set.
			 */
			ASSERT(PP_ISFREE(pp));
			ASSERT(PP_ISAGED(pp));
			ASSERT(pp->p_vnode == NULL);
			ASSERT(pp->p_hash == NULL);
			ASSERT(pp->p_offset == (u_offset_t)-1);
			ASSERT(pp->p_szc == szc);
			ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode);

			/*
			 * Walk down the hash chain.
			 * 8k pages are linked on p_next
			 * and p_prev fields. Large pages
			 * are a contiguous group of
			 * constituent pages linked together
			 * on their p_next and p_prev fields.
			 * The large pages are linked together
			 * on the hash chain using p_vpnext
			 * p_vpprev of the base constituent
			 * page of each large page.
			 */
			first_pp = pp;
			while (IS_DUMP_PAGE(pp) || !page_trylock_cons(pp,
			    SE_EXCL)) {
				pp = (szc == 0) ? pp->p_next : pp->p_vpnext;
				if (pp == first_pp)
					break;

				ASSERT(PP_ISFREE(pp));
				ASSERT(PP_ISAGED(pp));
				ASSERT(pp->p_vnode == NULL);
				ASSERT(pp->p_hash == NULL);
				ASSERT(pp->p_offset == (u_offset_t)-1);
				ASSERT(pp->p_szc == szc);
				ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode);
			}

			ASSERT(mtype == PP_2_MTYPE(pp));
			ASSERT(pp->p_szc == szc);
			if (szc == 0) {
				page_sub(&PAGE_FREELISTS(mnode,
				    szc, bin, mtype), pp);
			} else {
				page_vpsub(&PAGE_FREELISTS(mnode,
				    szc, bin, mtype), pp);
				CHK_LPG(pp, szc);
			}
			page_ctr_sub(mnode, mtype, pp, PG_FREE_LIST);

			if ((PP_ISFREE(pp) == 0) || (PP_ISAGED(pp) == 0))
				panic("free page is not. pp %p", (void *)pp);
			mutex_exit(pcm);

#if defined(__sparc)
			ASSERT(!kcage_on || PP_ISNORELOC(pp) ||
			    (flags & PG_NORELOC) == 0);

			if (PP_ISNORELOC(pp))
				kcage_freemem_sub(page_get_pagecnt(szc));
#endif
			VM_STAT_ADD(vmm_vmstats.pgmf_allocok[szc]);
			return (pp);

bin_empty_0:
			mutex_exit(pcm);
bin_empty_1:
			if (plw_initialized == 0) {
				page_list_walk_init(szc, flags, bin, 1, 1,
				    &plw);
				plw_initialized = 1;
				ASSERT(plw.plw_colors <=
				    PAGE_GET_PAGECOLORS(szc));
				ASSERT(plw.plw_colors > 0);
				ASSERT((plw.plw_colors &
				    (plw.plw_colors - 1)) == 0);
				ASSERT(bin < plw.plw_colors);
				ASSERT(plw.plw_ceq_mask[szc] < plw.plw_colors);
			}
			/* calculate the next bin with equivalent color */
			bin = ADD_MASKED(bin, plw.plw_bin_step,
			    plw.plw_ceq_mask[szc], plw.plw_color_mask);
		} while (sbin != bin);

		/*
		 * color bins are all empty if color match. Try and
		 * satisfy the request by breaking up or coalescing
		 * pages from a different size freelist of the correct
		 * color that satisfies the ORIGINAL color requested.
		 * If that fails then try pages of the same size but
		 * different colors assuming we are not called with
		 * PG_MATCH_COLOR.
		 */
		if (plw.plw_do_split &&
		    (pp = page_freelist_split(szc, bin, mnode,
		    mtype, PFNNULL, PFNNULL, &plw)) != NULL)
			return (pp);

		if (szc > 0 && (pp = page_freelist_coalesce(mnode, szc,
		    bin, plw.plw_ceq_mask[szc], mtype, PFNNULL)) != NULL)
			return (pp);

		if (plw.plw_ceq_dif > 1)
			bin = page_list_walk_next_bin(szc, bin, &plw);
	}

	/* if allowed, cycle through additional mtypes */
	MTYPE_NEXT(mnode, mtype, flags);
	if (mtype >= 0)
		goto try_again;

	VM_STAT_ADD(vmm_vmstats.pgmf_allocfailed[szc]);

	return (NULL);
}
/*
 * Returns the count of free pages for 'pp' with size code 'szc'.
 * Note: This function does not return an exact value as the page freelist
 * locks are not held and thus the values in the page_counters may be
 * changing as we walk through the data.
 */
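/*
 * Illustrative note (added commentary): PAGE_COUNTERS(mnode, r, idx)
 * counts free regions of the next size down, so each count is scaled
 * by PNUM_SHIFT(r - 1) to convert it into PAGESIZE pages. The walk
 * below then descends through the smaller region sizes, skipping any
 * count that was already accounted for at a larger size.
 */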
pgcnt_t
page_freecnt(int mnode, page_t *pp, uchar_t szc)
{
	ssize_t	r = szc;	/* region size */
	ssize_t	idx;
	pgcnt_t	i;
	pgcnt_t	pgfree, cnt;
	pgcnt_t	full, range;

	/* Make sure pagenum passed in is aligned properly */
	ASSERT((pp->p_pagenum & (PNUM_SIZE(szc) - 1)) == 0);

	/* Prevent page_counters dynamic memory from being freed */
	rw_enter(&page_ctrs_rwlock[mnode], RW_READER);
	idx = PNUM_TO_IDX(mnode, r, pp->p_pagenum);
	cnt = PAGE_COUNTERS(mnode, r, idx);
	pgfree = cnt << PNUM_SHIFT(r - 1);
	range = FULL_REGION_CNT(szc);

	/* Check for completely full region */
	if (cnt == range) {
		rw_exit(&page_ctrs_rwlock[mnode]);
		return (pgfree);
	}

	while (--r > 0) {
		idx = PNUM_TO_IDX(mnode, r, pp->p_pagenum);
		full = FULL_REGION_CNT(r);
		for (i = 0; i < range; i++, idx++) {
			cnt = PAGE_COUNTERS(mnode, r, idx);
			/*
			 * If cnt here is full, that means we have already
			 * accounted for these pages earlier.
			 */
			if (cnt != full) {
				pgfree += (cnt << PNUM_SHIFT(r - 1));
			}
		}
		range *= full;
	}
	rw_exit(&page_ctrs_rwlock[mnode]);
	return (pgfree);
}
/*
 * Called from page_geti_contig_pages to exclusively lock constituent pages
 * starting from 'spp' for page size code 'szc'.
 *
 * If 'ptcpthreshold' is set, the number of free pages needed in the 'szc'
 * region needs to be greater than or equal to the threshold.
 */
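/*
 * Illustrative note (added commentary): with ptcpthreshold set to N,
 * the trylock attempt below is skipped unless at least pgcnt / N of the
 * szc region's constituent pages are already free; e.g. N == 2 requires
 * roughly half the pages to be free before the more expensive
 * lock-and-relocate path is tried.
 */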
int
page_trylock_contig_pages(int mnode, page_t *spp, uchar_t szc, int flags)
{
	pgcnt_t	pgcnt = PNUM_SIZE(szc);
	pgcnt_t	pgfree, i;
	page_t	*pp;

	VM_STAT_ADD(vmm_vmstats.ptcp[szc]);

	if ((ptcpthreshold == 0) || (flags & PGI_PGCPHIPRI))
		goto skipptcpcheck;
	/*
	 * check if there are sufficient free pages available before attempting
	 * to trylock. Count is approximate as page counters can change.
	 */
	pgfree = page_freecnt(mnode, spp, szc);

	/* attempt to trylock if there are sufficient already free pages */
	if (pgfree < pgcnt/ptcpthreshold) {
		VM_STAT_ADD(vmm_vmstats.ptcpfreethresh[szc]);
		return (0);
	}

skipptcpcheck:

	for (i = 0; i < pgcnt; i++) {
		pp = &spp[i];
		if (!page_trylock(pp, SE_EXCL)) {
			VM_STAT_ADD(vmm_vmstats.ptcpfailexcl[szc]);
			while (--i != (pgcnt_t)-1) {
				pp = &spp[i];
				ASSERT(PAGE_EXCL(pp));
				page_unlock_nocapture(pp);
			}
			return (0);
		}
		ASSERT(spp[i].p_pagenum == spp->p_pagenum + i);
		if ((pp->p_szc > szc || (szc && pp->p_szc == szc)) &&
		    !PP_ISFREE(pp)) {
			VM_STAT_ADD(vmm_vmstats.ptcpfailszc[szc]);
			page_unlock_nocapture(pp);
			return (0);
		}

		/*
		 * If a page has been marked non-relocatable or has been
		 * explicitly locked in memory, we don't want to relocate it;
		 * unlock the pages and fail the operation.
		 */
		if (PP_ISNORELOC(pp) ||
		    pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
			VM_STAT_ADD(vmm_vmstats.ptcpfailcage[szc]);
			while (i != (pgcnt_t)-1) {
				pp = &spp[i];
				ASSERT(PAGE_EXCL(pp));
				page_unlock_nocapture(pp);
				i--;
			}
			return (0);
		}
	}
	VM_STAT_ADD(vmm_vmstats.ptcpok[szc]);
	return (1);
}
/*
 * Claim large page pointed to by 'pp'. 'pp' is the starting set
 * of 'szc' constituent pages that had been locked exclusively previously.
 * Will attempt to relocate constituent pages in use.
 */
page_t *
page_claim_contig_pages(page_t *pp, uchar_t szc, int flags)
{
	spgcnt_t	pgcnt, npgs, i;
	page_t		*targpp, *rpp, *hpp;
	page_t		*replpp = NULL;
	page_t		*pplist = NULL;

	ASSERT(pp != NULL);

	pgcnt = page_get_pagecnt(szc);
	while (pgcnt) {
		ASSERT(PAGE_EXCL(pp));
		ASSERT(!PP_ISNORELOC(pp));
		if (PP_ISFREE(pp)) {
			/*
			 * If this is a PG_FREE_LIST page then its
			 * size code can change underneath us due to
			 * page promotion or demotion. As an optimization
			 * use page_list_sub_pages() instead of
			 * page_list_sub().
			 */
			if (PP_ISAGED(pp)) {
				page_list_sub_pages(pp, szc);
				if (pp->p_szc == szc) {
					return (pp);
				}
				ASSERT(pp->p_szc < szc);
				npgs = page_get_pagecnt(pp->p_szc);
				hpp = pp;
				for (i = 0; i < npgs; i++, pp++) {
					pp->p_szc = szc;
				}
				page_list_concat(&pplist, &hpp);
				pgcnt -= npgs;
				continue;
			}
			ASSERT(!PP_ISAGED(pp));
			ASSERT(pp->p_szc == 0);
			page_list_sub(pp, PG_CACHE_LIST);
			page_hashout(pp, NULL);
			PP_SETAGED(pp);
			pp->p_szc = szc;
			page_list_concat(&pplist, &pp);
			pp++;
			pgcnt--;
			continue;
		}
		npgs = page_get_pagecnt(pp->p_szc);

		/*
		 * page_create_wait freemem accounting done by caller of
		 * page_get_freelist and not necessary to call it prior to
		 * calling page_get_replacement_page.
		 *
		 * page_get_replacement_page can call page_get_contig_pages
		 * to acquire a large page (szc > 0); the replacement must be
		 * smaller than the contig page size to avoid looping or
		 * szc == 0 and PGI_PGCPSZC0 is set.
		 */
		if (pp->p_szc < szc || (szc == 0 && (flags & PGI_PGCPSZC0))) {
			replpp = page_get_replacement_page(pp, NULL, 0);
			if (replpp) {
				npgs = page_get_pagecnt(pp->p_szc);
				ASSERT(npgs <= pgcnt);
				targpp = pp;
			}
		}

		/*
		 * If replacement is NULL or do_page_relocate fails, fail
		 * coalescing of pages.
		 */
		if (replpp == NULL || (do_page_relocate(&targpp, &replpp, 0,
		    &npgs, NULL) != 0)) {
			/*
			 * Unlock un-processed target list
			 */
			while (pgcnt--) {
				ASSERT(PAGE_EXCL(pp));
				page_unlock_nocapture(pp);
				pp++;
			}
			/*
			 * Free the processed target list.
			 */
			while (pplist) {
				pp = pplist;
				page_sub(&pplist, pp);
				ASSERT(PAGE_EXCL(pp));
				ASSERT(pp->p_szc == szc);
				ASSERT(PP_ISFREE(pp));
				ASSERT(PP_ISAGED(pp));
				pp->p_szc = 0;
				page_list_add(pp, PG_FREE_LIST | PG_LIST_TAIL);
				page_unlock_nocapture(pp);
			}

			if (replpp != NULL)
				page_free_replacement_page(replpp);

			return (NULL);
		}
		ASSERT(pp == targpp);

		ASSERT(hpp = pp);	/* That's right, it's an assignment */

		pp += npgs;
		pgcnt -= npgs;

		while (npgs--) {
			ASSERT(PAGE_EXCL(targpp));
			ASSERT(!PP_ISFREE(targpp));
			ASSERT(!PP_ISNORELOC(targpp));
			PP_SETFREE(targpp);
			ASSERT(PP_ISAGED(targpp));
			ASSERT(targpp->p_szc < szc || (szc == 0 &&
			    (flags & PGI_PGCPSZC0)));
			targpp->p_szc = szc;
			targpp = targpp->p_next;

			rpp = replpp;
			ASSERT(rpp != NULL);
			page_sub(&replpp, rpp);
			ASSERT(PAGE_EXCL(rpp));
			ASSERT(!PP_ISFREE(rpp));
			page_unlock_nocapture(rpp);
		}
		ASSERT(targpp == hpp);
		ASSERT(replpp == NULL);
		page_list_concat(&pplist, &targpp);
	}
	CHK_LPG(pplist, szc);
	return (pplist);
}
/*
 * Trim kernel cage from pfnlo-pfnhi and store result in lo-hi. Return code
 * of 0 means nothing left after trim.
 */
int
trimkcage(struct memseg *mseg, pfn_t *lo, pfn_t *hi, pfn_t pfnlo, pfn_t pfnhi)
{
	pfn_t	kcagepfn;
	int	decr;
	int	rc = 0;

	if (PP_ISNORELOC(mseg->pages)) {
		if (PP_ISNORELOC(mseg->epages - 1) == 0) {

			/* lower part of this mseg inside kernel cage */
			decr = kcage_current_pfn(&kcagepfn);

			/* kernel cage may have transitioned past mseg */
			if (kcagepfn >= mseg->pages_base &&
			    kcagepfn < mseg->pages_end) {
				*lo = MAX(kcagepfn, pfnlo);
				*hi = MIN(pfnhi, (mseg->pages_end - 1));
				rc = 1;
			}
		}
		/* else entire mseg in the cage */
	} else {
		if (PP_ISNORELOC(mseg->epages - 1)) {

			/* upper part of this mseg inside kernel cage */
			decr = kcage_current_pfn(&kcagepfn);

			/* kernel cage may have transitioned past mseg */
			if (kcagepfn >= mseg->pages_base &&
			    kcagepfn < mseg->pages_end) {
				*hi = MIN(kcagepfn, pfnhi);
				*lo = MAX(pfnlo, mseg->pages_base);
				rc = 1;
			}
		} else {
			/* entire mseg outside of kernel cage */
			*lo = MAX(pfnlo, mseg->pages_base);
			*hi = MIN(pfnhi, (mseg->pages_end - 1));
			rc = 1;
		}
	}
	return (rc);
}
/*
 * called from page_get_contig_pages to search 'pfnlo' thru 'pfnhi' to claim a
 * page with size code 'szc'. Claiming such a page requires acquiring
 * exclusive locks on all constituent pages (page_trylock_contig_pages),
 * relocating pages in use and concatenating these constituent pages into a
 * large page.
 *
 * The page lists do not have such a large page and page_freelist_split has
 * already failed to demote larger pages and/or coalesce smaller free pages.
 *
 * 'flags' may specify PG_MATCH_COLOR which would limit the search of large
 * pages with the same color as 'bin'.
 *
 * 'pfnflag' specifies the subset of the pfn range to search.
 */
page_t *
page_geti_contig_pages(int mnode, uint_t bin, uchar_t szc, int flags,
    pfn_t pfnlo, pfn_t pfnhi, pgcnt_t pfnflag)
{
	struct memseg	*mseg;
	pgcnt_t		szcpgcnt = page_get_pagecnt(szc);
	pgcnt_t		szcpgmask = szcpgcnt - 1;
	pfn_t		randpfn;
	page_t		*pp, *randpp, *endpp;
	uint_t		colors, ceq_mask;
	/* LINTED : set but not used in function */
	uint_t		color_mask __unused;
	pfn_t		hi, lo;
	uint_t		skip;
	MEM_NODE_ITERATOR_DECL(it);

	ASSERT(szc != 0 || (flags & PGI_PGCPSZC0));

	pfnlo = P2ROUNDUP(pfnlo, szcpgcnt);

	if ((pfnhi - pfnlo) + 1 < szcpgcnt || pfnlo >= pfnhi)
		return (NULL);

	ASSERT(szc < mmu_page_sizes);

	colors = PAGE_GET_PAGECOLORS(szc);
	color_mask = colors - 1;
	if ((colors > 1) && (flags & PG_MATCH_COLOR)) {
		uchar_t ceq = colorequivszc[szc];
		uint_t  ceq_dif = colors >> ((ceq >> 4) + (ceq & 0xf));

		ASSERT(ceq_dif > 0);
		ceq_mask = (ceq_dif - 1) << (ceq & 0xf);
	} else {
		ceq_mask = 0;
	}

	ASSERT(bin < colors);

	/* clear "non-significant" color bits */
	bin &= ceq_mask;

	/*
	 * trim the pfn range to search based on pfnflag. pfnflag is set
	 * when there have been previous page_get_contig_page failures to
	 * limit the search.
	 *
	 * The high bit in pfnflag specifies the number of 'slots' in the
	 * pfn range and the remainder of pfnflag specifies which slot.
	 * For example, a value of 1010b would mean the second slot of
	 * the pfn range that has been divided into 8 slots.
	 */
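	/*
	 * Worked decode (added commentary): for pfnflag = 1010b,
	 * highbit(pfnflag) = 4, so slots = 1 << 3 = 8 and
	 * slotid = 1010b & 0111b = 2; the szc-aligned pfn range is split
	 * into eight equal subdivisions and the one at index slotid is
	 * searched.
	 */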
	if (pfnflag > 1) {
		int	slots = 1 << (highbit(pfnflag) - 1);
		int	slotid = pfnflag & (slots - 1);
		pgcnt_t	szcpages;
		int	slotlen;

		pfnhi = P2ALIGN((pfnhi + 1), szcpgcnt) - 1;
		szcpages = ((pfnhi - pfnlo) + 1) / szcpgcnt;
		slotlen = howmany(szcpages, slots);
		/* skip if 'slotid' slot is empty */
		if (slotid * slotlen >= szcpages)
			return (NULL);
		pfnlo = pfnlo + (((slotid * slotlen) % szcpages) * szcpgcnt);
		ASSERT(pfnlo < pfnhi);
		if (pfnhi > pfnlo + (slotlen * szcpgcnt))
			pfnhi = pfnlo + (slotlen * szcpgcnt) - 1;
	}

	/*
	 * This routine can be called recursively so we shouldn't
	 * acquire a reader lock if a write request is pending. This
	 * could lead to a deadlock with the DR thread.
	 *
	 * Returning NULL informs the caller that we could not get
	 * a contig page with the required characteristics.
	 */
	if (!memsegs_trylock(0))
		return (NULL);

	/*
	 * loop through memsegs to look for contig page candidates
	 */
	for (mseg = memsegs; mseg != NULL; mseg = mseg->next) {
		if (pfnhi < mseg->pages_base || pfnlo >= mseg->pages_end) {
			/* no overlap */
			continue;
		}

		if (mseg->pages_end - mseg->pages_base < szcpgcnt)
			/* mseg too small */
			continue;

		/*
		 * trim off kernel cage pages from pfn range and check for
		 * a trimmed pfn range returned that does not span the
		 * desired large page size.
		 */
		if (kcage_on) {
			if (trimkcage(mseg, &lo, &hi, pfnlo, pfnhi) == 0 ||
			    lo >= hi || ((hi - lo) + 1) < szcpgcnt)
				continue;
		} else {
			lo = MAX(pfnlo, mseg->pages_base);
			hi = MIN(pfnhi, (mseg->pages_end - 1));
		}

		/* round to szcpgcnt boundaries */
		lo = P2ROUNDUP(lo, szcpgcnt);

		MEM_NODE_ITERATOR_INIT(lo, mnode, szc, &it);
		hi = P2ALIGN((hi + 1), szcpgcnt) - 1;

		if (hi <= lo)
			continue;

		/*
		 * set lo to point to the pfn for the desired bin. Large
		 * page sizes may only have a single page color
		 */
		skip = szcpgcnt;
		if (ceq_mask > 0 || interleaved_mnodes) {
			/* set lo to point at appropriate color */
			if (((PFN_2_COLOR(lo, szc, &it) ^ bin) & ceq_mask) ||
			    (interleaved_mnodes &&
			    PFN_2_MEM_NODE(lo) != mnode)) {
				PAGE_NEXT_PFN_FOR_COLOR(lo, szc, bin, ceq_mask,
				    color_mask, &it);
			}
			if (hi <= lo)
				/* mseg cannot satisfy color request */
				continue;
		}

		/* randomly choose a point between lo and hi to begin search */

		randpfn = (pfn_t)GETTICK();
		randpfn = ((randpfn % (hi - lo)) + lo) & ~(skip - 1);
		MEM_NODE_ITERATOR_INIT(randpfn, mnode, szc, &it);
		if (ceq_mask || interleaved_mnodes || randpfn == (pfn_t)-1) {
			if (randpfn != (pfn_t)-1) {
				PAGE_NEXT_PFN_FOR_COLOR(randpfn, szc, bin,
				    ceq_mask, color_mask, &it);
			}
			if (randpfn >= hi) {
				randpfn = lo;
				MEM_NODE_ITERATOR_INIT(randpfn, mnode, szc,
				    &it);
			}
		}
		randpp = mseg->pages + (randpfn - mseg->pages_base);

		ASSERT(randpp->p_pagenum == randpfn);

		pp = randpp;
		endpp = mseg->pages + (hi - mseg->pages_base) + 1;

		ASSERT(randpp + szcpgcnt <= endpp);

		do {
			ASSERT(!(pp->p_pagenum & szcpgmask));
			ASSERT(((PP_2_BIN(pp) ^ bin) & ceq_mask) == 0);

			if (page_trylock_contig_pages(mnode, pp, szc, flags)) {
				/* pages unlocked by page_claim on failure */
				if (page_claim_contig_pages(pp, szc, flags)) {
					memsegs_unlock(0);
					return (pp);
				}
			}

			if (ceq_mask == 0 && !interleaved_mnodes) {
				pp += skip;
			} else {
				pfn_t pfn = pp->p_pagenum;

				PAGE_NEXT_PFN_FOR_COLOR(pfn, szc, bin,
				    ceq_mask, color_mask, &it);
				if (pfn == (pfn_t)-1) {
					pp = endpp;
				} else {
					pp = mseg->pages +
					    (pfn - mseg->pages_base);
				}
			}
			if (pp >= endpp) {
				/* start from the beginning */
				MEM_NODE_ITERATOR_INIT(lo, mnode, szc, &it);
				pp = mseg->pages + (lo - mseg->pages_base);
				ASSERT(pp->p_pagenum == lo);
				ASSERT(pp + szcpgcnt <= endpp);
			}
		} while (pp != randpp);
	}
	memsegs_unlock(0);
	return (NULL);
}
/*
 * controlling routine that searches through physical memory in an attempt to
 * claim a large page based on the input parameters; the large page is not
 * currently available on the page free lists.
 *
 * calls page_geti_contig_pages with an initial pfn range from the mnode
 * and mtype. page_geti_contig_pages will trim off the parts of the pfn range
 * that overlaps with the kernel cage or does not match the requested page
 * color if PG_MATCH_COLOR is set. Since this search is very expensive,
 * page_geti_contig_pages may further limit the search range based on
 * previous failure counts (pgcpfailcnt[]).
 *
 * for PGI_PGCPSZC0 requests, page_get_contig_pages will relocate a base
 * pagesize page that satisfies mtype.
 */
page_t *
page_get_contig_pages(int mnode, uint_t bin, int mtype, uchar_t szc,
    uint_t flags)
{
	pfn_t		pfnlo, pfnhi;	/* contig pages pfn range */
	page_t		*pp;
	pgcnt_t		pfnflag = 0;	/* no limit on search if 0 */

	VM_STAT_ADD(vmm_vmstats.pgcp_alloc[szc]);

	/* no allocations from cage */
	flags |= PGI_NOCAGE;

	MTYPE_START(mnode, mtype, flags);
	if (mtype < 0) {	/* mnode does not have memory in mtype range */
		VM_STAT_ADD(vmm_vmstats.pgcp_allocempty[szc]);
		return (NULL);
	}

	ASSERT(szc > 0 || (flags & PGI_PGCPSZC0));

	/* do not limit search and ignore color if hi pri */

	if (pgcplimitsearch && ((flags & PGI_PGCPHIPRI) == 0))
		pfnflag = pgcpfailcnt[szc];

	/* remove color match to improve chances */

	if (flags & PGI_PGCPHIPRI || pfnflag)
		flags &= ~PG_MATCH_COLOR;

	do {
		/* get pfn range based on mnode and mtype */
		MNODETYPE_2_PFN(mnode, mtype, pfnlo, pfnhi);

		ASSERT(pfnhi >= pfnlo);

		pp = page_geti_contig_pages(mnode, bin, szc, flags,
		    pfnlo, pfnhi, pfnflag);

		if (pp != NULL) {
			pfnflag = pgcpfailcnt[szc];
			if (pfnflag) {
				/* double the search size */
				pgcpfailcnt[szc] = pfnflag >> 1;
			}
			VM_STAT_ADD(vmm_vmstats.pgcp_allocok[szc]);
			return (pp);
		}
		MTYPE_NEXT(mnode, mtype, flags);
	} while (mtype >= 0);

	VM_STAT_ADD(vmm_vmstats.pgcp_allocfailed[szc]);
	return (NULL);
}
#if defined(__i386) || defined(__amd64)
/*
 * Determine the likelihood of finding/coalescing a szc page.
 * Return 0 if the likelihood is small otherwise return 1.
 *
 * For now, be conservative and check only 1g pages and return 0
 * if there had been previous coalescing failures and the szc pages
 * needed to satisfy request would exhaust most of freemem.
 */
int
page_chk_freelist(uint_t szc)
{
	pgcnt_t pgcnt;

	pgcnt = page_get_pagecnt(szc);
	if (pgcpfailcnt[szc] && pgcnt + throttlefree >= freemem) {
		VM_STAT_ADD(vmm_vmstats.pcf_deny[szc]);
		return (0);
	}
	VM_STAT_ADD(vmm_vmstats.pcf_allow[szc]);
	return (1);
}
#endif
/*
 * Find the `best' page on the freelist for this (vp,off) (as,vaddr) pair.
 *
 * Does its own locking and accounting.
 * If PG_MATCH_COLOR is set, then NULL will be returned if there are no
 * pages of the proper color even if there are pages of a different color.
 *
 * Finds a page, removes it, THEN locks it.
 */
page_t *
page_get_freelist(struct vnode *vp, u_offset_t off, struct seg *seg,
    caddr_t vaddr, size_t size, uint_t flags, struct lgrp *lgrp)
{
	struct as	*as = seg->s_as;
	page_t		*pp = NULL;
	ulong_t		bin;
	uchar_t		szc;
	int		mnode;
	int		mtype;
	page_t		*(*page_get_func)(int, uint_t, int, uchar_t, uint_t);
	lgrp_mnode_cookie_t	lgrp_cookie;

	page_get_func = page_get_mnode_freelist;

	/*
	 * If we aren't passed a specific lgroup, or passed a freed lgrp,
	 * assume we wish to allocate near to the current thread's home.
	 */
	if (!LGRP_EXISTS(lgrp))
		lgrp = lgrp_home_lgrp();

	if (kcage_on) {
		if ((flags & (PG_NORELOC | PG_PANIC)) == PG_NORELOC &&
		    kcage_freemem < kcage_throttlefree + btop(size) &&
		    curthread != kcage_cageout_thread) {
			/*
			 * Set a "reserve" of kcage_throttlefree pages for
			 * PG_PANIC and cageout thread allocations.
			 *
			 * Everybody else has to serialize in
			 * page_create_get_something() to get a cage page, so
			 * that we don't deadlock cageout!
			 */
			return (NULL);
		}
	} else {
		flags &= ~PG_NORELOC;
		flags |= PGI_NOCAGE;
	}

	MTYPE_INIT(mtype, vp, vaddr, flags, size);

	/*
	 * Convert size to page size code.
	 */
	if ((szc = page_szc(size)) == (uchar_t)-1)
		panic("page_get_freelist: illegal page size request");
	ASSERT(szc < mmu_page_sizes);

	VM_STAT_ADD(vmm_vmstats.pgf_alloc[szc]);

	AS_2_BIN(as, seg, vp, vaddr, bin, szc);

	ASSERT(bin < PAGE_GET_PAGECOLORS(szc));

	/*
	 * Try to get a local page first, but try remote if we can't
	 * get a page of the right color.
	 */
pgretry:
	LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp, LGRP_SRCH_LOCAL);
	while ((mnode = lgrp_memnode_choose(&lgrp_cookie)) >= 0) {
		pp = page_get_func(mnode, bin, mtype, szc, flags);
		if (pp != NULL) {
			VM_STAT_ADD(vmm_vmstats.pgf_allocok[szc]);
			DTRACE_PROBE4(page__get,
			    lgrp_t *, lgrp,
			    int, mnode,
			    ulong_t, bin,
			    uint_t, flags);
			return (pp);
		}
	}

	/*
	 * for non-SZC0 PAGESIZE requests, check cachelist before checking
	 * remote free lists. Caller expected to call page_get_cachelist which
	 * will check local cache lists and remote free lists.
	 */
	if (szc == 0 && ((flags & PGI_PGCPSZC0) == 0)) {
		VM_STAT_ADD(vmm_vmstats.pgf_allocdeferred);
		return (NULL);
	}

	ASSERT(szc > 0 || (flags & PGI_PGCPSZC0));

	lgrp_stat_add(lgrp->lgrp_id, LGRP_NUM_ALLOC_FAIL, 1);

	if (!(flags & PG_LOCAL)) {
		/*
		 * Try to get a non-local freelist page.
		 */
		LGRP_MNODE_COOKIE_UPGRADE(lgrp_cookie);
		while ((mnode = lgrp_memnode_choose(&lgrp_cookie)) >= 0) {
			pp = page_get_func(mnode, bin, mtype, szc, flags);
			if (pp != NULL) {
				DTRACE_PROBE4(page__get,
				    lgrp_t *, lgrp,
				    int, mnode,
				    ulong_t, bin,
				    uint_t, flags);
				VM_STAT_ADD(vmm_vmstats.pgf_allocokrem[szc]);
				return (pp);
			}
		}
	}

	/*
	 * when the cage is off chances are page_get_contig_pages() will fail
	 * to lock a large page chunk; therefore, when the cage is off it's not
	 * called by default. This can be changed via /etc/system.
	 *
	 * page_get_contig_pages() also called to acquire a base pagesize page
	 * for page_create_get_something().
	 */
	if (!(flags & PG_NORELOC) && (pg_contig_disable == 0) &&
	    (kcage_on || pg_lpgcreate_nocage || szc == 0) &&
	    (page_get_func != page_get_contig_pages)) {

		VM_STAT_ADD(vmm_vmstats.pgf_allocretry[szc]);
		page_get_func = page_get_contig_pages;
		goto pgretry;
	}

	if (!(flags & PG_LOCAL) && pgcplimitsearch &&
	    page_get_func == page_get_contig_pages)
		SETPGCPFAILCNT(szc);

	VM_STAT_ADD(vmm_vmstats.pgf_allocfailed[szc]);

	return (NULL);
}
/*
 * Find the `best' page on the cachelist for this (vp,off) (as,vaddr) pair.
 *
 * Does its own locking.
 * If PG_MATCH_COLOR is set, then NULL will be returned if there are no
 * pages of the proper color even if there are pages of a different color.
 * Otherwise, scan the bins for ones with pages. For each bin with pages,
 * try to lock one of them. If no page can be locked, try the
 * next bin. Return NULL if a page can not be found and locked.
 *
 * Finds a page, tries to lock it, then removes it.
 */
page_t *
page_get_cachelist(struct vnode *vp, u_offset_t off, struct seg *seg,
    caddr_t vaddr, uint_t flags, struct lgrp *lgrp)
{
	page_t		*pp;
	struct as	*as = seg->s_as;
	ulong_t		bin;
	int		mnode;
	int		mtype;
	lgrp_mnode_cookie_t	lgrp_cookie;

	/*
	 * If we aren't passed a specific lgroup, or passed a freed lgrp,
	 * assume we wish to allocate near to the current thread's home.
	 */
	if (!LGRP_EXISTS(lgrp))
		lgrp = lgrp_home_lgrp();

	if (!kcage_on) {
		flags &= ~PG_NORELOC;
		flags |= PGI_NOCAGE;
	}

	if ((flags & (PG_NORELOC | PG_PANIC | PG_PUSHPAGE)) == PG_NORELOC &&
	    kcage_freemem <= kcage_throttlefree) {
		/*
		 * Reserve kcage_throttlefree pages for critical kernel
		 * threads.
		 *
		 * Everybody else has to go to page_create_get_something()
		 * to get a cage page, so we don't deadlock cageout.
		 */
		return (NULL);
	}

	AS_2_BIN(as, seg, vp, vaddr, bin, 0);

	ASSERT(bin < PAGE_GET_PAGECOLORS(0));

	MTYPE_INIT(mtype, vp, vaddr, flags, MMU_PAGESIZE);

	VM_STAT_ADD(vmm_vmstats.pgc_alloc);

	/*
	 * Try local cachelists first
	 */
	LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp, LGRP_SRCH_LOCAL);
	while ((mnode = lgrp_memnode_choose(&lgrp_cookie)) >= 0) {
		pp = page_get_mnode_cachelist(bin, flags, mnode, mtype);
		if (pp != NULL) {
			VM_STAT_ADD(vmm_vmstats.pgc_allocok);
			DTRACE_PROBE4(page__get,
			    lgrp_t *, lgrp,
			    int, mnode,
			    ulong_t, bin,
			    uint_t, flags);
			return (pp);
		}
	}

	lgrp_stat_add(lgrp->lgrp_id, LGRP_NUM_ALLOC_FAIL, 1);

	/*
	 * Try freelists/cachelists that are farther away
	 * This is our only chance to allocate remote pages for PAGESIZE
	 * requests.
	 */
	LGRP_MNODE_COOKIE_UPGRADE(lgrp_cookie);
	while ((mnode = lgrp_memnode_choose(&lgrp_cookie)) >= 0) {
		pp = page_get_mnode_freelist(mnode, bin, mtype,
		    0, flags);
		if (pp != NULL) {
			VM_STAT_ADD(vmm_vmstats.pgc_allocokdeferred);
			DTRACE_PROBE4(page__get,
			    lgrp_t *, lgrp,
			    int, mnode,
			    ulong_t, bin,
			    uint_t, flags);
			return (pp);
		}
		pp = page_get_mnode_cachelist(bin, flags, mnode, mtype);
		if (pp != NULL) {
			VM_STAT_ADD(vmm_vmstats.pgc_allocokrem);
			DTRACE_PROBE4(page__get,
			    lgrp_t *, lgrp,
			    int, mnode,
			    ulong_t, bin,
			    uint_t, flags);
			return (pp);
		}
	}

	VM_STAT_ADD(vmm_vmstats.pgc_allocfailed);
	return (NULL);
}
page_t *
page_get_mnode_cachelist(uint_t bin, uint_t flags, int mnode, int mtype)
{
	kmutex_t		*pcm;
	page_t			*pp, *first_pp;
	uint_t			sbin;
	int			plw_initialized;
	page_list_walker_t	plw;

	VM_STAT_ADD(vmm_vmstats.pgmc_alloc);

	MTYPE_START(mnode, mtype, flags);
	if (mtype < 0) {	/* mnode does not have memory in mtype range */
		VM_STAT_ADD(vmm_vmstats.pgmc_allocempty);
		return (NULL);
	}
try_again:

	plw_initialized = 0;
	plw.plw_ceq_dif = 1;

	/*
	 * Only hold one cachelist lock at a time, that way we
	 * can start anywhere and not have to worry about lock
	 * ordering.
	 */
	for (plw.plw_count = 0;
	    plw.plw_count < plw.plw_ceq_dif; plw.plw_count++) {
		sbin = bin;
		do {
			if (!PAGE_CACHELISTS(mnode, bin, mtype))
				goto bin_empty_1;

			pcm = PC_BIN_MUTEX(mnode, bin, PG_CACHE_LIST);
			mutex_enter(pcm);
			pp = PAGE_CACHELISTS(mnode, bin, mtype);
			if (pp == NULL)
				goto bin_empty_0;

			first_pp = pp;
			ASSERT(pp->p_vnode);
			ASSERT(PP_ISAGED(pp) == 0);
			ASSERT(pp->p_szc == 0);
			ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) == mnode);
			while (IS_DUMP_PAGE(pp) || !page_trylock(pp, SE_EXCL)) {
				pp = pp->p_next;
				ASSERT(pp->p_szc == 0);
				if (pp == first_pp) {
					/*
					 * We have searched the complete list!
					 * And all of them (might only be one)
					 * are locked. This can happen since
					 * these pages can also be found via
					 * the hash list. When found via the
					 * hash list, they are locked first,
					 * then removed. We give up to let the
					 * other thread run.
					 */
					pp = NULL;
					break;
				}
				ASSERT(pp->p_vnode);
				ASSERT(PP_ISFREE(pp));
				ASSERT(PP_ISAGED(pp) == 0);
				ASSERT(PFN_2_MEM_NODE(pp->p_pagenum) ==
				    mnode);
			}

			if (pp) {
				page_t	**ppp;
				/*
				 * Found and locked a page.
				 * Pull it off the list.
				 */
				ASSERT(mtype == PP_2_MTYPE(pp));
				ppp = &PAGE_CACHELISTS(mnode, bin, mtype);
				page_sub(ppp, pp);
				/*
				 * Subtract counters before releasing pcm mutex
				 * to avoid a race with page_freelist_coalesce
				 * and page_freelist_split.
				 */
				page_ctr_sub(mnode, mtype, pp, PG_CACHE_LIST);
				mutex_exit(pcm);
				ASSERT(pp->p_vnode);
				ASSERT(PP_ISAGED(pp) == 0);
#if defined(__sparc)
				ASSERT(!kcage_on ||
				    (flags & PG_NORELOC) == 0 ||
				    PP_ISNORELOC(pp));
				if (PP_ISNORELOC(pp)) {
					kcage_freemem_sub(1);
				}
#endif
				VM_STAT_ADD(vmm_vmstats.pgmc_allocok);
				return (pp);
			}
bin_empty_0:
			mutex_exit(pcm);
bin_empty_1:
			if (plw_initialized == 0) {
				page_list_walk_init(0, flags, bin, 0, 1, &plw);
				plw_initialized = 1;
			}
			/* calculate the next bin with equivalent color */
			bin = ADD_MASKED(bin, plw.plw_bin_step,
			    plw.plw_ceq_mask[0], plw.plw_color_mask);
		} while (sbin != bin);

		if (plw.plw_ceq_dif > 1)
			bin = page_list_walk_next_bin(0, bin, &plw);
	}

	MTYPE_NEXT(mnode, mtype, flags);
	if (mtype >= 0)
		goto try_again;

	VM_STAT_ADD(vmm_vmstats.pgmc_allocfailed);
	return (NULL);
}
#define	REPL_PAGE_STATS

#ifdef REPL_PAGE_STATS
struct repl_page_stats {
	uint_t	ngets;
	uint_t	ngets_noreloc;
	uint_t	npgr_noreloc;
	uint_t	nnopage_first;
	uint_t	nnopage;
	uint_t	nhashout;
	uint_t	nnofree;
	uint_t	nnext_pp;
} repl_page_stats;

#define	REPL_STAT_INCR(v)	atomic_inc_32(&repl_page_stats.v)
#else /* REPL_PAGE_STATS */
#define	REPL_STAT_INCR(v)
#endif /* REPL_PAGE_STATS */
/*
 * The freemem accounting must be done by the caller.
 * First we try to get a replacement page of the same size as like_pp,
 * if that is not possible, then we just get a set of discontiguous
 * PAGESIZE pages.
 */
page_t *
page_get_replacement_page(page_t *orig_like_pp, struct lgrp *lgrp_target,
    uint_t pgrflags)
{
	page_t		*like_pp;
	page_t		*pp, *pplist;
	page_t		*pl = NULL;
	ulong_t		bin;
	int		mnode, page_mnode;
	int		szc;
	spgcnt_t	npgs, pg_cnt;
	pfn_t		pfnum;
	int		mtype;
	int		flags = 0;
	lgrp_t		*lgrp;
	lgrp_mnode_cookie_t	lgrp_cookie;

	REPL_STAT_INCR(ngets);
	like_pp = orig_like_pp;
	ASSERT(PAGE_EXCL(like_pp));

	szc = like_pp->p_szc;
	npgs = page_get_pagecnt(szc);
	/*
	 * Now we reset like_pp to the base page_t.
	 * That way, we won't walk past the end of this 'szc' page.
	 */
	pfnum = PFN_BASE(like_pp->p_pagenum, szc);
	like_pp = page_numtopp_nolock(pfnum);
	ASSERT(like_pp->p_szc == szc);

	if (PP_ISNORELOC(like_pp)) {
		REPL_STAT_INCR(ngets_noreloc);
		flags = PGI_RELOCONLY;
	} else if (pgrflags & PGR_NORELOC) {
		REPL_STAT_INCR(npgr_noreloc);
		flags = PG_NORELOC;
	}

	/*
	 * Kernel pages must always be replaced with the same size
	 * pages, since we cannot properly handle demotion of kernel
	 * pages.
	 */
	if (PP_ISKAS(like_pp))
		pgrflags |= PGR_SAMESZC;

	MTYPE_PGR_INIT(mtype, flags, like_pp, page_mnode, npgs);

	while (npgs) {
		pplist = NULL;
		for (;;) {
			pg_cnt = page_get_pagecnt(szc);
			bin = PP_2_BIN(like_pp);
			ASSERT(like_pp->p_szc == orig_like_pp->p_szc);
			ASSERT(pg_cnt <= npgs);

			/*
			 * If an lgroup was specified, try to get the
			 * page from that lgroup.
			 * NOTE: Must be careful with code below because
			 * lgroup may disappear and reappear since there
			 * is no locking for lgroup here.
			 */
			if (LGRP_EXISTS(lgrp_target)) {
				/*
				 * Keep local variable for lgroup separate
				 * from lgroup argument since this code should
				 * only be exercised when lgroup argument
				 * exists.
				 */
				lgrp = lgrp_target;

				/* Try the lgroup's freelists first */
				LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp,
				    LGRP_SRCH_LOCAL);
				while ((pplist == NULL) &&
				    (mnode = lgrp_memnode_choose(&lgrp_cookie))
				    != -1) {
					pplist =
					    page_get_mnode_freelist(mnode, bin,
					    mtype, szc, flags);
				}

				/*
				 * Now try it's cachelists if this is a
				 * small page. Don't need to do it for
				 * larger ones since page_freelist_coalesce()
				 * already failed.
				 */
				if (pplist != NULL || szc != 0)
					break;

				/* Now try it's cachelists */
				LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp,
				    LGRP_SRCH_LOCAL);

				while ((pplist == NULL) &&
				    (mnode = lgrp_memnode_choose(&lgrp_cookie))
				    != -1) {
					pplist =
					    page_get_mnode_cachelist(bin, flags,
					    mnode, mtype);
				}
				if (pplist != NULL) {
					page_hashout(pplist, NULL);
					PP_SETAGED(pplist);
					REPL_STAT_INCR(nhashout);
					break;
				}
				/* Done looking in this lgroup. Bail out. */
				break;
			}

			/*
			 * No lgroup was specified (or lgroup was removed by
			 * DR, so just try to get the page as close to
			 * like_pp's mnode as possible.
			 * First try the local freelist...
			 */
			mnode = PP_2_MEM_NODE(like_pp);
			pplist = page_get_mnode_freelist(mnode, bin,
			    mtype, szc, flags);
			if (pplist != NULL)
				break;

			REPL_STAT_INCR(nnofree);

			/*
			 * ...then the local cachelist. Don't need to do it for
			 * larger pages cause page_freelist_coalesce() already
			 * failed there anyway.
			 */
			if (szc == 0) {
				pplist = page_get_mnode_cachelist(bin, flags,
				    mnode, mtype);
				if (pplist != NULL) {
					page_hashout(pplist, NULL);
					PP_SETAGED(pplist);
					REPL_STAT_INCR(nhashout);
					break;
				}
			}

			/* Now try remote freelists */
			page_mnode = mnode;
			lgrp =
			    lgrp_hand_to_lgrp(MEM_NODE_2_LGRPHAND(page_mnode));
			LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp,
			    LGRP_SRCH_HIER);
			while (pplist == NULL &&
			    (mnode = lgrp_memnode_choose(&lgrp_cookie))
			    != -1) {
				if ((mnode == page_mnode) ||
				    (mem_node_config[mnode].exists == 0))
					continue;

				pplist = page_get_mnode_freelist(mnode,
				    bin, mtype, szc, flags);
			}

			if (pplist != NULL)
				break;

			/* Now try remote cachelists */
			LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp,
			    LGRP_SRCH_HIER);
			while (pplist == NULL && szc == 0) {
				mnode = lgrp_memnode_choose(&lgrp_cookie);
				if (mnode == -1)
					break;
				if ((mnode == page_mnode) ||
				    (mem_node_config[mnode].exists == 0))
					continue;

				pplist = page_get_mnode_cachelist(bin,
				    flags, mnode, mtype);

				if (pplist != NULL) {
					page_hashout(pplist, NULL);
					PP_SETAGED(pplist);
					REPL_STAT_INCR(nhashout);
					break;
				}
			}

			/*
			 * Break out of while loop under the following cases:
			 * - If we successfully got a page.
			 * - If pgrflags specified only returning a specific
			 *   page size and we could not find that page size.
			 * - If we could not satisfy the request with PAGESIZE
			 *   or larger pages.
			 */
			if (pplist != NULL || szc == 0)
				break;

			if ((pgrflags & PGR_SAMESZC) || pgrppgcp) {
				/* try to find contig page */
				LGRP_MNODE_COOKIE_INIT(lgrp_cookie, lgrp,
				    LGRP_SRCH_HIER);

				while ((pplist == NULL) &&
				    (mnode =
				    lgrp_memnode_choose(&lgrp_cookie))
				    != -1) {
					pplist = page_get_contig_pages(
					    mnode, bin, mtype, szc,
					    flags | PGI_PGCPHIPRI);
				}
				break;
			}

			/*
			 * The correct thing to do here is try the next
			 * page size down using szc--. Due to a bug
			 * with the processing of HAT_RELOAD_SHARE
			 * where the sfmmu_ttecnt arrays of all
			 * hats sharing an ISM segment don't get updated,
			 * using intermediate size pages for relocation
			 * can lead to continuous page faults.
			 */
			szc = 0;
		}

		if (pplist != NULL) {
			DTRACE_PROBE4(page__get,
			    lgrp_t *, lgrp,
			    int, mnode,
			    ulong_t, bin,
			    uint_t, flags);

			while (pplist != NULL && pg_cnt--) {
				ASSERT(pplist != NULL);
				pp = pplist;
				page_sub(&pplist, pp);
				PP_CLRFREE(pp);
				PP_CLRAGED(pp);
				page_list_concat(&pl, &pp);
				npgs--;
				like_pp = like_pp + 1;
				REPL_STAT_INCR(nnext_pp);
			}
			ASSERT(pg_cnt == 0);
		} else {
			break;
		}
	}

	if (npgs) {
		/*
		 * We were unable to allocate the necessary number
		 * of pages.
		 * We need to free up any pl.
		 */
		REPL_STAT_INCR(nnopage);
		page_free_replacement_page(pl);
		pl = NULL;
	}

	return (pl);
}
/*
 * demote a free large page to its constituent pages
 */
void
page_demote_free_pages(page_t *pp)
{
	int mnode;

	ASSERT(PAGE_LOCKED(pp));
	ASSERT(PP_ISFREE(pp));
	ASSERT(pp->p_szc != 0 && pp->p_szc < mmu_page_sizes);

	mnode = PP_2_MEM_NODE(pp);
	page_freelist_lock(mnode);
	if (pp->p_szc != 0) {
		(void) page_demote(mnode, PFN_BASE(pp->p_pagenum,
		    pp->p_szc), 0, pp->p_szc, 0, PC_NO_COLOR, PC_FREE);
	}
	page_freelist_unlock(mnode);
	ASSERT(pp->p_szc == 0);
}
/*
 * Factor in colorequiv to check additional 'equivalent' bins.
 * colorequiv may be set in /etc/system
 */
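/*
 * Illustrative note (added commentary): setting colorequiv to 2 in
 * /etc/system gives sv_a = lowbit(2) - 1 = 1, so each page size whose
 * freelists have more than one color records (1 << 4) in
 * colorequivszc[], i.e. one high order color bit is ignored and bins
 * that differ only in that top color bit are treated as equivalent.
 */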
void
page_set_colorequiv_arr(void)
{
	if (colorequiv > 1) {
		int i;
		uint_t sv_a = lowbit(colorequiv) - 1;

		if (sv_a > 15)
			sv_a = 15;

		for (i = 0; i < MMU_PAGE_SIZES; i++) {
			uint_t colors;
			uint_t a = sv_a;

			if ((colors = hw_page_array[i].hp_colors) <= 1) {
				continue;
			}
			while ((colors >> a) == 0)
				a--;
			if ((a << 4) > colorequivszc[i]) {
				colorequivszc[i] = (a << 4);
			}
		}
	}
}