/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
 * Copyright (c) 2015, 2016 by Delphix. All rights reserved.
 * Copyright 2018 Joyent, Inc.
 * Copyright 2021 Oxide Computer Company
 */

/*	Copyright (c) 1983, 1984, 1985, 1986, 1987, 1988, 1989  AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

/*
 * VM - physical page management.
 */
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/vnode.h>
#include <sys/vtrace.h>
#include <sys/cmn_err.h>
#include <sys/tuneable.h>
#include <sys/sysmacros.h>
#include <sys/cpuvar.h>
#include <sys/callb.h>
#include <sys/debug.h>
#include <sys/condvar_impl.h>
#include <sys/mem_config.h>
#include <sys/mem_cage.h>
#include <sys/atomic.h>
#include <sys/strlog.h>
#include <sys/ontrap.h>
#include <vm/seg_kmem.h>
#include <vm/vm_dep.h>
#include <sys/vm_usage.h>
#include <fs/fs_subr.h>
#include <sys/modctl.h>
static pgcnt_t max_page_get;	/* max page_get request size in pages */
pgcnt_t total_pages = 0;	/* total number of pages (used by /proc) */
volatile uint64_t n_throttle = 0;
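/*
 * Note: n_throttle is incremented (atomically) on every call to
 * page_create_throttle() below; like the other counters in this file it is
 * maintained without a lock, purely as a statistic.
 */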
/*
 * freemem_lock protects all freemem variables:
 * availrmem. Also this lock protects the globals which track the
 * availrmem changes for accurate kernel footprint calculation.
 * See below for an explanation of these globals.
 */
kmutex_t freemem_lock;
pgcnt_t availrmem;
pgcnt_t availrmem_initial;
/*
 * These globals track availrmem changes to get a more accurate
 * estimate of the kernel size. Historically pp_kernel is used for
 * kernel size and is based on availrmem. But availrmem is adjusted for
 * locked pages in the system not just for kernel locked pages.
 * These new counters will track the pages locked through segvn and
 * by explicit user locking.
 *
 * pages_locked : How many pages are locked because of user specified
 * locking through mlock or plock.
 *
 * pages_useclaim, pages_claimed : These two variables track the
 * claim adjustments because of the protection changes on a segvn segment.
 *
 * All these globals are protected by the same lock which protects availrmem.
 */
pgcnt_t pages_locked = 0;
pgcnt_t pages_useclaim = 0;
pgcnt_t pages_claimed = 0;
/*
 * new_freemem_lock protects freemem, freemem_wait & freemem_cv.
 */
static kmutex_t new_freemem_lock;
static uint_t freemem_wait;	/* someone waiting for freemem */
static kcondvar_t freemem_cv;
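/*
 * Lock ordering note: when a pcf_lock and new_freemem_lock are both needed,
 * the pcf_lock is always taken first (see page_create_wait() below, which
 * acquires new_freemem_lock before dropping the pcf locks).
 */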
/*
 * The logical page free list is maintained as two lists, the 'free'
 * and the 'cache' lists.
 * The free list contains those pages that should be reused first.
 *
 * The implementation of the lists is machine dependent.
 * page_get_freelist(), page_get_cachelist(),
 * page_list_sub(), and page_list_add()
 * form the interface to the machine dependent implementation.
 *
 * Pages with p_free set are on the cache list.
 * Pages with p_free and p_age set are on the free list.
 *
 * A page may be locked while on either list.
 */
/*
 * free list accounting stuff.
 *
 * Spread out the value for the number of pages on the
 * page free and page cache lists. If there is just one
 * value, then it must be under just one lock.
 * The lock contention and cache traffic are a real bother.
 *
 * When we acquire and then drop a single pcf lock
 * we can start in the middle of the array of pcf structures.
 * If we acquire more than one pcf lock at a time, we need to
 * start at the front to avoid deadlocking.
 *
 * pcf_count holds the number of pages in each pool.
 *
 * pcf_block is set when page_create_get_something() has asked the
 * PSM page freelist and page cachelist routines without specifying
 * a color and nothing came back. This is used to block anything
 * else from moving pages from one list to the other while the
 * lists are searched again. If a page is freed while pcf_block is
 * set, then pcf_reserve is incremented. pcgs_unblock() takes care
 * of clearing pcf_block, doing the wakeups, etc.
 */
#define	MAX_PCF_FANOUT NCPU
static uint_t pcf_fanout = 1;	/* Will get changed at boot time */
static uint_t pcf_fanout_mask = 0;

struct pcf {
	kmutex_t	pcf_lock;	/* protects the structure */
	uint_t		pcf_count;	/* page count */
	uint_t		pcf_wait;	/* number of waiters */
	uint_t		pcf_block;	/* pcgs flag to page_free() */
	uint_t		pcf_reserve;	/* pages freed after pcf_block set */
	uint_t		pcf_fill[10];	/* to line up on the caches */
};
/*
 * PCF_INDEX hash needs to be dynamic (every so often the hash changes where
 * it will hash the cpu to). This is done to prevent a drain condition
 * from happening. This drain condition will occur when pcf_count decrement
 * occurs on cpu A and the increment of pcf_count always occurs on cpu B. An
 * example of this shows up with device interrupts. The dma buffer is allocated
 * by the cpu requesting the IO thus the pcf_count is decremented based on that.
 * When the memory is returned by the interrupt thread, the pcf_count will be
 * incremented based on the cpu servicing the interrupt.
 */
static struct pcf pcf[MAX_PCF_FANOUT];
#define	PCF_INDEX() ((int)(((long)CPU->cpu_seqid) + \
	(randtick() >> 24)) & (pcf_fanout_mask))
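/*
 * Illustrative reading of the hash above: because randtick() feeds into the
 * index, the same CPU does not always map to the same pcf bucket; every so
 * often (as the higher-order tick bits change) a CPU effectively moves to a
 * different bucket, which is what prevents the drain condition described
 * above where one CPU only ever decrements a bucket and another CPU only
 * ever increments it.
 */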
static int pcf_decrement_bucket(pgcnt_t);
static int pcf_decrement_multiple(pgcnt_t *, pgcnt_t, int);
kmutex_t	pcgs_lock;		/* serializes page_create_get_ */
kmutex_t	pcgs_cagelock;		/* serializes NOSLEEP cage allocs */
kmutex_t	pcgs_wait_lock;		/* used for delay in pcgs */
static kcondvar_t	pcgs_cv;	/* cv for delay in pcgs */
/*
 * No locks, but so what, they are only statistics.
 */
static struct page_tcnt {
	int	pc_free_cache;		/* free's into cache list */
	int	pc_free_dontneed;	/* free's with dontneed */
	int	pc_free_pageout;	/* free's from pageout */
	int	pc_free_free;		/* free's into free list */
	int	pc_free_pages;		/* free's into large page free list */
	int	pc_destroy_pages;	/* large page destroy's */
	int	pc_get_cache;		/* get's from cache list */
	int	pc_get_free;		/* get's from free list */
	int	pc_reclaim;		/* reclaim's */
	int	pc_abortfree;		/* abort's of free pages */
	int	pc_find_hit;		/* find's that find page */
	int	pc_find_miss;		/* find's that don't find page */
	int	pc_destroy_free;	/* # of free pages destroyed */
#define	PC_HASH_CNT	(4*PAGE_HASHAVELEN)
	int	pc_find_hashlen[PC_HASH_CNT+1];
	int	pc_addclaim_pages;
	int	pc_subclaim_pages;
	int	pc_free_replacement_page[2];
	int	pc_try_demote_pages[6];
	int	pc_demote_pages[2];
} pagecnt;
uint_t	hashin_not_held;
uint_t	hashin_already;

uint_t	hashout_count;
uint_t	hashout_not_held;

uint_t	page_create_count;
uint_t	page_create_not_enough;
uint_t	page_create_not_enough_again;
uint_t	page_create_zero;
uint_t	page_create_hashout;
uint_t	page_create_page_lock_failed;
uint_t	page_create_trylock_failed;
uint_t	page_create_found_one;
uint_t	page_create_hashin_failed;
uint_t	page_create_dropped_phm;

uint_t	page_create_new;
uint_t	page_create_exists;
uint_t	page_create_putbacks;
uint_t	page_create_overshoot;

uint_t	page_reclaim_zero;
uint_t	page_reclaim_zero_locked;

uint_t	page_rename_exists;
uint_t	page_rename_count;

uint_t	page_lookup_cnt[20];
uint_t	page_lookup_nowait_cnt[10];
uint_t	page_find_cnt;
uint_t	page_exists_cnt;
uint_t	page_exists_forreal_cnt;
uint_t	page_lookup_dev_cnt;
uint_t	get_cachelist_cnt;
uint_t	page_create_cnt[10];
uint_t	alloc_pages[9];
uint_t	page_exphcontg[19];
uint_t	page_create_large_cnt[10];
static inline page_t *
page_hash_search(ulong_t index, vnode_t *vnode, u_offset_t off)
{
	uint_t mylen = 0;
	page_t *page;

	for (page = page_hash[index]; page; page = page->p_hash, mylen++)
		if (page->p_vnode == vnode && page->p_offset == off)
			break;

	if (page != NULL) {
		pagecnt.pc_find_hit++;
	} else {
		pagecnt.pc_find_miss++;
	}

	pagecnt.pc_find_hashlen[MIN(mylen, PC_HASH_CNT)]++;

	return (page);
}
#define	MEMSEG_SEARCH_STATS

#ifdef MEMSEG_SEARCH_STATS
struct memseg_stats {
	uint_t nsearch;
	uint_t nlastwon;
	uint_t nhashwon;
	uint_t nnotfound;
} memseg_stats;

#define	MEMSEG_STAT_INCR(v) \
	atomic_inc_32(&memseg_stats.v)
#else
#define	MEMSEG_STAT_INCR(x)
#endif

struct memseg *memsegs;		/* list of memory segments */
/*
 * /etc/system tunable to control large page allocation heuristic.
 *
 * Setting to LPAP_LOCAL will heavily prefer the local lgroup over remote lgroup
 * for large page allocation requests. If a large page is not readily
 * available on the local freelists we will go through additional effort
 * to create a large page, potentially moving smaller pages around to coalesce
 * larger pages in the local lgroup.
 * Default value of LPAP_DEFAULT will go to remote freelists if large pages
 * are not readily available in the local lgroup.
 */
enum lpap {
	LPAP_DEFAULT,	/* default large page allocation policy */
	LPAP_LOCAL	/* local large page allocation policy */
};

enum lpap lpg_alloc_prefer = LPAP_DEFAULT;
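/*
 * Illustrative /etc/system setting (since LPAP_LOCAL has the value 1 in the
 * enum above), for an administrator who wants to strongly prefer local
 * large pages:
 *
 *	set lpg_alloc_prefer = 1
 */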
static void page_init_mem_config(void);
static int page_do_hashin(page_t *, vnode_t *, u_offset_t);
static void page_do_hashout(page_t *);
static void page_capture_init();
int page_capture_take_action(page_t *, uint_t, void *);

static void page_demote_vp_pages(page_t *);
static void
pcf_init(void)
{
	if (boot_ncpus != -1) {
		pcf_fanout = boot_ncpus;
	} else {
		pcf_fanout = max_ncpus;
	}
#ifdef sun4v
	/*
	 * Force at least 4 buckets if possible for sun4v.
	 */
	pcf_fanout = MAX(pcf_fanout, 4);
#endif

	/*
	 * Round up to the nearest power of 2.
	 */
	pcf_fanout = MIN(pcf_fanout, MAX_PCF_FANOUT);
	if (!ISP2(pcf_fanout)) {
		pcf_fanout = 1 << highbit(pcf_fanout);
		if (pcf_fanout > MAX_PCF_FANOUT) {
			pcf_fanout = 1 << (highbit(MAX_PCF_FANOUT) - 1);
		}
	}
	pcf_fanout_mask = pcf_fanout - 1;
}
/*
 * vm subsystem related initialization
 */
void
vm_init(void)
{
	boolean_t callb_vm_cpr(void *, int);

	(void) callb_add(callb_vm_cpr, 0, CB_CL_CPR_VM, "vm");
	page_init_mem_config();
}
/*
 * This function is called at startup and when memory is added or deleted.
 */
void
init_pages_pp_maximum()
{
	static pgcnt_t p_min;
	static pgcnt_t pages_pp_maximum_startup;
	static pgcnt_t avrmem_delta;
	static int init_done;
	static int user_set;	/* true if set in /etc/system */

	if (init_done == 0) {

		/* If the user specified a value, save it */
		if (pages_pp_maximum != 0) {
			user_set = 1;
			pages_pp_maximum_startup = pages_pp_maximum;
		}

		/*
		 * Setting of pages_pp_maximum is based first time
		 * on the value of availrmem just after the start-up
		 * allocations. To preserve this relationship at run
		 * time, use a delta from availrmem_initial.
		 */
		ASSERT(availrmem_initial >= availrmem);
		avrmem_delta = availrmem_initial - availrmem;

		/* The allowable floor of pages_pp_maximum */
		p_min = tune.t_minarmem + 100;

		/* Make sure we don't come through here again. */
		init_done = 1;
	}
	/*
	 * Determine pages_pp_maximum, the number of currently available
	 * pages (availrmem) that can't be `locked'. If not set by
	 * the user, we set it to 4% of the currently available memory
	 * plus 4MB.
	 * But we also insist that it be greater than tune.t_minarmem;
	 * otherwise a process could lock down a lot of memory, get swapped
	 * out, and never have enough to get swapped back in.
	 */
	if (user_set)
		pages_pp_maximum = pages_pp_maximum_startup;
	else
		pages_pp_maximum = ((availrmem_initial - avrmem_delta) / 25)
		    + btop(4 * 1024 * 1024);

	if (pages_pp_maximum <= p_min) {
		pages_pp_maximum = p_min;
	}
}
/*
 * In the past, we limited the maximum pages that could be gotten to essentially
 * 1/2 of the total pages on the system. However, this is too conservative for
 * some cases. For example, we may want to host a large virtual machine which
 * needs to use a significant portion of the system's memory. In practice,
 * allowing more than 1/2 of the total pages is fine, but becomes problematic
 * as we approach or exceed 75% of the pages on the system. Thus, we limit the
 * maximum to 23/32 of the total pages, which is ~72%.
 */
void
set_max_page_get(pgcnt_t target_total_pages)
{
	max_page_get = (target_total_pages >> 5) * 23;
	ASSERT3U(max_page_get, >, 0);
}
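/*
 * For example (illustrative numbers): with 1,048,576 total pages -- 4 GiB of
 * 4 KiB pages -- the limit works out to (1048576 >> 5) * 23 = 753,664 pages,
 * i.e. 23/32 or roughly 72% of the pages on the system.
 */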
	return (max_page_get);
static pgcnt_t pending_delete;

/*ARGSUSED*/
static void
page_mem_config_post_add(
	void *arg,
	pgcnt_t delta_pages)
{
	set_max_page_get(total_pages - pending_delete);
	init_pages_pp_maximum();
}

/*ARGSUSED*/
static int
page_mem_config_pre_del(
	void *arg,
	pgcnt_t delta_pages)
{
	pgcnt_t nv;

	nv = atomic_add_long_nv(&pending_delete, (spgcnt_t)delta_pages);
	set_max_page_get(total_pages - nv);
	return (0);
}

/*ARGSUSED*/
static void
page_mem_config_post_del(
	void *arg,
	pgcnt_t delta_pages,
	int cancelled)
{
	pgcnt_t nv;

	nv = atomic_add_long_nv(&pending_delete, -(spgcnt_t)delta_pages);
	set_max_page_get(total_pages - nv);
	init_pages_pp_maximum();
}

static kphysm_setup_vector_t page_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	page_mem_config_post_add,
	page_mem_config_pre_del,
	page_mem_config_post_del,
};

static void
page_init_mem_config(void)
{
	int ret;

	ret = kphysm_setup_func_register(&page_mem_config_vec, (void *)NULL);
	ASSERT(ret == 0);
}
/*
 * Evenly spread out the PCF counters for large free pages
 */
static void
page_free_large_ctr(pgcnt_t npages)
{
	static struct pcf	*p = pcf;
	pgcnt_t			lump;

	freemem += npages;

	lump = roundup(npages, pcf_fanout) / pcf_fanout;

	while (npages > 0) {

		ASSERT(!p->pcf_block);

		if (lump < npages) {
			p->pcf_count += (uint_t)lump;
			npages -= lump;
		} else {
			p->pcf_count += (uint_t)npages;
			npages = 0;
		}

		ASSERT(!p->pcf_wait);

		if (++p > &pcf[pcf_fanout - 1])
			p = pcf;
	}

	ASSERT(npages == 0);
}
/*
 * Add a physical chunk of memory to the system free lists during startup.
 * Platform specific startup() allocates the memory for the page structs.
 *
 * num  - number of page structures
 * base - page number (pfn) to be associated with the first page.
 *
 * Since we are doing this during startup (ie. single threaded), we will
 * use shortcut routines to avoid any locking overhead while putting all
 * these pages on the freelists.
 *
 * NOTE: Any changes performed to page_free(), must also be performed to
 *	 add_physmem() since this is how we initialize all page_t's at
 *	 boot time.
 */
579 uint_t szc
= page_num_pagesizes() - 1;
580 pgcnt_t large
= page_get_pagecnt(szc
);
583 TRACE_2(TR_FAC_VM
, TR_PAGE_INIT
,
584 "add_physmem:pp %p num %lu", pp
, num
);
587 * Arbitrarily limit the max page_get request
588 * to 1/2 of the page structs we have.
591 set_max_page_get(total_pages
);
593 PLCNT_MODIFY_MAX(pnum
, (long)num
);
596 * The physical space for the pages array
597 * representing ram pages has already been
598 * allocated. Here we initialize each lock
599 * in the page structure, and put each on
602 for (; num
; pp
++, pnum
++, num
--) {
605 * this needs to fill in the page number
606 * and do any other arch specific initialization
608 add_physmem_cb(pp
, pnum
);
615 * Initialize the page lock as unlocked, since nobody
616 * can see or access this page yet.
623 page_iolock_init(pp
);
626 * initialize other fields in the page_t
629 page_clr_all_props(pp
);
631 pp
->p_offset
= (u_offset_t
)-1;
636 * Simple case: System doesn't support large pages.
640 page_free_at_startup(pp
);
645 * Handle unaligned pages, we collect them up onto
646 * the root page until we have a full large page.
648 if (!IS_P2ALIGNED(pnum
, large
)) {
651 * If not in a large page,
652 * just free as small page.
656 page_free_at_startup(pp
);
661 * Link a constituent page into the large page.
664 page_list_concat(&root
, &pp
);
667 * When large page is fully formed, free it.
669 if (++cnt
== large
) {
670 page_free_large_ctr(cnt
);
671 page_list_add_pages(root
, PG_LIST_ISINIT
);
679 * At this point we have a page number which
680 * is aligned. We assert that we aren't already
681 * in a different large page.
683 ASSERT(IS_P2ALIGNED(pnum
, large
));
684 ASSERT(root
== NULL
&& cnt
== 0);
687 * If insufficient number of pages left to form
688 * a large page, just free the small page.
692 page_free_at_startup(pp
);
697 * Otherwise start a new large page.
703 ASSERT(root
== NULL
&& cnt
== 0);
/*
 * Find a page representing the specified [vp, offset].
 * If we find the page but it is in transit coming in,
 * it will have an "exclusive" lock and we wait for
 * the i/o to complete. A page found on the free list
 * is always reclaimed and then locked. On success, the page
 * is locked, its data is valid and it isn't on the free
 * list, while a NULL is returned if the page doesn't exist.
 */
page_t *
page_lookup(vnode_t *vp, u_offset_t off, se_t se)
{
	return (page_lookup_create(vp, off, se, NULL, NULL, 0));
}
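/*
 * A minimal usage sketch (hypothetical caller; error handling elided): on
 * success the returned page is locked at the requested level and is off the
 * free list, so the caller is responsible for dropping the lock when done.
 *
 *	page_t *pp = page_lookup(vp, off, SE_SHARED);
 *	if (pp != NULL) {
 *		... use the page ...
 *		page_unlock(pp);
 *	}
 */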
/*
 * Find a page representing the specified [vp, offset].
 * We either return the one we found or, if passed in,
 * create one with identity of [vp, offset] of the
 * pre-allocated page. If we find an existing page but it is
 * in transit coming in, it will have an "exclusive" lock
 * and we wait for the i/o to complete. A page found on
 * the free list is always reclaimed and then locked.
 * On success, the page is locked, its data is valid and
 * it isn't on the free list, while a NULL is returned
 * if the page doesn't exist and newpp is NULL;
 */
748 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp
)));
749 VM_STAT_ADD(page_lookup_cnt
[0]);
750 ASSERT(newpp
? PAGE_EXCL(newpp
) : 1);
753 * Acquire the appropriate page hash lock since
754 * we have to search the hash list. Pages that
755 * hash to this list can't change identity while
759 index
= PAGE_HASH_FUNC(vp
, off
);
762 pp
= page_hash_search(index
, vp
, off
);
764 VM_STAT_ADD(page_lookup_cnt
[1]);
765 es
= (newpp
!= NULL
) ? 1 : 0;
768 VM_STAT_ADD(page_lookup_cnt
[2]);
769 if (!page_try_reclaim_lock(pp
, se
, es
)) {
771 * On a miss, acquire the phm. Then
772 * next time, page_lock() will be called,
773 * causing a wait if the page is busy.
774 * just looping with page_trylock() would
777 VM_STAT_ADD(page_lookup_cnt
[3]);
778 phm
= PAGE_HASH_MUTEX(index
);
784 VM_STAT_ADD(page_lookup_cnt
[4]);
785 if (!page_lock_es(pp
, se
, phm
, P_RECLAIM
, es
)) {
786 VM_STAT_ADD(page_lookup_cnt
[5]);
 * Since `pp' is locked it can not change identity now.
 * Reconfirm we locked the correct page.
 *
 * Both the p_vnode and p_offset *must* be cast volatile
 * to force a reload of their values: The page_hash_search
 * function will have stuffed p_vnode and p_offset into
 * registers before calling page_trylock(); another thread,
 * actually holding the hash lock, could have changed the
 * page's identity in memory, but our registers would not
 * be changed, fooling the reconfirmation. If the hash
 * lock was held during the search, the casting would
 * not be needed.
805 VM_STAT_ADD(page_lookup_cnt
[6]);
806 if (((volatile struct vnode
*)(pp
->p_vnode
) != vp
) ||
807 ((volatile u_offset_t
)(pp
->p_offset
) != off
)) {
808 VM_STAT_ADD(page_lookup_cnt
[7]);
810 panic("page_lookup_create: lost page %p",
815 phm
= PAGE_HASH_MUTEX(index
);
 * If page_trylock() was called, then pp may still be on
 * the cachelist (can't be on the free list, it would not
 * have been found in the search). If it is on the
 * cachelist it must be pulled now. To pull the page from
 * the cachelist, it must be exclusively locked.
 *
 * The other big difference between page_trylock() and
 * page_lock(), is that page_lock() will pull the
 * page from whatever free list (the cache list in this
 * case) the page is on. If page_trylock() was used
 * above, then we have to do the reclaim ourselves.
834 if ((!hash_locked
) && (PP_ISFREE(pp
))) {
835 ASSERT(PP_ISAGED(pp
) == 0);
836 VM_STAT_ADD(page_lookup_cnt
[8]);
 * page_reclaim will ensure that we
 * have this page exclusively
843 if (!page_reclaim(pp
, NULL
)) {
845 * Page_reclaim dropped whatever lock
848 VM_STAT_ADD(page_lookup_cnt
[9]);
849 phm
= PAGE_HASH_MUTEX(index
);
853 } else if (se
== SE_SHARED
&& newpp
== NULL
) {
854 VM_STAT_ADD(page_lookup_cnt
[10]);
863 if (newpp
!= NULL
&& pp
->p_szc
< newpp
->p_szc
&&
864 PAGE_EXCL(pp
) && nrelocp
!= NULL
) {
865 ASSERT(nrelocp
!= NULL
);
866 (void) page_relocate(&pp
, &newpp
, 1, 1, nrelocp
,
869 VM_STAT_COND_ADD(*nrelocp
== 1,
870 page_lookup_cnt
[11]);
871 VM_STAT_COND_ADD(*nrelocp
> 1,
872 page_lookup_cnt
[12]);
876 if (se
== SE_SHARED
) {
879 VM_STAT_ADD(page_lookup_cnt
[13]);
881 } else if (newpp
!= NULL
&& nrelocp
!= NULL
) {
882 if (PAGE_EXCL(pp
) && se
== SE_SHARED
) {
885 VM_STAT_COND_ADD(pp
->p_szc
< newpp
->p_szc
,
886 page_lookup_cnt
[14]);
887 VM_STAT_COND_ADD(pp
->p_szc
== newpp
->p_szc
,
888 page_lookup_cnt
[15]);
889 VM_STAT_COND_ADD(pp
->p_szc
> newpp
->p_szc
,
890 page_lookup_cnt
[16]);
891 } else if (newpp
!= NULL
&& PAGE_EXCL(pp
)) {
894 } else if (!hash_locked
) {
895 VM_STAT_ADD(page_lookup_cnt
[17]);
896 phm
= PAGE_HASH_MUTEX(index
);
900 } else if (newpp
!= NULL
) {
902 * If we have a preallocated page then
903 * insert it now and basically behave like
906 VM_STAT_ADD(page_lookup_cnt
[18]);
908 * Since we hold the page hash mutex and
909 * just searched for this page, page_hashin
910 * had better not fail. If it does, that
911 * means some thread did not follow the
912 * page hash mutex rules. Panic now and
913 * get it over with. As usual, go down
914 * holding all the locks.
916 ASSERT(MUTEX_HELD(phm
));
917 if (!page_hashin(newpp
, vp
, off
, phm
)) {
918 ASSERT(MUTEX_HELD(phm
));
919 panic("page_lookup_create: hashin failed %p %p %llx %p",
920 (void *)newpp
, (void *)vp
, off
, (void *)phm
);
923 ASSERT(MUTEX_HELD(phm
));
926 page_set_props(newpp
, P_REF
);
931 VM_STAT_ADD(page_lookup_cnt
[19]);
935 ASSERT(pp
? PAGE_LOCKED_SE(pp
, se
) : 1);
937 ASSERT(pp
? ((PP_ISFREE(pp
) == 0) && (PP_ISAGED(pp
) == 0)) : 1);
/*
 * Search the hash list for the page representing the
 * specified [vp, offset] and return it locked. Skip
 * free pages and pages that cannot be locked as requested.
 * Used while attempting to kluster pages.
 */
949 page_lookup_nowait(vnode_t
*vp
, u_offset_t off
, se_t se
)
956 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp
)));
957 VM_STAT_ADD(page_lookup_nowait_cnt
[0]);
959 index
= PAGE_HASH_FUNC(vp
, off
);
960 pp
= page_hash_search(index
, vp
, off
);
964 VM_STAT_ADD(page_lookup_nowait_cnt
[1]);
966 phm
= PAGE_HASH_MUTEX(index
);
968 pp
= page_hash_search(index
, vp
, off
);
971 if (pp
== NULL
|| PP_ISFREE(pp
)) {
972 VM_STAT_ADD(page_lookup_nowait_cnt
[2]);
975 if (!page_trylock(pp
, se
)) {
976 VM_STAT_ADD(page_lookup_nowait_cnt
[3]);
979 VM_STAT_ADD(page_lookup_nowait_cnt
[4]);
981 * See the comment in page_lookup()
983 if (((volatile struct vnode
*)(pp
->p_vnode
) != vp
) ||
984 ((u_offset_t
)(pp
->p_offset
) != off
)) {
985 VM_STAT_ADD(page_lookup_nowait_cnt
[5]);
987 panic("page_lookup_nowait %p",
995 VM_STAT_ADD(page_lookup_nowait_cnt
[6]);
1002 VM_STAT_ADD(page_lookup_nowait_cnt
[7]);
1006 ASSERT(pp
? PAGE_LOCKED_SE(pp
, se
) : 1);
/*
 * Search the hash list for a page with the specified [vp, off]
 * that is known to exist and is already locked. This routine
 * is typically used by segment SOFTUNLOCK routines.
 */
1017 page_find(vnode_t
*vp
, u_offset_t off
)
1023 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp
)));
1024 VM_STAT_ADD(page_find_cnt
);
1026 index
= PAGE_HASH_FUNC(vp
, off
);
1027 phm
= PAGE_HASH_MUTEX(index
);
1030 pp
= page_hash_search(index
, vp
, off
);
1033 ASSERT(pp
== NULL
|| PAGE_LOCKED(pp
) || panicstr
);
/*
 * Determine whether a page with the specified [vp, off]
 * currently exists in the system. Obviously this should
 * only be considered as a hint since nothing prevents the
 * page from disappearing or appearing immediately after
 * the return from this routine. Subsequently, we don't
 * even bother to lock the list.
 */
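/*
 * Because the result is only a hint, a typical (hypothetical) caller treats
 * a non-NULL return as "probably present" and still goes through a locking
 * routine such as page_lookup() before relying on the page.
 */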
1046 page_exists(vnode_t
*vp
, u_offset_t off
)
1050 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp
)));
1051 VM_STAT_ADD(page_exists_cnt
);
1053 index
= PAGE_HASH_FUNC(vp
, off
);
1055 return (page_hash_search(index
, vp
, off
));
/*
 * Determine if physically contiguous pages exist for [vp, off] - [vp, off +
 * page_size(szc)) range. If they exist and ppa is not NULL fill ppa array
 * with these pages locked SHARED. If necessary reclaim pages from
 * freelist. Return 1 if contiguous pages exist and 0 otherwise.
 *
 * If we fail to lock pages still return 1 if pages exist and are contiguous.
 * But in this case the return value is just a hint. ppa array won't be filled.
 * Caller should initialize ppa[0] as NULL to distinguish the return value.
 *
 * Returns 0 if pages don't exist or are not physically contiguous.
 *
 * This routine doesn't work for anonymous (swapfs) pages.
 */
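/*
 * Sketch of the calling convention described above (hypothetical caller;
 * MAX_PAGES is illustrative only):
 *
 *	page_t *ppa[MAX_PAGES];
 *	ppa[0] = NULL;
 *	if (page_exists_physcontig(vp, off, szc, ppa)) {
 *		if (ppa[0] != NULL) {
 *			pages exist, are contiguous, and are now held
 *			SE_SHARED in ppa[]
 *		} else {
 *			pages exist and are contiguous, but could not be
 *			locked; treat the result as a hint only
 *		}
 *	}
 */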
1073 page_exists_physcontig(vnode_t
*vp
, u_offset_t off
, uint_t szc
, page_t
*ppa
[])
1080 u_offset_t save_off
= off
;
1089 ASSERT(!IS_SWAPFSVP(vp
));
1090 ASSERT(!VN_ISKAS(vp
));
1093 if (++loopcnt
> 3) {
1094 VM_STAT_ADD(page_exphcontg
[0]);
1098 index
= PAGE_HASH_FUNC(vp
, off
);
1099 phm
= PAGE_HASH_MUTEX(index
);
1102 pp
= page_hash_search(index
, vp
, off
);
1105 VM_STAT_ADD(page_exphcontg
[1]);
1108 VM_STAT_ADD(page_exphcontg
[2]);
1112 pages
= page_get_pagecnt(szc
);
1114 pfn
= rootpp
->p_pagenum
;
1116 if ((pszc
= pp
->p_szc
) >= szc
&& ppa
!= NULL
) {
1117 VM_STAT_ADD(page_exphcontg
[3]);
1118 if (!page_trylock(pp
, SE_SHARED
)) {
1119 VM_STAT_ADD(page_exphcontg
[4]);
1123 * Also check whether p_pagenum was modified by DR.
1125 if (pp
->p_szc
!= pszc
|| pp
->p_vnode
!= vp
||
1126 pp
->p_offset
!= off
|| pp
->p_pagenum
!= pfn
) {
1127 VM_STAT_ADD(page_exphcontg
[5]);
 * Since szc was non-zero and the vnode and offset matched after we
 * locked the page, it can't become free on us.
1136 ASSERT(!PP_ISFREE(pp
));
1137 if (!IS_P2ALIGNED(pfn
, pages
)) {
1145 for (i
= 1; i
< pages
; i
++, pp
++, off
+= PAGESIZE
, pfn
++) {
1146 if (!page_trylock(pp
, SE_SHARED
)) {
1147 VM_STAT_ADD(page_exphcontg
[6]);
1156 if (pp
->p_szc
!= pszc
) {
1157 VM_STAT_ADD(page_exphcontg
[7]);
 * The szc is the same as for the previously locked pages
 * with the right identity. Since this page had the correct
 * szc after we locked it, it can't get freed or destroyed
 * and therefore must have the expected identity.
1174 ASSERT(!PP_ISFREE(pp
));
1175 if (pp
->p_vnode
!= vp
||
1176 pp
->p_offset
!= off
) {
1177 panic("page_exists_physcontig: "
1178 "large page identity doesn't match");
1181 ASSERT(pp
->p_pagenum
== pfn
);
1183 VM_STAT_ADD(page_exphcontg
[8]);
1186 } else if (pszc
>= szc
) {
1187 VM_STAT_ADD(page_exphcontg
[9]);
1188 if (!IS_P2ALIGNED(pfn
, pages
)) {
1194 if (!IS_P2ALIGNED(pfn
, pages
)) {
1195 VM_STAT_ADD(page_exphcontg
[10]);
1199 if (page_numtomemseg_nolock(pfn
) !=
1200 page_numtomemseg_nolock(pfn
+ pages
- 1)) {
1201 VM_STAT_ADD(page_exphcontg
[11]);
1206 * We loop up 4 times across pages to promote page size.
1207 * We're extra cautious to promote page size atomically with respect
1208 * to everybody else. But we can probably optimize into 1 loop if
1209 * this becomes an issue.
1212 for (i
= 0; i
< pages
; i
++, pp
++, off
+= PAGESIZE
, pfn
++) {
1213 if (!page_trylock(pp
, SE_EXCL
)) {
1214 VM_STAT_ADD(page_exphcontg
[12]);
1218 * Check whether p_pagenum was modified by DR.
1220 if (pp
->p_pagenum
!= pfn
) {
1224 if (pp
->p_vnode
!= vp
||
1225 pp
->p_offset
!= off
) {
1226 VM_STAT_ADD(page_exphcontg
[13]);
1230 if (pp
->p_szc
>= szc
) {
1239 VM_STAT_ADD(page_exphcontg
[14]);
1249 for (i
= 0; i
< pages
; i
++, pp
++) {
1250 if (PP_ISFREE(pp
)) {
1251 VM_STAT_ADD(page_exphcontg
[15]);
1252 ASSERT(!PP_ISAGED(pp
));
1253 ASSERT(pp
->p_szc
== 0);
1254 if (!page_reclaim(pp
, NULL
)) {
1258 ASSERT(pp
->p_szc
< szc
);
1259 VM_STAT_ADD(page_exphcontg
[16]);
1260 (void) hat_pageunload(pp
, HAT_FORCE_PGUNLOAD
);
1264 VM_STAT_ADD(page_exphcontg
[17]);
1266 * page_reclaim failed because we were out of memory.
1267 * drop the rest of the locks and return because this page
1268 * must be already reallocated anyway.
1271 for (j
= 0; j
< pages
; j
++, pp
++) {
1281 for (i
= 0; i
< pages
; i
++, pp
++, off
+= PAGESIZE
) {
1282 ASSERT(PAGE_EXCL(pp
));
1283 ASSERT(!PP_ISFREE(pp
));
1284 ASSERT(!hat_page_is_mapped(pp
));
1285 ASSERT(pp
->p_vnode
== vp
);
1286 ASSERT(pp
->p_offset
== off
);
1290 for (i
= 0; i
< pages
; i
++, pp
++) {
1295 page_downgrade(ppa
[i
]);
1301 VM_STAT_ADD(page_exphcontg
[18]);
1302 ASSERT(vp
->v_pages
!= NULL
);
/*
 * Determine whether a page with the specified [vp, off]
 * currently exists in the system and if so return its
 * size code. Obviously this should only be considered as
 * a hint since nothing prevents the page from disappearing
 * or appearing immediately after the return from this routine.
 */
1314 page_exists_forreal(vnode_t
*vp
, u_offset_t off
, uint_t
*szc
)
1321 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp
)));
1322 ASSERT(szc
!= NULL
);
1323 VM_STAT_ADD(page_exists_forreal_cnt
);
1325 index
= PAGE_HASH_FUNC(vp
, off
);
1326 phm
= PAGE_HASH_MUTEX(index
);
1329 pp
= page_hash_search(index
, vp
, off
);
1338 /* wakeup threads waiting for pages in page_create_get_something() */
1342 if (!CV_HAS_WAITERS(&pcgs_cv
))
1344 cv_broadcast(&pcgs_cv
);
/*
 * 'freemem' is used all over the kernel as an indication of how many
 * pages are free (either on the cache list or on the free page list)
 * in the system. In very few places is a really accurate 'freemem'
 * needed. To avoid contention of the lock protecting the
 * single freemem, it was spread out into NCPU buckets. Set_freemem
 * sets freemem to the total of all NCPU buckets. It is called from
 * clock() on each TICK.
 */
1365 for (i
= 0; i
< pcf_fanout
; i
++) {
1372 * Don't worry about grabbing mutex. It's not that
1373 * critical if we miss a tick or two. This is
1374 * where we wakeup possible delayers in
1375 * page_create_get_something().
1389 for (i
= 0; i
< pcf_fanout
; i
++) {
1394 * We just calculated it, might as well set it.
1401 * Acquire all of the page cache & free (pcf) locks.
1410 for (i
= 0; i
< pcf_fanout
; i
++) {
1411 mutex_enter(&p
->pcf_lock
);
1417 * Release all the pcf_locks.
1426 for (i
= 0; i
< pcf_fanout
; i
++) {
1427 mutex_exit(&p
->pcf_lock
);
/*
 * Inform the VM system that we need some pages freed up.
 * Calls must be symmetric, e.g.:
 *
 *	page_needfree(100);
 *	wait a bit;
 *	page_needfree(-100);
 */
1441 page_needfree(spgcnt_t npages
)
1443 mutex_enter(&new_freemem_lock
);
1445 mutex_exit(&new_freemem_lock
);
/*
 * Throttle for page_create(): try to prevent freemem from dropping
 * below throttlefree. We can't provide a 100% guarantee because
 * KM_NOSLEEP allocations, page_reclaim(), and various other things
 * nibble away at the freelist. However, we can block all PG_WAIT
 * allocations until memory becomes available. The motivation is
 * that several things can fall apart when there's no free memory:
 *
 * (1) If pageout() needs memory to push a page, the system deadlocks.
 *
 * (2) By (broken) specification, timeout(9F) can neither fail nor
 *     block, so it has no choice but to panic the system if it
 *     cannot allocate a callout structure.
 *
 * (3) Like timeout(), ddi_set_callback() cannot fail and cannot block;
 *     it panics if it cannot allocate a callback structure.
 *
 * (4) Untold numbers of third-party drivers have not yet been hardened
 *     against KM_NOSLEEP and/or allocb() failures; they simply assume
 *     success and panic the system with a data fault on failure.
 *     (The long-term solution to this particular problem is to ship
 *     hostile fault-injecting DEBUG kernels with the DDK.)
 *
 * It is theoretically impossible to guarantee success of non-blocking
 * allocations, but in practice, this throttle is very hard to break.
 */
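/*
 * For example (illustrative numbers): a PG_NORMALPRI (KM_NOSLEEP-style)
 * request for 10 pages is granted only if freemem >= 10 + throttlefree,
 * while a PG_WAIT request that finds memory short blocks on freemem_cv
 * until the pageout scanner has freed enough pages.
 */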
1475 page_create_throttle(pgcnt_t npages
, int flags
)
1479 pgcnt_t tf
; /* effective value of throttlefree */
1481 atomic_inc_64(&n_throttle
);
1484 * Normal priority allocations.
1486 if ((flags
& (PG_WAIT
| PG_NORMALPRI
)) == PG_NORMALPRI
) {
1487 ASSERT(!(flags
& (PG_PANIC
| PG_PUSHPAGE
)));
1488 return (freemem
>= npages
+ throttlefree
);
1492 * Never deny pages when:
1493 * - it's a thread that cannot block [NOMEMWAIT()]
1494 * - the allocation cannot block and must not fail
1495 * - the allocation cannot block and is pageout dispensated
1498 ((flags
& (PG_WAIT
| PG_PANIC
)) == PG_PANIC
) ||
1499 ((flags
& (PG_WAIT
| PG_PUSHPAGE
)) == PG_PUSHPAGE
))
1503 * If the allocation can't block, we look favorably upon it
1504 * unless we're below pageout_reserve. In that case we fail
1505 * the allocation because we want to make sure there are a few
1506 * pages available for pageout.
1508 if ((flags
& PG_WAIT
) == 0)
1509 return (freemem
>= npages
+ pageout_reserve
);
1511 /* Calculate the effective throttlefree value */
1513 ((flags
& PG_PUSHPAGE
) ? pageout_reserve
: 0);
1515 WAKE_PAGEOUT_SCANNER(page__create__throttle
);
1520 mutex_enter(&new_freemem_lock
);
1521 for (i
= 0; i
< pcf_fanout
; i
++) {
1522 fm
+= pcf
[i
].pcf_count
;
1524 mutex_exit(&pcf
[i
].pcf_lock
);
1527 if (freemem
>= npages
+ tf
) {
1528 mutex_exit(&new_freemem_lock
);
1533 cv_wait(&freemem_cv
, &new_freemem_lock
);
1536 mutex_exit(&new_freemem_lock
);
/*
 * page_create_wait() is called to either coalesce pages from the
 * different pcf buckets or to wait because there simply are not
 * enough pages to satisfy the caller's request.
 *
 * Sadly, this is called from platform/vm/vm_machdep.c
 */
1549 page_create_wait(pgcnt_t npages
, uint_t flags
)
1556 * Wait until there are enough free pages to satisfy our
1558 * We set needfree += npages before prodding pageout, to make sure
1559 * it does real work when npages > lotsfree > freemem.
1561 VM_STAT_ADD(page_create_not_enough
);
1563 ASSERT(!kcage_on
? !(flags
& PG_NORELOC
) : 1);
1565 if ((flags
& PG_NORELOC
) &&
1566 kcage_freemem
< kcage_throttlefree
+ npages
)
1567 (void) kcage_create_throttle(npages
, flags
);
1569 if (freemem
< npages
+ throttlefree
)
1570 if (!page_create_throttle(npages
, flags
))
1573 if (pcf_decrement_bucket(npages
) ||
1574 pcf_decrement_multiple(&total
, npages
, 0))
1578 * All of the pcf locks are held, there are not enough pages
1579 * to satisfy the request (npages < total).
1580 * Be sure to acquire the new_freemem_lock before dropping
1581 * the pcf locks. This prevents dropping wakeups in page_free().
1582 * The order is always pcf_lock then new_freemem_lock.
1584 * Since we hold all the pcf locks, it is a good time to set freemem.
1586 * If the caller does not want to wait, return now.
1587 * Else turn the pageout daemon loose to find something
1588 * and wait till it does.
1593 if ((flags
& PG_WAIT
) == 0) {
1596 TRACE_2(TR_FAC_VM
, TR_PAGE_CREATE_NOMEM
,
1597 "page_create_nomem:npages %ld freemem %ld", npages
, freemem
);
1601 ASSERT(proc_pageout
!= NULL
);
1602 WAKE_PAGEOUT_SCANNER(page__create__wait
);
1604 TRACE_2(TR_FAC_VM
, TR_PAGE_CREATE_SLEEP_START
,
1605 "page_create_sleep_start: freemem %ld needfree %ld",
1609 * We are going to wait.
1610 * We currently hold all of the pcf_locks,
1611 * get the new_freemem_lock (it protects freemem_wait),
1612 * before dropping the pcf_locks.
1614 mutex_enter(&new_freemem_lock
);
1617 for (i
= 0; i
< pcf_fanout
; i
++) {
1619 mutex_exit(&p
->pcf_lock
);
1626 cv_wait(&freemem_cv
, &new_freemem_lock
);
1631 mutex_exit(&new_freemem_lock
);
1633 TRACE_2(TR_FAC_VM
, TR_PAGE_CREATE_SLEEP_END
,
1634 "page_create_sleep_end: freemem %ld needfree %ld",
1637 VM_STAT_ADD(page_create_not_enough_again
);
1641 * A routine to do the opposite of page_create_wait().
1644 page_create_putback(spgcnt_t npages
)
 * When a contiguous lump is broken up, we have to
 * deal with lots of pages (min 64) so let's spread
 * the wealth around.
1655 lump
= roundup(npages
, pcf_fanout
) / pcf_fanout
;
1658 for (p
= pcf
; (npages
> 0) && (p
< &pcf
[pcf_fanout
]); p
++) {
1659 which
= &p
->pcf_count
;
1661 mutex_enter(&p
->pcf_lock
);
1664 which
= &p
->pcf_reserve
;
1667 if (lump
< npages
) {
1668 *which
+= (uint_t
)lump
;
1671 *which
+= (uint_t
)npages
;
1676 mutex_enter(&new_freemem_lock
);
1678 * Check to see if some other thread
1679 * is actually waiting. Another bucket
1680 * may have woken it up by now. If there
1681 * are no waiters, then set our pcf_wait
1682 * count to zero to avoid coming in here
1687 cv_broadcast(&freemem_cv
);
1689 cv_signal(&freemem_cv
);
1695 mutex_exit(&new_freemem_lock
);
1697 mutex_exit(&p
->pcf_lock
);
1699 ASSERT(npages
== 0);
/*
 * A helper routine for page_create_get_something.
 * The indenting got too deep down there.
 * Unblock the pcf counters. Any pages freed after
 * pcf_block got set are moved to pcf_count and
 * wakeups (cv_broadcast() or cv_signal()) are done as needed.
 */
1715 /* Update freemem while we're here. */
1718 for (i
= 0; i
< pcf_fanout
; i
++) {
1719 mutex_enter(&p
->pcf_lock
);
1720 ASSERT(p
->pcf_count
== 0);
1721 p
->pcf_count
= p
->pcf_reserve
;
1723 freemem
+= p
->pcf_count
;
1725 mutex_enter(&new_freemem_lock
);
1727 if (p
->pcf_reserve
> 1) {
1728 cv_broadcast(&freemem_cv
);
1731 cv_signal(&freemem_cv
);
1737 mutex_exit(&new_freemem_lock
);
1740 mutex_exit(&p
->pcf_lock
);
1746 * Called from page_create_va() when both the cache and free lists
1747 * have been checked once.
1749 * Either returns a page or panics since the accounting was done
1750 * way before we got here.
1752 * We don't come here often, so leave the accounting on permanently.
1755 #define MAX_PCGS 100
1758 #define PCGS_TRIES 100
1760 #define PCGS_TRIES 10
1764 uint_t pcgs_counts
[PCGS_TRIES
];
1765 uint_t pcgs_too_many
;
1766 uint_t pcgs_entered
;
1767 uint_t pcgs_entered_noreloc
;
1769 uint_t pcgs_cagelocked
;
1770 #endif /* VM_STATS */
1773 page_create_get_something(vnode_t
*vp
, u_offset_t off
, struct seg
*seg
,
1774 caddr_t vaddr
, uint_t flags
)
1783 VM_STAT_ADD(pcgs_entered
);
1786 * Tap any reserve freelists: if we fail now, we'll die
1787 * since the page(s) we're looking for have already been
1792 if ((flags
& PG_NORELOC
) != 0) {
1793 VM_STAT_ADD(pcgs_entered_noreloc
);
1795 * Requests for free pages from critical threads
1796 * such as pageout still won't throttle here, but
1797 * we must try again, to give the cageout thread
1798 * another chance to catch up. Since we already
1799 * accounted for the pages, we had better get them
1802 * N.B. All non-critical threads acquire the pcgs_cagelock
1803 * to serialize access to the freelists. This implements a
 * turnstile-type synchronization to avoid starvation of
1805 * critical requests for PG_NORELOC memory by non-critical
1806 * threads: all non-critical threads must acquire a 'ticket'
1807 * before passing through, which entails making sure
1808 * kcage_freemem won't fall below minfree prior to grabbing
1809 * pages from the freelists.
1811 if (kcage_create_throttle(1, flags
) == KCT_NONCRIT
) {
1812 mutex_enter(&pcgs_cagelock
);
1814 VM_STAT_ADD(pcgs_cagelocked
);
1819 * Time to get serious.
1820 * We failed to get a `correctly colored' page from both the
1821 * free and cache lists.
1822 * We escalate in stage.
 * First try both lists without worrying about color.
1826 * Then, grab all page accounting locks (ie. pcf[]) and
1827 * steal any pages that they have and set the pcf_block flag to
1828 * stop deletions from the lists. This will help because
1829 * a page can get added to the free list while we are looking
1830 * at the cache list, then another page could be added to the cache
1831 * list allowing the page on the free list to be removed as we
1832 * move from looking at the cache list to the free list. This
1833 * could happen over and over. We would never find the page
1834 * we have accounted for.
1836 * Noreloc pages are a subset of the global (relocatable) page pool.
1837 * They are not tracked separately in the pcf bins, so it is
1838 * impossible to know when doing pcf accounting if the available
1839 * page(s) are noreloc pages or not. When looking for a noreloc page
1840 * it is quite easy to end up here even if the global (relocatable)
1841 * page pool has plenty of free pages but the noreloc pool is empty.
1843 * When the noreloc pool is empty (or low), additional noreloc pages
1844 * are created by converting pages from the global page pool. This
1845 * process will stall during pcf accounting if the pcf bins are
1846 * already locked. Such is the case when a noreloc allocation is
1847 * looping here in page_create_get_something waiting for more noreloc
1850 * Short of adding a new field to the pcf bins to accurately track
1851 * the number of free noreloc pages, we instead do not grab the
1852 * pcgs_lock, do not set the pcf blocks and do not timeout when
1853 * allocating a noreloc page. This allows noreloc allocations to
1854 * loop without blocking global page pool allocations.
1856 * NOTE: the behaviour of page_create_get_something has not changed
1857 * for the case of global page pool allocations.
1860 flags
&= ~PG_MATCH_COLOR
;
1863 flags
= page_create_update_flags_x86(flags
);
1866 lgrp
= lgrp_mem_choose(seg
, vaddr
, PAGESIZE
);
1868 for (count
= 0; kcage_on
|| count
< MAX_PCGS
; count
++) {
1869 pp
= page_get_freelist(vp
, off
, seg
, vaddr
, PAGESIZE
,
1872 pp
= page_get_cachelist(vp
, off
, seg
, vaddr
,
1877 * Serialize. Don't fight with other pcgs().
1879 if (!locked
&& (!kcage_on
|| !(flags
& PG_NORELOC
))) {
1880 mutex_enter(&pcgs_lock
);
1881 VM_STAT_ADD(pcgs_locked
);
1884 for (i
= 0; i
< pcf_fanout
; i
++) {
1885 mutex_enter(&p
->pcf_lock
);
1886 ASSERT(p
->pcf_block
== 0);
1888 p
->pcf_reserve
= p
->pcf_count
;
1890 mutex_exit(&p
->pcf_lock
);
1898 * Since page_free() puts pages on
1899 * a list then accounts for it, we
1900 * just have to wait for page_free()
1901 * to unlock any page it was working
1902 * with. The page_lock()-page_reclaim()
1903 * path falls in the same boat.
1905 * We don't need to check on the
1906 * PG_WAIT flag, we have already
1907 * accounted for the page we are
1908 * looking for in page_create_va().
1910 * We just wait a moment to let any
1911 * locked pages on the lists free up,
1912 * then continue around and try again.
1914 * Will be awakened by set_freemem().
1916 mutex_enter(&pcgs_wait_lock
);
1917 cv_wait(&pcgs_cv
, &pcgs_wait_lock
);
1918 mutex_exit(&pcgs_wait_lock
);
1922 if (count
>= PCGS_TRIES
) {
1923 VM_STAT_ADD(pcgs_too_many
);
1925 VM_STAT_ADD(pcgs_counts
[count
]);
1930 mutex_exit(&pcgs_lock
);
1933 mutex_exit(&pcgs_cagelock
);
1938 * we go down holding the pcf locks.
1940 panic("no %spage found %d",
1941 ((flags
& PG_NORELOC
) ? "non-reloc " : ""), count
);
1946 * Create enough pages for "bytes" worth of data starting at
1949 * Where flag must be one of:
1951 * PG_EXCL: Exclusive create (fail if any page already
1952 * exists in the page cache) which does not
1953 * wait for memory to become available.
1955 * PG_WAIT: Non-exclusive create which can wait for
1956 * memory to become available.
1958 * PG_PHYSCONTIG: Allocate physically contiguous pages.
1961 * A doubly linked list of pages is returned to the caller. Each page
1962 * on the list has the "exclusive" (p_selock) lock and "iolock" (p_iolock)
1965 * Unable to change the parameters to page_create() in a minor release,
1966 * we renamed page_create() to page_create_va(), changed all known calls
1967 * from page_create() to page_create_va(), and created this wrapper.
1969 * Upon a major release, we should break compatibility by deleting this
1970 * wrapper, and replacing all the strings "page_create_va", with "page_create".
1972 * NOTE: There is a copy of this interface as page_create_io() in
1973 * i86/vm/vm_machdep.c. Any bugs fixed here should be applied
1977 page_create(vnode_t
*vp
, u_offset_t off
, size_t bytes
, uint_t flags
)
1979 caddr_t random_vaddr
;
1983 cmn_err(CE_WARN
, "Using deprecated interface page_create: caller %p",
1987 random_vaddr
= (caddr_t
)(((uintptr_t)vp
>> 7) ^
1988 (uintptr_t)(off
>> PAGESHIFT
));
1991 return (page_create_va(vp
, off
, bytes
, flags
, &kseg
, random_vaddr
));
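/*
 * Conversion sketch (hypothetical caller): a legacy call such as
 *
 *	pp = page_create(vp, off, PAGESIZE, PG_WAIT);
 *
 * should instead call page_create_va() directly, passing the segment and
 * virtual address the pages are being created for, e.g.
 *
 *	pp = page_create_va(vp, off, PAGESIZE, PG_WAIT, seg, addr);
 */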
1995 uint32_t pg_alloc_pgs_mtbf
= 0;
1999 * Used for large page support. It will attempt to allocate
2000 * a large page(s) off the freelist.
2002 * Returns non zero on failure.
2005 page_alloc_pages(struct vnode
*vp
, struct seg
*seg
, caddr_t addr
,
2006 page_t
**basepp
, page_t
*ppa
[], uint_t szc
, int anypgsz
, int pgflags
)
2008 pgcnt_t npgs
, curnpgs
, totpgs
;
2010 page_t
*pplist
= NULL
, *pp
;
2014 ASSERT(szc
!= 0 && szc
<= (page_num_pagesizes() - 1));
2015 ASSERT(pgflags
== 0 || pgflags
== PG_LOCAL
);
2018 * Check if system heavily prefers local large pages over remote
2019 * on systems with multiple lgroups.
2021 if (lpg_alloc_prefer
== LPAP_LOCAL
&& nlgrps
> 1) {
2025 VM_STAT_ADD(alloc_pages
[0]);
2028 if (pg_alloc_pgs_mtbf
&& !(gethrtime() % pg_alloc_pgs_mtbf
)) {
2034 * One must be NULL but not both.
2035 * And one must be non NULL but not both.
2037 ASSERT(basepp
!= NULL
|| ppa
!= NULL
);
2038 ASSERT(basepp
== NULL
|| ppa
== NULL
);
2041 while (page_chk_freelist(szc
) == 0) {
2042 VM_STAT_ADD(alloc_pages
[8]);
2043 if (anypgsz
== 0 || --szc
== 0)
2048 pgsz
= page_get_pagesize(szc
);
2049 totpgs
= curnpgs
= npgs
= pgsz
>> PAGESHIFT
;
2051 ASSERT(((uintptr_t)addr
& (pgsz
- 1)) == 0);
2053 (void) page_create_wait(npgs
, PG_WAIT
);
2055 while (npgs
&& szc
) {
2056 lgrp
= lgrp_mem_choose(seg
, addr
, pgsz
);
2057 if (pgflags
== PG_LOCAL
) {
2058 pp
= page_get_freelist(vp
, 0, seg
, addr
, pgsz
,
2061 pp
= page_get_freelist(vp
, 0, seg
, addr
, pgsz
,
2065 pp
= page_get_freelist(vp
, 0, seg
, addr
, pgsz
,
2069 VM_STAT_ADD(alloc_pages
[1]);
2070 page_list_concat(&pplist
, &pp
);
2071 ASSERT(npgs
>= curnpgs
);
2073 } else if (anypgsz
) {
2074 VM_STAT_ADD(alloc_pages
[2]);
2076 pgsz
= page_get_pagesize(szc
);
2077 curnpgs
= pgsz
>> PAGESHIFT
;
2079 VM_STAT_ADD(alloc_pages
[3]);
2080 ASSERT(npgs
== totpgs
);
2081 page_create_putback(npgs
);
2086 VM_STAT_ADD(alloc_pages
[4]);
2088 page_create_putback(npgs
);
2090 } else if (basepp
!= NULL
) {
2092 ASSERT(ppa
== NULL
);
2096 npgs
= totpgs
- npgs
;
2100 * Clear the free and age bits. Also if we were passed in a ppa then
2101 * fill it in with all the constituent pages from the large page. But
2102 * if we failed to allocate all the pages just free what we got.
2105 ASSERT(PP_ISFREE(pp
));
2106 ASSERT(PP_ISAGED(pp
));
2107 if (ppa
!= NULL
|| err
!= 0) {
2109 VM_STAT_ADD(alloc_pages
[5]);
2112 page_sub(&pplist
, pp
);
2116 VM_STAT_ADD(alloc_pages
[6]);
2117 ASSERT(pp
->p_szc
!= 0);
2118 curnpgs
= page_get_pagecnt(pp
->p_szc
);
2119 page_list_break(&pp
, &pplist
, curnpgs
);
2120 page_list_add_pages(pp
, 0);
2121 page_create_putback(curnpgs
);
2122 ASSERT(npgs
>= curnpgs
);
2127 VM_STAT_ADD(alloc_pages
[7]);
2138 * Get a single large page off of the freelists, and set it up for use.
2139 * Number of bytes requested must be a supported page size.
2141 * Note that this call may fail even if there is sufficient
2142 * memory available or PG_WAIT is set, so the caller must
2143 * be willing to fallback on page_create_va(), block and retry,
2144 * or fail the requester.
2147 page_create_va_large(vnode_t
*vp
, u_offset_t off
, size_t bytes
, uint_t flags
,
2148 struct seg
*seg
, caddr_t vaddr
, void *arg
)
2154 lgrp_id_t
*lgrpid
= (lgrp_id_t
*)arg
;
2158 ASSERT((flags
& ~(PG_EXCL
| PG_WAIT
|
2159 PG_NORELOC
| PG_PANIC
| PG_PUSHPAGE
| PG_NORMALPRI
)) == 0);
2162 ASSERT((flags
& PG_EXCL
) == PG_EXCL
);
2164 npages
= btop(bytes
);
2166 if (!kcage_on
|| panicstr
) {
2168 * Cage is OFF, or we are single threaded in
2169 * panic, so make everything a RELOC request.
2171 flags
&= ~PG_NORELOC
;
2175 * Make sure there's adequate physical memory available.
2176 * Note: PG_WAIT is ignored here.
2178 if (freemem
<= throttlefree
+ npages
) {
2179 VM_STAT_ADD(page_create_large_cnt
[1]);
2184 * If cage is on, dampen draw from cage when available
2185 * cage space is low.
2187 if ((flags
& (PG_NORELOC
| PG_WAIT
)) == (PG_NORELOC
| PG_WAIT
) &&
2188 kcage_freemem
< kcage_throttlefree
+ npages
) {
2191 * The cage is on, the caller wants PG_NORELOC
2192 * pages and available cage memory is very low.
2193 * Call kcage_create_throttle() to attempt to
2194 * control demand on the cage.
2196 if (kcage_create_throttle(npages
, flags
) == KCT_FAILURE
) {
2197 VM_STAT_ADD(page_create_large_cnt
[2]);
2202 if (!pcf_decrement_bucket(npages
) &&
2203 !pcf_decrement_multiple(NULL
, npages
, 1)) {
2204 VM_STAT_ADD(page_create_large_cnt
[4]);
2209 * This is where this function behaves fundamentally differently
2210 * than page_create_va(); since we're intending to map the page
2211 * with a single TTE, we have to get it as a physically contiguous
2212 * hardware pagesize chunk. If we can't, we fail.
2214 if (lgrpid
!= NULL
&& *lgrpid
>= 0 && *lgrpid
<= lgrp_alloc_max
&&
2215 LGRP_EXISTS(lgrp_table
[*lgrpid
]))
2216 lgrp
= lgrp_table
[*lgrpid
];
2218 lgrp
= lgrp_mem_choose(seg
, vaddr
, bytes
);
2220 if ((rootpp
= page_get_freelist(&kvp
, off
, seg
, vaddr
,
2221 bytes
, flags
& ~PG_MATCH_COLOR
, lgrp
)) == NULL
) {
2222 page_create_putback(npages
);
2223 VM_STAT_ADD(page_create_large_cnt
[5]);
2228 * if we got the page with the wrong mtype give it back this is a
2229 * workaround for CR 6249718. When CR 6249718 is fixed we never get
2230 * inside "if" and the workaround becomes just a nop
2232 if (kcage_on
&& (flags
& PG_NORELOC
) && !PP_ISNORELOC(rootpp
)) {
2233 page_list_add_pages(rootpp
, 0);
2234 page_create_putback(npages
);
2235 VM_STAT_ADD(page_create_large_cnt
[6]);
2240 * If satisfying this request has left us with too little
2241 * memory, start the wheels turning to get some back. The
2242 * first clause of the test prevents waking up the pageout
2243 * daemon in situations where it would decide that there's
2246 if (nscan
< desscan
&& freemem
< minfree
) {
2247 TRACE_1(TR_FAC_VM
, TR_PAGEOUT_CV_SIGNAL
,
2248 "pageout_cv_signal:freemem %ld", freemem
);
2249 WAKE_PAGEOUT_SCANNER(va__large
);
2254 ASSERT(PAGE_EXCL(pp
));
2255 ASSERT(pp
->p_vnode
== NULL
);
2256 ASSERT(!hat_page_is_mapped(pp
));
2259 if (!page_hashin(pp
, vp
, off
, NULL
))
2260 panic("page_create_large: hashin failed: page %p",
2267 VM_STAT_ADD(page_create_large_cnt
[0]);
2272 page_create_va(vnode_t
*vp
, u_offset_t off
, size_t bytes
, uint_t flags
,
2273 struct seg
*seg
, caddr_t vaddr
)
2275 page_t
*plist
= NULL
;
2277 pgcnt_t found_on_free
= 0;
2283 TRACE_4(TR_FAC_VM
, TR_PAGE_CREATE_START
,
2284 "page_create_start:vp %p off %llx bytes %lu flags %x",
2285 vp
, off
, bytes
, flags
);
2287 ASSERT(bytes
!= 0 && vp
!= NULL
);
2289 if ((flags
& PG_EXCL
) == 0 && (flags
& PG_WAIT
) == 0) {
2290 panic("page_create: invalid flags");
2293 ASSERT((flags
& ~(PG_EXCL
| PG_WAIT
|
2294 PG_NORELOC
| PG_PANIC
| PG_PUSHPAGE
| PG_NORMALPRI
)) == 0);
2297 pages_req
= npages
= btopr(bytes
);
2299 * Try to see whether request is too large to *ever* be
2300 * satisfied, in order to prevent deadlock. We arbitrarily
2301 * decide to limit maximum size requests to max_page_get.
2303 if (npages
>= max_page_get
) {
2304 if ((flags
& PG_WAIT
) == 0) {
2305 TRACE_4(TR_FAC_VM
, TR_PAGE_CREATE_TOOBIG
,
2306 "page_create_toobig:vp %p off %llx npages "
2307 "%lu max_page_get %lu",
2308 vp
, off
, npages
, max_page_get
);
2312 "Request for too much kernel memory "
2313 "(%lu bytes), will hang forever", bytes
);
2319 if (!kcage_on
|| panicstr
) {
2321 * Cage is OFF, or we are single threaded in
2322 * panic, so make everything a RELOC request.
2324 flags
&= ~PG_NORELOC
;
2327 if (freemem
<= throttlefree
+ npages
)
2328 if (!page_create_throttle(npages
, flags
))
2332 * If cage is on, dampen draw from cage when available
2333 * cage space is low.
2335 if ((flags
& PG_NORELOC
) &&
2336 kcage_freemem
< kcage_throttlefree
+ npages
) {
2339 * The cage is on, the caller wants PG_NORELOC
2340 * pages and available cage memory is very low.
2341 * Call kcage_create_throttle() to attempt to
2342 * control demand on the cage.
2344 if (kcage_create_throttle(npages
, flags
) == KCT_FAILURE
)
2348 VM_STAT_ADD(page_create_cnt
[0]);
2350 if (!pcf_decrement_bucket(npages
)) {
2352 * Have to look harder. If npages is greater than
2353 * one, then we might have to coalesce the counters.
2355 * Go wait. We come back having accounted
2358 VM_STAT_ADD(page_create_cnt
[1]);
2359 if (!page_create_wait(npages
, flags
)) {
2360 VM_STAT_ADD(page_create_cnt
[2]);
2365 TRACE_2(TR_FAC_VM
, TR_PAGE_CREATE_SUCCESS
,
2366 "page_create_success:vp %p off %llx", vp
, off
);
2369 * If satisfying this request has left us with too little
2370 * memory, start the wheels turning to get some back. The
2371 * first clause of the test prevents waking up the pageout
2372 * daemon in situations where it would decide that there's
2375 if (nscan
< desscan
&& freemem
< minfree
) {
2376 TRACE_1(TR_FAC_VM
, TR_PAGEOUT_CV_SIGNAL
,
2377 "pageout_cv_signal:freemem %ld", freemem
);
2378 WAKE_PAGEOUT_SCANNER(va
);
2382 * Loop around collecting the requested number of pages.
2383 * Most of the time, we have to `create' a new page. With
2384 * this in mind, pull the page off the free list before
2385 * getting the hash lock. This will minimize the hash
2386 * lock hold time, nesting, and the like. If it turns
2387 * out we don't need the page, we put it back at the end.
2391 kmutex_t
*phm
= NULL
;
2394 index
= PAGE_HASH_FUNC(vp
, off
);
2396 ASSERT(phm
== NULL
);
2397 ASSERT(index
== PAGE_HASH_FUNC(vp
, off
));
2398 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp
)));
 * Try to get a page from the freelist (ie,
 * a page with no [vp, off] tag). If that
 * fails, use the cachelist.
 *
 * During the first attempt at both the free
 * and cache lists we try for the correct color.
 *
 * XXXX-how do we deal with virtual indexed
 * caches and colors?
2413 VM_STAT_ADD(page_create_cnt
[4]);
2415 * Get lgroup to allocate next page of shared memory
2416 * from and use it to specify where to allocate
2417 * the physical memory
2419 lgrp
= lgrp_mem_choose(seg
, vaddr
, PAGESIZE
);
2420 npp
= page_get_freelist(vp
, off
, seg
, vaddr
, PAGESIZE
,
2421 flags
| PG_MATCH_COLOR
, lgrp
);
2423 npp
= page_get_cachelist(vp
, off
, seg
,
2424 vaddr
, flags
| PG_MATCH_COLOR
, lgrp
);
2426 npp
= page_create_get_something(vp
,
2428 flags
& ~PG_MATCH_COLOR
);
2431 if (PP_ISAGED(npp
) == 0) {
2433 * Since this page came from the
2434 * cachelist, we must destroy the
2435 * old vnode association.
2437 page_hashout(npp
, NULL
);
2445 ASSERT(PAGE_EXCL(npp
));
2446 ASSERT(npp
->p_vnode
== NULL
);
2447 ASSERT(!hat_page_is_mapped(npp
));
 * Here we have a page in our hot little mitts and are
 * just waiting to stuff it on the appropriate lists.
 * Get the mutex and check to see if it really does
 * not exist.
2457 phm
= PAGE_HASH_MUTEX(index
);
2459 pp
= page_hash_search(index
, vp
, off
);
2461 VM_STAT_ADD(page_create_new
);
2464 if (!page_hashin(pp
, vp
, off
, phm
)) {
 * Since we hold the page hash mutex and
 * just searched for this page, page_hashin
 * had better not fail. If it does, that
 * means some thread did not follow the
 * page hash mutex rules. Panic now and
 * get it over with. As usual, go down
 * holding all the locks.
2474 ASSERT(MUTEX_HELD(phm
));
2475 panic("page_create: "
2476 "hashin failed %p %p %llx %p",
2477 (void *)pp
, (void *)vp
, off
, (void *)phm
);
2480 ASSERT(MUTEX_HELD(phm
));
2485 * Hat layer locking need not be done to set
2486 * the following bits since the page is not hashed
2487 * and was on the free list (i.e., had no mappings).
2489 * Set the reference bit to protect
2490 * against immediate pageout
2492 * XXXmh modify freelist code to set reference
2493 * bit so we don't have to do it here.
2495 page_set_props(pp
, P_REF
);
2498 VM_STAT_ADD(page_create_exists
);
2499 if (flags
& PG_EXCL
) {
2501 * Found an existing page, and the caller
2502 * wanted all new pages. Undo all of the work
2507 while (plist
!= NULL
) {
2509 page_sub(&plist
, pp
);
2511 /* large pages should not end up here */
2512 ASSERT(pp
->p_szc
== 0);
2513 /*LINTED: constant in conditional ctx*/
2514 VN_DISPOSE(pp
, B_INVAL
, 0, kcred
);
2516 VM_STAT_ADD(page_create_found_one
);
2519 ASSERT(flags
& PG_WAIT
);
2520 if (!page_lock(pp
, SE_EXCL
, phm
, P_NO_RECLAIM
)) {
2522 * Start all over again if we blocked trying
2526 VM_STAT_ADD(page_create_page_lock_failed
);
2533 if (PP_ISFREE(pp
)) {
2534 ASSERT(PP_ISAGED(pp
) == 0);
2535 VM_STAT_ADD(pagecnt
.pc_get_cache
);
2536 page_list_sub(pp
, PG_CACHE_LIST
);
2543 * Got a page! It is locked. Acquire the i/o
2544 * lock since we are going to use the p_next and
2545 * p_prev fields to link the requested pages together.
2548 page_add(&plist
, pp
);
2549 plist
= plist
->p_next
;
2554 ASSERT((flags
& PG_EXCL
) ? (found_on_free
== pages_req
) : 1);
2558 * Did not need this page after all.
2559 * Put it back on the free list.
2561 VM_STAT_ADD(page_create_putbacks
);
2564 npp
->p_offset
= (u_offset_t
)-1;
2565 page_list_add(npp
, PG_FREE_LIST
| PG_LIST_TAIL
);
2570 ASSERT(pages_req
>= found_on_free
);
2573 uint_t overshoot
= (uint_t
)(pages_req
- found_on_free
);
2576 VM_STAT_ADD(page_create_overshoot
);
2577 p
= &pcf
[PCF_INDEX()];
2578 mutex_enter(&p
->pcf_lock
);
2580 p
->pcf_reserve
+= overshoot
;
2582 p
->pcf_count
+= overshoot
;
2584 mutex_enter(&new_freemem_lock
);
2586 cv_signal(&freemem_cv
);
2591 mutex_exit(&new_freemem_lock
);
2594 mutex_exit(&p
->pcf_lock
);
2595 /* freemem is approximate, so this test OK */
2597 freemem
+= overshoot
;
2605 * One or more constituent pages of this large page has been marked
2606 * toxic. Simply demote the large page to PAGESIZE pages and let
2607 * page_free() handle it. This routine should only be called by
2608 * large page free routines (page_free_pages() and page_destroy_pages().
2609 * All pages are locked SE_EXCL and have already been marked free.
2612 page_free_toxic_pages(page_t
*rootpp
)
2615 pgcnt_t i
, pgcnt
= page_get_pagecnt(rootpp
->p_szc
);
2616 uint_t szc
= rootpp
->p_szc
;
2618 for (i
= 0, tpp
= rootpp
; i
< pgcnt
; i
++, tpp
= tpp
->p_next
) {
2619 ASSERT(tpp
->p_szc
== szc
);
2620 ASSERT((PAGE_EXCL(tpp
) &&
2621 !page_iolock_assert(tpp
)) || panicstr
);
2625 while (rootpp
!= NULL
) {
2627 page_sub(&rootpp
, tpp
);
2628 ASSERT(PP_ISFREE(tpp
));
2635 * Put page on the "free" list.
2636 * The free list is really two lists maintained by
2637 * the PSM of whatever machine we happen to be on.
2640 page_free(page_t
*pp
, int dontneed
)
2645 ASSERT((PAGE_EXCL(pp
) &&
2646 !page_iolock_assert(pp
)) || panicstr
);
2648 if (PP_ISFREE(pp
)) {
2649 panic("page_free: page %p is free", (void *)pp
);
2652 if (pp
->p_szc
!= 0) {
2653 if (pp
->p_vnode
== NULL
|| IS_SWAPFSVP(pp
->p_vnode
) ||
2655 panic("page_free: anon or kernel "
2656 "or no vnode large page %p", (void *)pp
);
2658 page_demote_vp_pages(pp
);
2659 ASSERT(pp
->p_szc
== 0);
2663 * The page_struct_lock need not be acquired to examine these
2664 * fields since the page has an "exclusive" lock.
2666 if (hat_page_is_mapped(pp
) || pp
->p_lckcnt
!= 0 || pp
->p_cowcnt
!= 0 ||
2667 pp
->p_slckcnt
!= 0) {
2668 panic("page_free pp=%p, pfn=%lx, lckcnt=%d, cowcnt=%d "
2669 "slckcnt = %d", (void *)pp
, page_pptonum(pp
), pp
->p_lckcnt
,
2670 pp
->p_cowcnt
, pp
->p_slckcnt
);
2674 ASSERT(!hat_page_getshare(pp
));
2677 ASSERT(pp
->p_vnode
== NULL
|| !IS_VMODSORT(pp
->p_vnode
) ||
2679 page_clr_all_props(pp
);
2680 ASSERT(!hat_page_getshare(pp
));
2683 * Now we add the page to the head of the free list.
2684 * But if this page is associated with a paged vnode
2685 * then we adjust the head forward so that the page is
2686 * effectively at the end of the list.
2688 if (pp
->p_vnode
== NULL
) {
2690 * Page has no identity, put it on the free list.
2693 pp
->p_offset
= (u_offset_t
)-1;
2694 page_list_add(pp
, PG_FREE_LIST
| PG_LIST_TAIL
);
2695 VM_STAT_ADD(pagecnt
.pc_free_free
);
2696 TRACE_1(TR_FAC_VM
, TR_PAGE_FREE_FREE
,
2697 "page_free_free:pp %p", pp
);
2702 /* move it to the tail of the list */
2703 page_list_add(pp
, PG_CACHE_LIST
| PG_LIST_TAIL
);
2705 VM_STAT_ADD(pagecnt
.pc_free_cache
);
2706 TRACE_1(TR_FAC_VM
, TR_PAGE_FREE_CACHE_TAIL
,
2707 "page_free_cache_tail:pp %p", pp
);
2709 page_list_add(pp
, PG_CACHE_LIST
| PG_LIST_HEAD
);
2711 VM_STAT_ADD(pagecnt
.pc_free_dontneed
);
2712 TRACE_1(TR_FAC_VM
, TR_PAGE_FREE_CACHE_HEAD
,
2713 "page_free_cache_head:pp %p", pp
);
2719 * Now do the `freemem' accounting.
2721 pcf_index
= PCF_INDEX();
2722 p
= &pcf
[pcf_index
];
2724 mutex_enter(&p
->pcf_lock
);
2726 p
->pcf_reserve
+= 1;
2730 mutex_enter(&new_freemem_lock
);
2732 * Check to see if some other thread
2733 * is actually waiting. Another bucket
2734 * may have woken it up by now. If there
2735 * are no waiters, then set our pcf_wait
2736 * count to zero to avoid coming in here
2737 * next time. Also, since only one page
2738 * was put on the free list, just wake
2742 cv_signal(&freemem_cv
);
2747 mutex_exit(&new_freemem_lock
);
2750 mutex_exit(&p
->pcf_lock
);
2752 /* freemem is approximate, so this test OK */
2758 * Put page on the "free" list during intial startup.
2759 * This happens during initial single threaded execution.
2762 page_free_at_startup(page_t
*pp
)
2767 page_list_add(pp
, PG_FREE_LIST
| PG_LIST_HEAD
| PG_LIST_ISINIT
);
2768 VM_STAT_ADD(pagecnt
.pc_free_free
);
2771 * Now do the `freemem' accounting.
2773 pcf_index
= PCF_INDEX();
2774 p
= &pcf
[pcf_index
];
2776 ASSERT(p
->pcf_block
== 0);
2777 ASSERT(p
->pcf_wait
== 0);
2780 /* freemem is approximate, so this is OK */
2785 page_free_pages(page_t
*pp
)
2787 page_t
*tpp
, *rootpp
= NULL
;
2788 pgcnt_t pgcnt
= page_get_pagecnt(pp
->p_szc
);
2790 uint_t szc
= pp
->p_szc
;
2792 VM_STAT_ADD(pagecnt
.pc_free_pages
);
2793 TRACE_1(TR_FAC_VM
, TR_PAGE_FREE_FREE
,
2794 "page_free_free:pp %p", pp
);
2796 ASSERT(pp
->p_szc
!= 0 && pp
->p_szc
< page_num_pagesizes());
2797 if ((page_pptonum(pp
) & (pgcnt
- 1)) != 0) {
2798 panic("page_free_pages: not root page %p", (void *)pp
);
2802 for (i
= 0, tpp
= pp
; i
< pgcnt
; i
++, tpp
++) {
2803 ASSERT((PAGE_EXCL(tpp
) &&
2804 !page_iolock_assert(tpp
)) || panicstr
);
2805 if (PP_ISFREE(tpp
)) {
2806 panic("page_free_pages: page %p is free", (void *)tpp
);
2809 if (hat_page_is_mapped(tpp
) || tpp
->p_lckcnt
!= 0 ||
2810 tpp
->p_cowcnt
!= 0 || tpp
->p_slckcnt
!= 0) {
2811 panic("page_free_pages %p", (void *)tpp
);
2815 ASSERT(!hat_page_getshare(tpp
));
2816 ASSERT(tpp
->p_vnode
== NULL
);
2817 ASSERT(tpp
->p_szc
== szc
);
2820 page_clr_all_props(tpp
);
2822 tpp
->p_offset
= (u_offset_t
)-1;
2823 ASSERT(tpp
->p_next
== tpp
);
2824 ASSERT(tpp
->p_prev
== tpp
);
2825 page_list_concat(&rootpp
, &tpp
);
2827 ASSERT(rootpp
== pp
);
2829 page_list_add_pages(rootpp
, 0);
2830 page_create_putback(pgcnt
);
2836 * This routine attempts to return pages to the cachelist via page_release().
2837 * It does not *have* to be successful in all cases, since the pageout scanner
2838 * will catch any pages it misses. It does need to be fast and not introduce
2839 * too much overhead.
2841 * If a page isn't found on the unlocked sweep of the page_hash bucket, we
2842 * don't lock and retry. This is ok, since the page scanner will eventually
2843 * find any page we miss in free_vp_pages().
2846 free_vp_pages(vnode_t
*vp
, u_offset_t off
, size_t len
)
2850 extern int swap_in_range(vnode_t
*, u_offset_t
, size_t);
2854 if (free_pages
== 0)
2856 if (swap_in_range(vp
, off
, len
))
2859 for (; off
< eoff
; off
+= PAGESIZE
) {
2862 * find the page using a fast, but inexact search. It'll be OK
2863 * if a few pages slip through the cracks here.
2865 pp
= page_exists(vp
, off
);
2868 * If we didn't find the page (it may not exist), the page
2869 * is free, looks still in use (shared), or we can't lock it,
2874 page_share_cnt(pp
) > 0 ||
2875 !page_trylock(pp
, SE_EXCL
))
2879 * Once we have locked pp, verify that it's still the
2880 * correct page and not already free
2882 ASSERT(PAGE_LOCKED_SE(pp
, SE_EXCL
));
2883 if (pp
->p_vnode
!= vp
|| pp
->p_offset
!= off
|| PP_ISFREE(pp
)) {
2889 * try to release the page...
2891 (void) page_release(pp
, 1);
2896 * Reclaim the given page from the free list.
2897 * If pp is part of a large pages, only the given constituent page is reclaimed
2898 * and the large page it belonged to will be demoted. This can only happen
2899 * if the page is not on the cachelist.
2901 * Returns 1 on success or 0 on failure.
2903 * The page is unlocked if it can't be reclaimed (when freemem == 0).
2904 * If `lock' is non-null, it will be dropped and re-acquired if
2905 * the routine must wait while freemem is 0.
2907 * As it turns out, boot_getpages() does this. It picks a page,
2908 * based on where OBP mapped in some address, gets its pfn, searches
2909 * the memsegs, locks the page, then pulls it off the free list!
2912 page_reclaim(page_t
*pp
, kmutex_t
*lock
)
2919 ASSERT(lock
!= NULL
? MUTEX_HELD(lock
) : 1);
2920 ASSERT(PAGE_EXCL(pp
) && PP_ISFREE(pp
));
2923 * If `freemem' is 0, we cannot reclaim this page from the
2924 * freelist, so release every lock we might hold: the page,
2925 * and the `lock' before blocking.
2927 * The only way `freemem' can become 0 while there are pages
2928 * marked free (have their p->p_free bit set) is when the
2929 * system is low on memory and doing a page_create(). In
2930 * order to guarantee that once page_create() starts acquiring
2931 * pages it will be able to get all that it needs since `freemem'
2932 * was decreased by the requested amount. So, we need to release
2933 * this page, and let page_create() have it.
2935 * Since `freemem' being zero is not supposed to happen, just
2936 * use the usual hash stuff as a starting point. If that bucket
2937 * is empty, then assume the worst, and start at the beginning
2938 * of the pcf array. If we always start at the beginning
2939 * when acquiring more than one pcf lock, there won't be any
2940 * deadlock problems.
2943 /* TODO: Do we need to test kcage_freemem if PG_NORELOC(pp)? */
2945 if (freemem
<= throttlefree
&& !page_create_throttle(1l, 0)) {
2947 goto page_reclaim_nomem
;
2950 enough
= pcf_decrement_bucket(1);
2953 VM_STAT_ADD(page_reclaim_zero
);
2955 * Check again. Its possible that some other thread
2956 * could have been right behind us, and added one
2957 * to a list somewhere. Acquire each of the pcf locks
2958 * until we find a page.
2961 for (i
= 0; i
< pcf_fanout
; i
++) {
2962 mutex_enter(&p
->pcf_lock
);
2963 if (p
->pcf_count
>= 1) {
2966 * freemem is not protected by any lock. Thus,
2967 * we cannot have any assertion containing
2980 * We really can't have page `pp'.
2981 * Time for the no-memory dance with
2982 * page_free(). This is just like
2983 * page_create_wait(). Plus the added
2984 * attraction of releasing whatever mutex
2985 * we held when we were called with in `lock'.
2986 * Page_unlock() will wakeup any thread
2987 * waiting around for this page.
2990 VM_STAT_ADD(page_reclaim_zero_locked
);
2996 * get this before we drop all the pcf locks.
2998 mutex_enter(&new_freemem_lock
);
3001 for (i
= 0; i
< pcf_fanout
; i
++) {
3003 mutex_exit(&p
->pcf_lock
);
3008 cv_wait(&freemem_cv
, &new_freemem_lock
);
3011 mutex_exit(&new_freemem_lock
);
3020 * The pcf accounting has been done,
3021 * though none of the pcf_wait flags have been set,
3022 * drop the locks and continue on.
3025 mutex_exit(&p
->pcf_lock
);
3031 VM_STAT_ADD(pagecnt
.pc_reclaim
);
3034 * page_list_sub will handle the case where pp is a large page.
3035 * It's possible that the page was promoted while on the freelist
3037 if (PP_ISAGED(pp
)) {
3038 page_list_sub(pp
, PG_FREE_LIST
);
3039 TRACE_1(TR_FAC_VM
, TR_PAGE_UNFREE_FREE
,
3040 "page_reclaim_free:pp %p", pp
);
3042 page_list_sub(pp
, PG_CACHE_LIST
);
3043 TRACE_1(TR_FAC_VM
, TR_PAGE_UNFREE_CACHE
,
3044 "page_reclaim_cache:pp %p", pp
);
3048 * clear the p_free & p_age bits since this page is no longer
3049 * on the free list. Notice that there was a brief time where
3050 * a page is marked as free, but is not on the list.
3052 * Set the reference bit to protect against immediate pageout.
3056 page_set_props(pp
, P_REF
);
3058 CPU_STATS_ENTER_K();
3059 cpup
= CPU
; /* get cpup now that CPU cannot change */
3060 CPU_STATS_ADDQ(cpup
, vm
, pgrec
, 1);
3061 CPU_STATS_ADDQ(cpup
, vm
, pgfrec
, 1);
3063 ASSERT(pp
->p_szc
== 0);
3069 * Destroy identity of the page and put it back on
3070 * the page free list. Assumes that the caller has
3071 * acquired the "exclusive" lock on the page.
3074 page_destroy(page_t
*pp
, int dontfree
)
3076 ASSERT((PAGE_EXCL(pp
) &&
3077 !page_iolock_assert(pp
)) || panicstr
);
3078 ASSERT(pp
->p_slckcnt
== 0 || panicstr
);
3080 if (pp
->p_szc
!= 0) {
3081 if (pp
->p_vnode
== NULL
|| IS_SWAPFSVP(pp
->p_vnode
) ||
3083 panic("page_destroy: anon or kernel or no vnode "
3084 "large page %p", (void *)pp
);
3086 page_demote_vp_pages(pp
);
3087 ASSERT(pp
->p_szc
== 0);
3090 TRACE_1(TR_FAC_VM
, TR_PAGE_DESTROY
, "page_destroy:pp %p", pp
);
3093 * Unload translations, if any, then hash out the
3094 * page to erase its identity.
3096 (void) hat_pageunload(pp
, HAT_FORCE_PGUNLOAD
);
3097 page_hashout(pp
, NULL
);
3101 * Acquire the "freemem_lock" for availrmem.
3102 * The page_struct_lock need not be acquired for lckcnt
3103 * and cowcnt since the page has an "exclusive" lock.
3104 * We are doing a modified version of page_pp_unlock here.
3106 if ((pp
->p_lckcnt
!= 0) || (pp
->p_cowcnt
!= 0)) {
3107 mutex_enter(&freemem_lock
);
3108 if (pp
->p_lckcnt
!= 0) {
3113 if (pp
->p_cowcnt
!= 0) {
3114 availrmem
+= pp
->p_cowcnt
;
3115 pages_locked
-= pp
->p_cowcnt
;
3118 mutex_exit(&freemem_lock
);
3121 * Put the page on the "free" list.
3128 page_destroy_pages(page_t
*pp
)
3131 page_t
*tpp
, *rootpp
= NULL
;
3132 pgcnt_t pgcnt
= page_get_pagecnt(pp
->p_szc
);
3133 pgcnt_t i
, pglcks
= 0;
3134 uint_t szc
= pp
->p_szc
;
3136 ASSERT(pp
->p_szc
!= 0 && pp
->p_szc
< page_num_pagesizes());
3138 VM_STAT_ADD(pagecnt
.pc_destroy_pages
);
3140 TRACE_1(TR_FAC_VM
, TR_PAGE_DESTROY
, "page_destroy_pages:pp %p", pp
);
3142 if ((page_pptonum(pp
) & (pgcnt
- 1)) != 0) {
3143 panic("page_destroy_pages: not root page %p", (void *)pp
);
3147 for (i
= 0, tpp
= pp
; i
< pgcnt
; i
++, tpp
++) {
3148 ASSERT((PAGE_EXCL(tpp
) &&
3149 !page_iolock_assert(tpp
)) || panicstr
);
3150 ASSERT(tpp
->p_slckcnt
== 0 || panicstr
);
3151 (void) hat_pageunload(tpp
, HAT_FORCE_PGUNLOAD
);
3152 page_hashout(tpp
, NULL
);
3153 ASSERT(tpp
->p_offset
== (u_offset_t
)-1);
3154 if (tpp
->p_lckcnt
!= 0) {
3157 } else if (tpp
->p_cowcnt
!= 0) {
3158 pglcks
+= tpp
->p_cowcnt
;
3161 ASSERT(!hat_page_getshare(tpp
));
3162 ASSERT(tpp
->p_vnode
== NULL
);
3163 ASSERT(tpp
->p_szc
== szc
);
3166 page_clr_all_props(tpp
);
3168 ASSERT(tpp
->p_next
== tpp
);
3169 ASSERT(tpp
->p_prev
== tpp
);
3170 page_list_concat(&rootpp
, &tpp
);
3173 ASSERT(rootpp
== pp
);
3175 mutex_enter(&freemem_lock
);
3176 availrmem
+= pglcks
;
3177 mutex_exit(&freemem_lock
);
3180 page_list_add_pages(rootpp
, 0);
3181 page_create_putback(pgcnt
);
3185 * Similar to page_destroy(), but destroys pages which are
3186 * locked and known to be on the page free list. Since
3187 * the page is known to be free and locked, no one can access
3190 * Also, the number of free pages does not change.
3193 page_destroy_free(page_t
*pp
)
3195 ASSERT(PAGE_EXCL(pp
));
3196 ASSERT(PP_ISFREE(pp
));
3197 ASSERT(pp
->p_vnode
);
3198 ASSERT(hat_page_getattr(pp
, P_MOD
| P_REF
| P_RO
) == 0);
3199 ASSERT(!hat_page_is_mapped(pp
));
3200 ASSERT(PP_ISAGED(pp
) == 0);
3201 ASSERT(pp
->p_szc
== 0);
3203 VM_STAT_ADD(pagecnt
.pc_destroy_free
);
3204 page_list_sub(pp
, PG_CACHE_LIST
);
3206 page_hashout(pp
, NULL
);
3207 ASSERT(pp
->p_vnode
== NULL
);
3208 ASSERT(pp
->p_offset
== (u_offset_t
)-1);
3209 ASSERT(pp
->p_hash
== NULL
);
3212 page_list_add(pp
, PG_FREE_LIST
| PG_LIST_TAIL
);
3215 mutex_enter(&new_freemem_lock
);
3217 cv_signal(&freemem_cv
);
3219 mutex_exit(&new_freemem_lock
);
3223 * Rename the page "opp" to have an identity specified
3224 * by [vp, off]. If a page already exists with this name
3225 * it is locked and destroyed. Note that the page's
3226 * translations are not unloaded during the rename.
3228 * This routine is used by the anon layer to "steal" the
3229 * original page and is not unlike destroying a page and
3230 * creating a new page using the same page frame.
3232 * XXX -- Could deadlock if caller 1 tries to rename A to B while
3233 * caller 2 tries to rename B to A.
3236 page_rename(page_t
*opp
, vnode_t
*vp
, u_offset_t off
)
3244 ASSERT(PAGE_EXCL(opp
) && !page_iolock_assert(opp
));
3245 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp
)));
3246 ASSERT(PP_ISFREE(opp
) == 0);
3248 VM_STAT_ADD(page_rename_count
);
3250 TRACE_3(TR_FAC_VM
, TR_PAGE_RENAME
,
3251 "page rename:pp %p vp %p off %llx", opp
, vp
, off
);
3254 * CacheFS may call page_rename for a large NFS page
3255 * when both CacheFS and NFS mount points are used
3256 * by applications. Demote this large page before
3257 * renaming it, to ensure that there are no "partial"
3258 * large pages left lying around.
3260 if (opp
->p_szc
!= 0) {
3261 vnode_t
*ovp
= opp
->p_vnode
;
3262 ASSERT(ovp
!= NULL
);
3263 ASSERT(!IS_SWAPFSVP(ovp
));
3264 ASSERT(!VN_ISKAS(ovp
));
3265 page_demote_vp_pages(opp
);
3266 ASSERT(opp
->p_szc
== 0);
3269 page_hashout(opp
, NULL
);
3273 * Acquire the appropriate page hash lock, since
3274 * we're going to rename the page.
3276 index
= PAGE_HASH_FUNC(vp
, off
);
3277 phm
= PAGE_HASH_MUTEX(index
);
3281 * Look for an existing page with this name and destroy it if found.
3282 * By holding the page hash lock all the way to the page_hashin()
3283 * call, we are assured that no page can be created with this
3284 * identity. In the case when the phm lock is dropped to undo any
3285 * hat layer mappings, the existing page is held with an "exclusive"
3286 * lock, again preventing another page from being created with
3289 pp
= page_hash_search(index
, vp
, off
);
3291 VM_STAT_ADD(page_rename_exists
);
3294 * As it turns out, this is one of only two places where
3295 * page_lock() needs to hold the passed in lock in the
3296 * successful case. In all of the others, the lock could
3297 * be dropped as soon as the attempt is made to lock
3298 * the page. It is tempting to add yet another arguement,
3299 * PL_KEEP or PL_DROP, to let page_lock know what to do.
3301 if (!page_lock(pp
, SE_EXCL
, phm
, P_RECLAIM
)) {
3303 * Went to sleep because the page could not
3304 * be locked. We were woken up when the page
3305 * was unlocked, or when the page was destroyed.
3306 * In either case, `phm' was dropped while we
3307 * slept. Hence we should not just roar through
3314 * If an existing page is a large page, then demote
3315 * it to ensure that no "partial" large pages are
3316 * "created" after page_rename. An existing page
3317 * can be a CacheFS page, and can't belong to swapfs.
3319 if (hat_page_is_mapped(pp
)) {
3321 * Unload translations. Since we hold the
3322 * exclusive lock on this page, the page
3323 * can not be changed while we drop phm.
3324 * This is also not a lock protocol violation,
3325 * but rather the proper way to do things.
3328 (void) hat_pageunload(pp
, HAT_FORCE_PGUNLOAD
);
3329 if (pp
->p_szc
!= 0) {
3330 ASSERT(!IS_SWAPFSVP(vp
));
3331 ASSERT(!VN_ISKAS(vp
));
3332 page_demote_vp_pages(pp
);
3333 ASSERT(pp
->p_szc
== 0);
3336 } else if (pp
->p_szc
!= 0) {
3337 ASSERT(!IS_SWAPFSVP(vp
));
3338 ASSERT(!VN_ISKAS(vp
));
3340 page_demote_vp_pages(pp
);
3341 ASSERT(pp
->p_szc
== 0);
3344 page_hashout(pp
, phm
);
3347 * Hash in the page with the new identity.
3349 if (!page_hashin(opp
, vp
, off
, phm
)) {
3351 * We were holding phm while we searched for [vp, off]
3352 * and only dropped phm if we found and locked a page.
3353 * If we can't create this page now, then some thing
3356 panic("page_rename: Can't hash in page: %p", (void *)pp
);
3360 ASSERT(MUTEX_HELD(phm
));
3364 * Now that we have dropped phm, lets get around to finishing up
3368 ASSERT(!hat_page_is_mapped(pp
));
3369 /* for now large pages should not end up here */
3370 ASSERT(pp
->p_szc
== 0);
3372 * Save the locks for transfer to the new page and then
3373 * clear them so page_free doesn't think they're important.
3374 * The page_struct_lock need not be acquired for lckcnt and
3375 * cowcnt since the page has an "exclusive" lock.
3377 olckcnt
= pp
->p_lckcnt
;
3378 ocowcnt
= pp
->p_cowcnt
;
3379 pp
->p_lckcnt
= pp
->p_cowcnt
= 0;
3382 * Put the page on the "free" list after we drop
3383 * the lock. The less work under the lock the better.
3385 /*LINTED: constant in conditional context*/
3386 VN_DISPOSE(pp
, B_FREE
, 0, kcred
);
3390 * Transfer the lock count from the old page (if any).
3391 * The page_struct_lock need not be acquired for lckcnt and
3392 * cowcnt since the page has an "exclusive" lock.
3394 opp
->p_lckcnt
+= olckcnt
;
3395 opp
->p_cowcnt
+= ocowcnt
;
3399 * low level routine to add page `pp' to the hash and vp chains for [vp, offset]
3401 * Pages are normally inserted at the start of a vnode's v_pages list.
3402 * If the vnode is VMODSORT and the page is modified, it goes at the end.
3403 * This can happen when a modified page is relocated for DR.
3405 * Returns 1 on success and 0 on failure.
3408 page_do_hashin(page_t
*pp
, vnode_t
*vp
, u_offset_t offset
)
3414 ASSERT(PAGE_EXCL(pp
));
3416 ASSERT(MUTEX_HELD(page_vnode_mutex(vp
)));
3419 * Be sure to set these up before the page is inserted on the hash
3420 * list. As soon as the page is placed on the list some other
3421 * thread might get confused and wonder how this page could
3422 * possibly hash to this list.
3425 pp
->p_offset
= offset
;
3428 * record if this page is on a swap vnode
3430 if ((vp
->v_flag
& VISSWAP
) != 0)
3433 index
= PAGE_HASH_FUNC(vp
, offset
);
3434 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(index
)));
3435 listp
= &page_hash
[index
];
3438 * If this page is already hashed in, fail this attempt to add it.
3440 for (tp
= *listp
; tp
!= NULL
; tp
= tp
->p_hash
) {
3441 if (tp
->p_vnode
== vp
&& tp
->p_offset
== offset
) {
3443 pp
->p_offset
= (u_offset_t
)(-1);
3447 pp
->p_hash
= *listp
;
3451 * Add the page to the vnode's list of pages
3453 if (vp
->v_pages
!= NULL
&& IS_VMODSORT(vp
) && hat_ismod(pp
))
3454 listp
= &vp
->v_pages
->p_vpprev
->p_vpnext
;
3456 listp
= &vp
->v_pages
;
3458 page_vpadd(listp
, pp
);
3464 * Add page `pp' to both the hash and vp chains for [vp, offset].
3466 * Returns 1 on success and 0 on failure.
3467 * If hold is passed in, it is not dropped.
3470 page_hashin(page_t
*pp
, vnode_t
*vp
, u_offset_t offset
, kmutex_t
*hold
)
3472 kmutex_t
*phm
= NULL
;
3476 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(vp
)));
3477 ASSERT(pp
->p_fsdata
== 0 || panicstr
);
3479 TRACE_3(TR_FAC_VM
, TR_PAGE_HASHIN
,
3480 "page_hashin:pp %p vp %p offset %llx",
3483 VM_STAT_ADD(hashin_count
);
3488 VM_STAT_ADD(hashin_not_held
);
3489 phm
= PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp
, offset
));
3493 vphm
= page_vnode_mutex(vp
);
3495 rc
= page_do_hashin(pp
, vp
, offset
);
3500 VM_STAT_ADD(hashin_already
);
3505 * Remove page ``pp'' from the hash and vp chains and remove vp association.
3506 * All mutexes must be held
3509 page_do_hashout(page_t
*pp
)
3513 vnode_t
*vp
= pp
->p_vnode
;
3516 ASSERT(MUTEX_HELD(page_vnode_mutex(vp
)));
3519 * First, take pp off of its hash chain.
3521 hpp
= &page_hash
[PAGE_HASH_FUNC(vp
, pp
->p_offset
)];
3528 panic("page_do_hashout");
3536 * Now remove it from its associated vnode.
3539 page_vpsub(&vp
->v_pages
, pp
);
3542 page_clr_all_props(pp
);
3545 pp
->p_offset
= (u_offset_t
)-1;
3550 * Remove page ``pp'' from the hash and vp chains and remove vp association.
3552 * When `phm' is non-NULL it contains the address of the mutex protecting the
3553 * hash list pp is on. It is not dropped.
3556 page_hashout(page_t
*pp
, kmutex_t
*phm
)
3564 ASSERT(phm
!= NULL
? MUTEX_HELD(phm
) : 1);
3565 ASSERT(pp
->p_vnode
!= NULL
);
3566 ASSERT((PAGE_EXCL(pp
) && !page_iolock_assert(pp
)) || panicstr
);
3567 ASSERT(MUTEX_NOT_HELD(page_vnode_mutex(pp
->p_vnode
)));
3571 TRACE_2(TR_FAC_VM
, TR_PAGE_HASHOUT
,
3572 "page_hashout:pp %p vp %p", pp
, vp
);
3577 VM_STAT_ADD(hashout_count
);
3578 index
= PAGE_HASH_FUNC(vp
, pp
->p_offset
);
3580 VM_STAT_ADD(hashout_not_held
);
3581 nphm
= PAGE_HASH_MUTEX(index
);
3584 ASSERT(phm
? phm
== PAGE_HASH_MUTEX(index
) : 1);
3588 * grab page vnode mutex and remove it...
3590 vphm
= page_vnode_mutex(vp
);
3593 page_do_hashout(pp
);
3600 * Wake up processes waiting for this page. The page's
3601 * identity has been changed, and is probably not the
3602 * desired page any longer.
3604 sep
= page_se_mutex(pp
);
3606 pp
->p_selock
&= ~SE_EWANTED
;
3607 if (CV_HAS_WAITERS(&pp
->p_cv
))
3608 cv_broadcast(&pp
->p_cv
);
3613 * Add the page to the front of a linked list of pages
3614 * using the p_next & p_prev pointers for the list.
3615 * The caller is responsible for protecting the list pointers.
3618 page_add(page_t
**ppp
, page_t
*pp
)
3620 ASSERT(PAGE_EXCL(pp
) || (PAGE_SHARED(pp
) && page_iolock_assert(pp
)));
3622 page_add_common(ppp
, pp
);
3628 * Common code for page_add() and mach_page_add()
3631 page_add_common(page_t
**ppp
, page_t
*pp
)
3634 pp
->p_next
= pp
->p_prev
= pp
;
3637 pp
->p_prev
= (*ppp
)->p_prev
;
3638 (*ppp
)->p_prev
= pp
;
3639 pp
->p_prev
->p_next
= pp
;
3646 * Remove this page from a linked list of pages
3647 * using the p_next & p_prev pointers for the list.
3649 * The caller is responsible for protecting the list pointers.
3652 page_sub(page_t
**ppp
, page_t
*pp
)
3654 ASSERT(pp
!= NULL
&& (PP_ISFREE(pp
)) ? 1 :
3655 (PAGE_EXCL(pp
)) || (PAGE_SHARED(pp
) && page_iolock_assert(pp
)));
3657 if (*ppp
== NULL
|| pp
== NULL
) {
3658 panic("page_sub: bad arg(s): pp %p, *ppp %p",
3659 (void *)pp
, (void *)(*ppp
));
3663 page_sub_common(ppp
, pp
);
3668 * Common code for page_sub() and mach_page_sub()
3671 page_sub_common(page_t
**ppp
, page_t
*pp
)
3674 *ppp
= pp
->p_next
; /* go to next page */
3677 *ppp
= NULL
; /* page list is gone */
3679 pp
->p_prev
->p_next
= pp
->p_next
;
3680 pp
->p_next
->p_prev
= pp
->p_prev
;
3682 pp
->p_prev
= pp
->p_next
= pp
; /* make pp a list of one */
3687 * Break page list cppp into two lists with npages in the first list.
3688 * The tail is returned in nppp.
3691 page_list_break(page_t
**oppp
, page_t
**nppp
, pgcnt_t npages
)
3693 page_t
*s1pp
= *oppp
;
3695 page_t
*e1pp
, *e2pp
;
3707 for (n
= 0, s2pp
= *oppp
; n
< npages
; n
++) {
3708 s2pp
= s2pp
->p_next
;
3710 /* Fix head and tail of new lists */
3711 e1pp
= s2pp
->p_prev
;
3712 e2pp
= s1pp
->p_prev
;
3713 s1pp
->p_prev
= e1pp
;
3714 e1pp
->p_next
= s1pp
;
3715 s2pp
->p_prev
= e2pp
;
3716 e2pp
->p_next
= s2pp
;
3718 /* second list empty */
3729 * Concatenate page list nppp onto the end of list ppp.
3732 page_list_concat(page_t
**ppp
, page_t
**nppp
)
3734 page_t
*s1pp
, *s2pp
, *e1pp
, *e2pp
;
3736 if (*nppp
== NULL
) {
3744 e1pp
= s1pp
->p_prev
;
3746 e2pp
= s2pp
->p_prev
;
3747 s1pp
->p_prev
= e2pp
;
3748 e2pp
->p_next
= s1pp
;
3749 e1pp
->p_next
= s2pp
;
3750 s2pp
->p_prev
= e1pp
;
3754 * return the next page in the page list
3757 page_list_next(page_t
*pp
)
3759 return (pp
->p_next
);
3764 * Add the page to the front of the linked list of pages
3765 * using p_vpnext/p_vpprev pointers for the list.
3767 * The caller is responsible for protecting the lists.
3770 page_vpadd(page_t
**ppp
, page_t
*pp
)
3773 pp
->p_vpnext
= pp
->p_vpprev
= pp
;
3775 pp
->p_vpnext
= *ppp
;
3776 pp
->p_vpprev
= (*ppp
)->p_vpprev
;
3777 (*ppp
)->p_vpprev
= pp
;
3778 pp
->p_vpprev
->p_vpnext
= pp
;
3784 * Remove this page from the linked list of pages
3785 * using p_vpnext/p_vpprev pointers for the list.
3787 * The caller is responsible for protecting the lists.
3790 page_vpsub(page_t
**ppp
, page_t
*pp
)
3792 if (*ppp
== NULL
|| pp
== NULL
) {
3793 panic("page_vpsub: bad arg(s): pp %p, *ppp %p",
3794 (void *)pp
, (void *)(*ppp
));
3799 *ppp
= pp
->p_vpnext
; /* go to next page */
3802 *ppp
= NULL
; /* page list is gone */
3804 pp
->p_vpprev
->p_vpnext
= pp
->p_vpnext
;
3805 pp
->p_vpnext
->p_vpprev
= pp
->p_vpprev
;
3807 pp
->p_vpprev
= pp
->p_vpnext
= pp
; /* make pp a list of one */
3811 * Lock a physical page into memory "long term". Used to support "lock
3812 * in memory" functions. Accepts the page to be locked, and a cow variable
3813 * to indicate whether a the lock will travel to the new page during
3814 * a potential copy-on-write.
3818 page_t
*pp
, /* page to be locked */
3819 int cow
, /* cow lock */
3820 int kernel
) /* must succeed -- ignore checking */
3822 int r
= 0; /* result -- assume failure */
3824 ASSERT(PAGE_LOCKED(pp
));
3826 page_struct_lock(pp
);
3828 * Acquire the "freemem_lock" for availrmem.
3831 mutex_enter(&freemem_lock
);
3832 if ((availrmem
> pages_pp_maximum
) &&
3833 (pp
->p_cowcnt
< (ushort_t
)PAGE_LOCK_MAXIMUM
)) {
3836 mutex_exit(&freemem_lock
);
3838 if (++pp
->p_cowcnt
== (ushort_t
)PAGE_LOCK_MAXIMUM
) {
3840 "COW lock limit reached on pfn 0x%lx",
3844 mutex_exit(&freemem_lock
);
3847 if (pp
->p_lckcnt
< (ushort_t
)PAGE_LOCK_MAXIMUM
) {
3849 if (++pp
->p_lckcnt
==
3850 (ushort_t
)PAGE_LOCK_MAXIMUM
) {
3851 cmn_err(CE_WARN
, "Page lock limit "
3852 "reached on pfn 0x%lx",
3858 /* availrmem accounting done by caller */
3862 mutex_enter(&freemem_lock
);
3863 if (availrmem
> pages_pp_maximum
) {
3869 mutex_exit(&freemem_lock
);
3873 page_struct_unlock(pp
);
3878 * Decommit a lock on a physical page frame. Account for cow locks if
3883 page_t
*pp
, /* page to be unlocked */
3884 int cow
, /* expect cow lock */
3885 int kernel
) /* this was a kernel lock */
3887 ASSERT(PAGE_LOCKED(pp
));
3889 page_struct_lock(pp
);
3891 * Acquire the "freemem_lock" for availrmem.
3892 * If cowcnt or lcknt is already 0 do nothing; i.e., we
3893 * could be called to unlock even if nothing is locked. This could
3894 * happen if locked file pages were truncated (removing the lock)
3895 * and the file was grown again and new pages faulted in; the new
3896 * pages are unlocked but the segment still thinks they're locked.
3900 mutex_enter(&freemem_lock
);
3904 mutex_exit(&freemem_lock
);
3907 if (pp
->p_lckcnt
&& --pp
->p_lckcnt
== 0) {
3909 mutex_enter(&freemem_lock
);
3912 mutex_exit(&freemem_lock
);
3916 page_struct_unlock(pp
);
3920 * This routine reserves availrmem for npages.
3921 * It returns 1 on success or 0 on failure.
3923 * flags: KM_NOSLEEP or KM_SLEEP
3924 * cb_wait: called to induce delay when KM_SLEEP reservation requires kmem
3925 * reaping to potentially succeed. If the callback returns 0, the
3926 * reservation attempts will cease to repeat and page_xresv() may
3927 * report a failure. If cb_wait is NULL, the traditional delay(hz/2)
3928 * behavior will be used while waiting for a reap.
3931 page_xresv(pgcnt_t npages
, uint_t flags
, int (*cb_wait
)(void))
3933 mutex_enter(&freemem_lock
);
3934 if (availrmem
>= tune
.t_minarmem
+ npages
) {
3935 availrmem
-= npages
;
3936 mutex_exit(&freemem_lock
);
3938 } else if ((flags
& KM_NOSLEEP
) != 0) {
3939 mutex_exit(&freemem_lock
);
3942 mutex_exit(&freemem_lock
);
3945 * We signal memory pressure to the system by elevating 'needfree'.
3946 * Processes such as kmem reaping, pageout, and ZFS ARC shrinking can
3947 * then respond to said pressure by freeing pages.
3949 page_needfree(npages
);
3953 if (cb_wait
== NULL
) {
3959 mutex_enter(&freemem_lock
);
3960 if (availrmem
>= tune
.t_minarmem
+ npages
) {
3961 availrmem
-= npages
;
3962 mutex_exit(&freemem_lock
);
3963 page_needfree(-(spgcnt_t
)npages
);
3966 mutex_exit(&freemem_lock
);
3967 } while (nobail
!= 0);
3968 page_needfree(-(spgcnt_t
)npages
);
3974 * This routine reserves availrmem for npages;
3975 * flags: KM_NOSLEEP or KM_SLEEP
3976 * returns 1 on success or 0 on failure
3979 page_resv(pgcnt_t npages
, uint_t flags
)
3981 return (page_xresv(npages
, flags
, NULL
));
3985 * This routine unreserves availrmem for npages;
3988 page_unresv(pgcnt_t npages
)
3990 mutex_enter(&freemem_lock
);
3991 availrmem
+= npages
;
3992 mutex_exit(&freemem_lock
);
3996 * See Statement at the beginning of segvn_lockop() regarding
3997 * the way we handle cowcnts and lckcnts.
3999 * Transfer cowcnt on 'opp' to cowcnt on 'npp' if the vpage
4000 * that breaks COW has PROT_WRITE.
4002 * Note that, we may also break COW in case we are softlocking
4003 * on read access during physio;
4004 * in this softlock case, the vpage may not have PROT_WRITE.
4005 * So, we need to transfer lckcnt on 'opp' to lckcnt on 'npp'
4006 * if the vpage doesn't have PROT_WRITE.
4008 * This routine is never called if we are stealing a page
4011 * The caller subtracted from availrmem for read only mapping.
4012 * if lckcnt is 1 increment availrmem.
4016 page_t
*opp
, /* original page frame losing lock */
4017 page_t
*npp
, /* new page frame gaining lock */
4018 uint_t write_perm
) /* set if vpage has PROT_WRITE */
4023 ASSERT(PAGE_LOCKED(opp
));
4024 ASSERT(PAGE_LOCKED(npp
));
4027 * Since we have two pages we probably have two locks. We need to take
4028 * them in a defined order to avoid deadlocks. It's also possible they
4029 * both hash to the same lock in which case this is a non-issue.
4031 nidx
= PAGE_LLOCK_HASH(PP_PAGEROOT(npp
));
4032 oidx
= PAGE_LLOCK_HASH(PP_PAGEROOT(opp
));
4034 page_struct_lock(npp
);
4035 page_struct_lock(opp
);
4036 } else if (oidx
< nidx
) {
4037 page_struct_lock(opp
);
4038 page_struct_lock(npp
);
4039 } else { /* The pages hash to the same lock */
4040 page_struct_lock(npp
);
4043 ASSERT(npp
->p_cowcnt
== 0);
4044 ASSERT(npp
->p_lckcnt
== 0);
4046 /* Don't use claim if nothing is locked (see page_pp_unlock above) */
4047 if ((write_perm
&& opp
->p_cowcnt
!= 0) ||
4048 (!write_perm
&& opp
->p_lckcnt
!= 0)) {
4052 ASSERT(opp
->p_cowcnt
!= 0);
4056 ASSERT(opp
->p_lckcnt
!= 0);
4059 * We didn't need availrmem decremented if p_lckcnt on
4060 * original page is 1. Here, we are unlocking
4061 * read-only copy belonging to original page and
4062 * are locking a copy belonging to new page.
4064 if (opp
->p_lckcnt
== 1)
4072 mutex_enter(&freemem_lock
);
4075 mutex_exit(&freemem_lock
);
4079 page_struct_unlock(opp
);
4080 page_struct_unlock(npp
);
4081 } else if (oidx
< nidx
) {
4082 page_struct_unlock(npp
);
4083 page_struct_unlock(opp
);
4084 } else { /* The pages hash to the same lock */
4085 page_struct_unlock(npp
);
4090 * Simple claim adjust functions -- used to support changes in
4091 * claims due to changes in access permissions. Used by segvn_setprot().
4094 page_addclaim(page_t
*pp
)
4096 int r
= 0; /* result */
4098 ASSERT(PAGE_LOCKED(pp
));
4100 page_struct_lock(pp
);
4101 ASSERT(pp
->p_lckcnt
!= 0);
4103 if (pp
->p_lckcnt
== 1) {
4104 if (pp
->p_cowcnt
< (ushort_t
)PAGE_LOCK_MAXIMUM
) {
4107 if (++pp
->p_cowcnt
== (ushort_t
)PAGE_LOCK_MAXIMUM
) {
4109 "COW lock limit reached on pfn 0x%lx",
4114 mutex_enter(&freemem_lock
);
4115 if ((availrmem
> pages_pp_maximum
) &&
4116 (pp
->p_cowcnt
< (ushort_t
)PAGE_LOCK_MAXIMUM
)) {
4119 mutex_exit(&freemem_lock
);
4122 if (++pp
->p_cowcnt
== (ushort_t
)PAGE_LOCK_MAXIMUM
) {
4124 "COW lock limit reached on pfn 0x%lx",
4128 mutex_exit(&freemem_lock
);
4130 page_struct_unlock(pp
);
4135 page_subclaim(page_t
*pp
)
4139 ASSERT(PAGE_LOCKED(pp
));
4141 page_struct_lock(pp
);
4142 ASSERT(pp
->p_cowcnt
!= 0);
4145 if (pp
->p_lckcnt
< (ushort_t
)PAGE_LOCK_MAXIMUM
) {
4150 mutex_enter(&freemem_lock
);
4153 mutex_exit(&freemem_lock
);
4157 if (++pp
->p_lckcnt
== (ushort_t
)PAGE_LOCK_MAXIMUM
) {
4159 "Page lock limit reached on pfn 0x%lx",
4168 page_struct_unlock(pp
);
4173 * Variant of page_addclaim(), where ppa[] contains the pages of a single large
4177 page_addclaim_pages(page_t
**ppa
)
4179 pgcnt_t lckpgs
= 0, pg_idx
;
4181 VM_STAT_ADD(pagecnt
.pc_addclaim_pages
);
4184 * Only need to take the page struct lock on the large page root.
4186 page_struct_lock(ppa
[0]);
4187 for (pg_idx
= 0; ppa
[pg_idx
] != NULL
; pg_idx
++) {
4189 ASSERT(PAGE_LOCKED(ppa
[pg_idx
]));
4190 ASSERT(ppa
[pg_idx
]->p_lckcnt
!= 0);
4191 if (ppa
[pg_idx
]->p_cowcnt
== (ushort_t
)PAGE_LOCK_MAXIMUM
) {
4192 page_struct_unlock(ppa
[0]);
4195 if (ppa
[pg_idx
]->p_lckcnt
> 1)
4200 mutex_enter(&freemem_lock
);
4201 if (availrmem
>= pages_pp_maximum
+ lckpgs
) {
4202 availrmem
-= lckpgs
;
4203 pages_claimed
+= lckpgs
;
4205 mutex_exit(&freemem_lock
);
4206 page_struct_unlock(ppa
[0]);
4209 mutex_exit(&freemem_lock
);
4212 for (pg_idx
= 0; ppa
[pg_idx
] != NULL
; pg_idx
++) {
4213 ppa
[pg_idx
]->p_lckcnt
--;
4214 ppa
[pg_idx
]->p_cowcnt
++;
4216 page_struct_unlock(ppa
[0]);
4221 * Variant of page_subclaim(), where ppa[] contains the pages of a single large
4225 page_subclaim_pages(page_t
**ppa
)
4227 pgcnt_t ulckpgs
= 0, pg_idx
;
4229 VM_STAT_ADD(pagecnt
.pc_subclaim_pages
);
4232 * Only need to take the page struct lock on the large page root.
4234 page_struct_lock(ppa
[0]);
4235 for (pg_idx
= 0; ppa
[pg_idx
] != NULL
; pg_idx
++) {
4237 ASSERT(PAGE_LOCKED(ppa
[pg_idx
]));
4238 ASSERT(ppa
[pg_idx
]->p_cowcnt
!= 0);
4239 if (ppa
[pg_idx
]->p_lckcnt
== (ushort_t
)PAGE_LOCK_MAXIMUM
) {
4240 page_struct_unlock(ppa
[0]);
4243 if (ppa
[pg_idx
]->p_lckcnt
!= 0)
4248 mutex_enter(&freemem_lock
);
4249 availrmem
+= ulckpgs
;
4250 pages_claimed
-= ulckpgs
;
4251 mutex_exit(&freemem_lock
);
4254 for (pg_idx
= 0; ppa
[pg_idx
] != NULL
; pg_idx
++) {
4255 ppa
[pg_idx
]->p_cowcnt
--;
4256 ppa
[pg_idx
]->p_lckcnt
++;
4259 page_struct_unlock(ppa
[0]);
4264 page_numtopp(pfn_t pfnum
, se_t se
)
4269 pp
= page_numtopp_nolock(pfnum
);
4271 return ((page_t
*)NULL
);
4275 * Acquire the appropriate lock on the page.
4277 while (!page_lock(pp
, se
, (kmutex_t
*)NULL
, P_RECLAIM
)) {
4278 if (page_pptonum(pp
) != pfnum
)
4283 if (page_pptonum(pp
) != pfnum
) {
4292 page_numtopp_noreclaim(pfn_t pfnum
, se_t se
)
4297 pp
= page_numtopp_nolock(pfnum
);
4299 return ((page_t
*)NULL
);
4303 * Acquire the appropriate lock on the page.
4305 while (!page_lock(pp
, se
, (kmutex_t
*)NULL
, P_NO_RECLAIM
)) {
4306 if (page_pptonum(pp
) != pfnum
)
4311 if (page_pptonum(pp
) != pfnum
) {
4320 * This routine is like page_numtopp, but will only return page structs
4321 * for pages which are ok for loading into hardware using the page struct.
4324 page_numtopp_nowait(pfn_t pfnum
, se_t se
)
4329 pp
= page_numtopp_nolock(pfnum
);
4331 return ((page_t
*)NULL
);
4335 * Try to acquire the appropriate lock on the page.
4340 if (!page_trylock(pp
, se
))
4343 if (page_pptonum(pp
) != pfnum
) {
4347 if (PP_ISFREE(pp
)) {
4357 * Returns a count of dirty pages that are in the process
4358 * of being written out. If 'cleanit' is set, try to push the page.
4361 page_busy(int cleanit
)
4363 page_t
*page0
= page_first();
4365 pgcnt_t nppbusy
= 0;
4369 vnode_t
*vp
= pp
->p_vnode
;
4371 * A page is a candidate for syncing if it is:
4373 * (a) On neither the freelist nor the cachelist
4374 * (b) Hashed onto a vnode
4375 * (c) Not a kernel page
4377 * (e) Not part of a swapfile
4378 * (f) a page which belongs to a real vnode; eg has a non-null
4380 * (g) Backed by a filesystem which doesn't have a
4381 * stubbed-out sync operation
4383 if (!PP_ISFREE(pp
) && vp
!= NULL
&& !VN_ISKAS(vp
) &&
4384 hat_ismod(pp
) && !IS_SWAPVP(vp
) && vp
->v_vfsp
!= NULL
&&
4385 vfs_can_sync(vp
->v_vfsp
)) {
4390 if (!page_trylock(pp
, SE_EXCL
))
4393 if (PP_ISFREE(pp
) || vp
== NULL
|| IS_SWAPVP(vp
) ||
4394 pp
->p_lckcnt
!= 0 || pp
->p_cowcnt
!= 0 ||
4396 HAT_SYNC_DONTZERO
| HAT_SYNC_STOPON_MOD
) & P_MOD
)) {
4403 (void) VOP_PUTPAGE(vp
, off
, PAGESIZE
,
4404 B_ASYNC
| B_FREE
, kcred
, NULL
);
4407 } while ((pp
= page_next(pp
)) != page0
);
4412 void page_invalidate_pages(void);
4415 * callback handler to vm sub-system
4417 * callers make sure no recursive entries to this func.
4421 callb_vm_cpr(void *arg
, int code
)
4423 if (code
== CB_CODE_CPR_CHKPT
)
4424 page_invalidate_pages();
4429 * Invalidate all pages of the system.
4430 * It shouldn't be called until all user page activities are all stopped.
4433 page_invalidate_pages()
4439 const int MAXRETRIES
= 4;
4442 * Flush dirty pages and destroy the clean ones.
4446 pp
= page0
= page_first();
4453 * skip the page if it has no vnode or the page associated
4454 * with the kernel vnode or prom allocated kernel mem.
4456 if ((vp
= pp
->p_vnode
) == NULL
|| VN_ISKAS(vp
))
4460 * skip the page which is already free invalidated.
4462 if (PP_ISFREE(pp
) && PP_ISAGED(pp
))
4466 * skip pages that are already locked or can't be "exclusively"
4467 * locked or are already free. After we lock the page, check
4468 * the free and age bits again to be sure it's not destroyed
4470 * To achieve max. parallelization, we use page_trylock instead
4471 * of page_lock so that we don't get block on individual pages
4472 * while we have thousands of other pages to process.
4474 if (!page_trylock(pp
, SE_EXCL
)) {
4477 } else if (PP_ISFREE(pp
)) {
4478 if (!PP_ISAGED(pp
)) {
4479 page_destroy_free(pp
);
4486 * Is this page involved in some I/O? shared?
4488 * The page_struct_lock need not be acquired to
4489 * examine these fields since the page has an
4492 if (pp
->p_lckcnt
!= 0 || pp
->p_cowcnt
!= 0) {
4497 if (vp
->v_type
== VCHR
) {
4498 panic("vp->v_type == VCHR");
4502 if (!page_try_demote_pages(pp
)) {
4508 * Check the modified bit. Leave the bits alone in hardware
4509 * (they will be modified if we do the putpage).
4511 mod
= (hat_pagesync(pp
, HAT_SYNC_DONTZERO
| HAT_SYNC_STOPON_MOD
)
4514 offset
= pp
->p_offset
;
4516 * Hold the vnode before releasing the page lock
4517 * to prevent it from being freed and re-used by
4518 * some other thread.
4523 * No error return is checked here. Callers such as
4524 * cpr deals with the dirty pages at the dump time
4525 * if this putpage fails.
4527 (void) VOP_PUTPAGE(vp
, offset
, PAGESIZE
, B_INVAL
,
4531 /*LINTED: constant in conditional context*/
4532 VN_DISPOSE(pp
, B_INVAL
, 0, kcred
);
4534 } while ((pp
= page_next(pp
)) != page0
);
4535 if (nbusypages
&& retry
++ < MAXRETRIES
) {
4542 * Replace the page "old" with the page "new" on the page hash and vnode lists
4544 * the replacement must be done in place, ie the equivalent sequence:
4546 * vp = old->p_vnode;
4547 * off = old->p_offset;
4548 * page_do_hashout(old)
4549 * page_do_hashin(new, vp, off)
4551 * doesn't work, since
4552 * 1) if old is the only page on the vnode, the v_pages list has a window
4553 * where it looks empty. This will break file system assumptions.
4555 * 2) pvn_vplist_dirty() can't deal with pages moving on the v_pages list.
4558 page_do_relocate_hash(page_t
*new, page_t
*old
)
4561 vnode_t
*vp
= old
->p_vnode
;
4564 ASSERT(PAGE_EXCL(old
));
4565 ASSERT(PAGE_EXCL(new));
4567 ASSERT(MUTEX_HELD(page_vnode_mutex(vp
)));
4568 ASSERT(MUTEX_HELD(PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp
, old
->p_offset
))));
4571 * First find old page on the page hash list
4573 hash_list
= &page_hash
[PAGE_HASH_FUNC(vp
, old
->p_offset
)];
4576 if (*hash_list
== old
)
4578 if (*hash_list
== NULL
) {
4579 panic("page_do_hashout");
4582 hash_list
= &(*hash_list
)->p_hash
;
4586 * update new and replace old with new on the page hash list
4588 new->p_vnode
= old
->p_vnode
;
4589 new->p_offset
= old
->p_offset
;
4590 new->p_hash
= old
->p_hash
;
4593 if ((new->p_vnode
->v_flag
& VISSWAP
) != 0)
4597 * replace old with new on the vnode's page list
4599 if (old
->p_vpnext
== old
) {
4600 new->p_vpnext
= new;
4601 new->p_vpprev
= new;
4603 new->p_vpnext
= old
->p_vpnext
;
4604 new->p_vpprev
= old
->p_vpprev
;
4605 new->p_vpnext
->p_vpprev
= new;
4606 new->p_vpprev
->p_vpnext
= new;
4608 if (vp
->v_pages
== old
)
4612 * clear out the old page
4615 old
->p_vpnext
= NULL
;
4616 old
->p_vpprev
= NULL
;
4617 old
->p_vnode
= NULL
;
4619 old
->p_offset
= (u_offset_t
)-1;
4620 page_clr_all_props(old
);
4623 * Wake up processes waiting for this page. The page's
4624 * identity has been changed, and is probably not the
4625 * desired page any longer.
4627 sep
= page_se_mutex(old
);
4629 old
->p_selock
&= ~SE_EWANTED
;
4630 if (CV_HAS_WAITERS(&old
->p_cv
))
4631 cv_broadcast(&old
->p_cv
);
4636 * This function moves the identity of page "pp_old" to page "pp_new".
4637 * Both pages must be locked on entry. "pp_new" is free, has no identity,
4638 * and need not be hashed out from anywhere.
4641 page_relocate_hash(page_t
*pp_new
, page_t
*pp_old
)
4643 vnode_t
*vp
= pp_old
->p_vnode
;
4644 u_offset_t off
= pp_old
->p_offset
;
4645 kmutex_t
*phm
, *vphm
;
4650 ASSERT(PAGE_EXCL(pp_old
));
4651 ASSERT(PAGE_EXCL(pp_new
));
4653 ASSERT(pp_new
->p_vnode
== NULL
);
4656 * hashout then hashin while holding the mutexes
4658 phm
= PAGE_HASH_MUTEX(PAGE_HASH_FUNC(vp
, off
));
4660 vphm
= page_vnode_mutex(vp
);
4663 page_do_relocate_hash(pp_new
, pp_old
);
4665 /* The following comment preserved from page_flip(). */
4666 pp_new
->p_fsdata
= pp_old
->p_fsdata
;
4667 pp_old
->p_fsdata
= 0;
4672 * The page_struct_lock need not be acquired for lckcnt and
4673 * cowcnt since the page has an "exclusive" lock.
4675 ASSERT(pp_new
->p_lckcnt
== 0);
4676 ASSERT(pp_new
->p_cowcnt
== 0);
4677 pp_new
->p_lckcnt
= pp_old
->p_lckcnt
;
4678 pp_new
->p_cowcnt
= pp_old
->p_cowcnt
;
4679 pp_old
->p_lckcnt
= pp_old
->p_cowcnt
= 0;
4684 * Helper routine used to lock all remaining members of a
4685 * large page. The caller is responsible for passing in a locked
4686 * pp. If pp is a large page, then it succeeds in locking all the
4687 * remaining constituent pages or it returns with only the
4688 * original page locked.
4690 * Returns 1 on success, 0 on failure.
4692 * If success is returned this routine guarantees p_szc for all constituent
4693 * pages of a large page pp belongs to can't change. To achieve this we
4694 * recheck szc of pp after locking all constituent pages and retry if szc
4695 * changed (it could only decrease). Since hat_page_demote() needs an EXCL
4696 * lock on one of constituent pages it can't be running after all constituent
4697 * pages are locked. hat_page_demote() with a lock on a constituent page
4698 * outside of this large page (i.e. pp belonged to a larger large page) is
4699 * already done with all constituent pages of pp since the root's p_szc is
4700 * changed last. Therefore no need to synchronize with hat_page_demote() that
4701 * locked a constituent page outside of pp's current large page.
4704 uint32_t gpg_trylock_mtbf
= 0;
4708 group_page_trylock(page_t
*pp
, se_t se
)
4712 uint_t pszc
= pp
->p_szc
;
4715 if (gpg_trylock_mtbf
&& !(gethrtime() % gpg_trylock_mtbf
)) {
4720 if (pp
!= PP_GROUPLEADER(pp
, pszc
)) {
4725 ASSERT(PAGE_LOCKED_SE(pp
, se
));
4726 ASSERT(!PP_ISFREE(pp
));
4730 npgs
= page_get_pagecnt(pszc
);
4732 for (i
= 1; i
< npgs
; i
++, tpp
++) {
4733 if (!page_trylock(tpp
, se
)) {
4735 for (j
= 1; j
< i
; j
++, tpp
++) {
4741 if (pp
->p_szc
!= pszc
) {
4742 ASSERT(pp
->p_szc
< pszc
);
4743 ASSERT(pp
->p_vnode
!= NULL
&& !PP_ISKAS(pp
) &&
4744 !IS_SWAPFSVP(pp
->p_vnode
));
4746 for (i
= 1; i
< npgs
; i
++, tpp
++) {
4756 group_page_unlock(page_t
*pp
)
4761 ASSERT(PAGE_LOCKED(pp
));
4762 ASSERT(!PP_ISFREE(pp
));
4763 ASSERT(pp
== PP_PAGEROOT(pp
));
4764 npgs
= page_get_pagecnt(pp
->p_szc
);
4765 for (i
= 1, tpp
= pp
+ 1; i
< npgs
; i
++, tpp
++) {
4772 * 0 : on success and *nrelocp is number of relocated PAGESIZE pages
4773 * ERANGE : this is not a base page
4774 * EBUSY : failure to get locks on the page/pages
4775 * ENOMEM : failure to obtain replacement pages
4776 * EAGAIN : OBP has not yet completed its boot-time handoff to the kernel
4777 * EIO : An error occurred while trying to copy the page data
4779 * Return with all constituent members of target and replacement
4780 * SE_EXCL locked. It is the callers responsibility to drop the
4786 page_t
**replacement
,
4796 pfn_t pfn
, repl_pfn
;
4799 int repl_contig
= 0;
4801 spgcnt_t dofree
= 0;
4805 #if defined(__sparc)
4807 * We need to wait till OBP has completed
4808 * its boot-time handoff of its resources to the kernel
4809 * before we allow page relocation
4811 if (page_relocate_ready
== 0) {
4817 * If this is not a base page,
4818 * just return with 0x0 pages relocated.
4821 ASSERT(PAGE_EXCL(targ
));
4822 ASSERT(!PP_ISFREE(targ
));
4824 ASSERT(szc
< mmu_page_sizes
);
4825 VM_STAT_ADD(vmm_vmstats
.ppr_reloc
[szc
]);
4826 pfn
= targ
->p_pagenum
;
4827 if (pfn
!= PFN_BASE(pfn
, szc
)) {
4828 VM_STAT_ADD(vmm_vmstats
.ppr_relocnoroot
[szc
]);
4832 if ((repl
= *replacement
) != NULL
&& repl
->p_szc
>= szc
) {
4833 repl_pfn
= repl
->p_pagenum
;
4834 if (repl_pfn
!= PFN_BASE(repl_pfn
, szc
)) {
4835 VM_STAT_ADD(vmm_vmstats
.ppr_reloc_replnoroot
[szc
]);
4842 * We must lock all members of this large page or we cannot
4843 * relocate any part of it.
4845 if (grouplock
!= 0 && !group_page_trylock(targ
, SE_EXCL
)) {
4846 VM_STAT_ADD(vmm_vmstats
.ppr_relocnolock
[targ
->p_szc
]);
4851 * reread szc it could have been decreased before
4852 * group_page_trylock() was done.
4855 ASSERT(szc
< mmu_page_sizes
);
4856 VM_STAT_ADD(vmm_vmstats
.ppr_reloc
[szc
]);
4857 ASSERT(pfn
== PFN_BASE(pfn
, szc
));
4859 npgs
= page_get_pagecnt(targ
->p_szc
);
4862 dofree
= npgs
; /* Size of target page in MMU pages */
4863 if (!page_create_wait(dofree
, 0)) {
4864 if (grouplock
!= 0) {
4865 group_page_unlock(targ
);
4867 VM_STAT_ADD(vmm_vmstats
.ppr_relocnomem
[szc
]);
4872 * seg kmem pages require that the target and replacement
4873 * page be the same pagesize.
4875 flags
= (VN_ISKAS(targ
->p_vnode
)) ? PGR_SAMESZC
: 0;
4876 repl
= page_get_replacement_page(targ
, lgrp
, flags
);
4878 if (grouplock
!= 0) {
4879 group_page_unlock(targ
);
4881 page_create_putback(dofree
);
4882 VM_STAT_ADD(vmm_vmstats
.ppr_relocnomem
[szc
]);
4888 ASSERT(PAGE_LOCKED(repl
));
4892 #if defined(__sparc)
4894 * Let hat_page_relocate() complete the relocation if it's kernel page
4896 if (VN_ISKAS(targ
->p_vnode
)) {
4897 *replacement
= repl
;
4898 if (hat_page_relocate(target
, replacement
, nrelocp
) != 0) {
4899 if (grouplock
!= 0) {
4900 group_page_unlock(targ
);
4903 *replacement
= NULL
;
4904 page_free_replacement_page(repl
);
4905 page_create_putback(dofree
);
4907 VM_STAT_ADD(vmm_vmstats
.ppr_krelocfail
[szc
]);
4910 VM_STAT_ADD(vmm_vmstats
.ppr_relocok
[szc
]);
4921 for (i
= 0; i
< npgs
; i
++) {
4922 ASSERT(PAGE_EXCL(targ
));
4923 ASSERT(targ
->p_slckcnt
== 0);
4924 ASSERT(repl
->p_slckcnt
== 0);
4926 (void) hat_pageunload(targ
, HAT_FORCE_PGUNLOAD
);
4928 ASSERT(hat_page_getshare(targ
) == 0);
4929 ASSERT(!PP_ISFREE(targ
));
4930 ASSERT(targ
->p_pagenum
== (pfn
+ i
));
4931 ASSERT(repl_contig
== 0 ||
4932 repl
->p_pagenum
== (repl_pfn
+ i
));
4935 * Copy the page contents and attributes then
4936 * relocate the page in the page hash.
4938 if (ppcopy(targ
, repl
) == 0) {
4941 VM_STAT_ADD(vmm_vmstats
.ppr_copyfail
);
4942 if (grouplock
!= 0) {
4943 group_page_unlock(targ
);
4946 *replacement
= NULL
;
4947 page_free_replacement_page(repl
);
4948 page_create_putback(dofree
);
4954 if (repl_contig
!= 0) {
4957 repl
= repl
->p_next
;
4964 for (i
= 0; i
< npgs
; i
++) {
4965 ppattr
= hat_page_getattr(targ
, (P_MOD
| P_REF
| P_RO
));
4966 page_clr_all_props(repl
);
4967 page_set_props(repl
, ppattr
);
4968 page_relocate_hash(repl
, targ
);
4970 ASSERT(hat_page_getshare(targ
) == 0);
4971 ASSERT(hat_page_getshare(repl
) == 0);
4973 * Now clear the props on targ, after the
4974 * page_relocate_hash(), they no longer
4977 page_clr_all_props(targ
);
4978 ASSERT(targ
->p_next
== targ
);
4979 ASSERT(targ
->p_prev
== targ
);
4980 page_list_concat(&pl
, &targ
);
4983 if (repl_contig
!= 0) {
4986 repl
= repl
->p_next
;
4989 /* assert that we have come full circle with repl */
4990 ASSERT(repl_contig
== 1 || first_repl
== repl
);
4993 if (*replacement
== NULL
) {
4994 ASSERT(first_repl
== repl
);
4995 *replacement
= repl
;
4997 VM_STAT_ADD(vmm_vmstats
.ppr_relocok
[szc
]);
5002 * On success returns 0 and *nrelocp the number of PAGESIZE pages relocated.
5007 page_t
**replacement
,
5015 /* do_page_relocate returns 0 on success or errno value */
5016 ret
= do_page_relocate(target
, replacement
, grouplock
, nrelocp
, lgrp
);
5018 if (ret
!= 0 || freetarget
== 0) {
5021 if (*nrelocp
== 1) {
5022 ASSERT(*target
!= NULL
);
5023 page_free(*target
, 1);
5025 page_t
*tpp
= *target
;
5026 uint_t szc
= tpp
->p_szc
;
5027 pgcnt_t npgs
= page_get_pagecnt(szc
);
5031 ASSERT(PAGE_EXCL(tpp
));
5032 ASSERT(!hat_page_is_mapped(tpp
));
5033 ASSERT(tpp
->p_szc
== szc
);
5037 } while ((tpp
= tpp
->p_next
) != *target
);
5039 page_list_add_pages(*target
, 0);
5040 npgs
= page_get_pagecnt(szc
);
5041 page_create_putback(npgs
);
5047 * it is up to the caller to deal with pcf accounting.
5050 page_free_replacement_page(page_t
*pplist
)
5054 while (pplist
!= NULL
) {
5056 * pp_targ is a linked list.
5059 if (pp
->p_szc
== 0) {
5060 page_sub(&pplist
, pp
);
5061 page_clr_all_props(pp
);
5064 page_list_add(pp
, PG_FREE_LIST
| PG_LIST_TAIL
);
5066 VM_STAT_ADD(pagecnt
.pc_free_replacement_page
[0]);
5068 spgcnt_t curnpgs
= page_get_pagecnt(pp
->p_szc
);
5070 page_list_break(&pp
, &pplist
, curnpgs
);
5073 ASSERT(PAGE_EXCL(tpp
));
5074 ASSERT(!hat_page_is_mapped(tpp
));
5075 page_clr_all_props(tpp
);
5078 } while ((tpp
= tpp
->p_next
) != pp
);
5079 page_list_add_pages(pp
, 0);
5080 VM_STAT_ADD(pagecnt
.pc_free_replacement_page
[1]);
5086 * Relocate target to non-relocatable replacement page.
5089 page_relocate_cage(page_t
**target
, page_t
**replacement
)
5092 spgcnt_t pgcnt
, npgs
;
5097 ASSERT(PAGE_EXCL(tpp
));
5098 ASSERT(tpp
->p_szc
== 0);
5100 pgcnt
= btop(page_get_pagesize(tpp
->p_szc
));
5103 (void) page_create_wait(pgcnt
, PG_WAIT
| PG_NORELOC
);
5104 rpp
= page_get_replacement_page(tpp
, NULL
, PGR_NORELOC
);
5106 page_create_putback(pgcnt
);
5107 kcage_cageout_wakeup();
5109 } while (rpp
== NULL
);
5111 ASSERT(PP_ISNORELOC(rpp
));
5113 result
= page_relocate(&tpp
, &rpp
, 0, 1, &npgs
, NULL
);
5118 panic("page_relocate_cage: partial relocation");
5125 * Release the page lock on a page, place on cachelist
5126 * tail if no longer mapped. Caller can let us know if
5127 * the page is known to be clean.
5130 page_release(page_t
*pp
, int checkmod
)
5134 ASSERT(PAGE_LOCKED(pp
) && !PP_ISFREE(pp
) &&
5135 (pp
->p_vnode
!= NULL
));
5137 if (!hat_page_is_mapped(pp
) && !IS_SWAPVP(pp
->p_vnode
) &&
5138 ((PAGE_SHARED(pp
) && page_tryupgrade(pp
)) || PAGE_EXCL(pp
)) &&
5139 pp
->p_lckcnt
== 0 && pp
->p_cowcnt
== 0 &&
5140 !hat_page_is_mapped(pp
)) {
5143 * If page is modified, unlock it
5145 * (p_nrm & P_MOD) bit has the latest stuff because:
5146 * (1) We found that this page doesn't have any mappings
5147 * _after_ holding SE_EXCL and
5148 * (2) We didn't drop SE_EXCL lock after the check in (1)
5150 if (checkmod
&& hat_ismod(pp
)) {
5154 /*LINTED: constant in conditional context*/
5155 VN_DISPOSE(pp
, B_FREE
, 0, kcred
);
5156 status
= PGREL_CLEAN
;
5160 status
= PGREL_NOTREL
;
5166 * Given a constituent page, try to demote the large page on the freelist.
5168 * Returns nonzero if the page could be demoted successfully. Returns with
5169 * the constituent page still locked.
5172 page_try_demote_free_pages(page_t
*pp
)
5174 page_t
*rootpp
= pp
;
5175 pfn_t pfn
= page_pptonum(pp
);
5177 uint_t szc
= pp
->p_szc
;
5179 ASSERT(PP_ISFREE(pp
));
5180 ASSERT(PAGE_EXCL(pp
));
5183 * Adjust rootpp and lock it, if `pp' is not the base
5186 npgs
= page_get_pagecnt(pp
->p_szc
);
5191 if (!IS_P2ALIGNED(pfn
, npgs
)) {
5192 pfn
= P2ALIGN(pfn
, npgs
);
5193 rootpp
= page_numtopp_nolock(pfn
);
5196 if (pp
!= rootpp
&& !page_trylock(rootpp
, SE_EXCL
)) {
5200 if (rootpp
->p_szc
!= szc
) {
5202 page_unlock(rootpp
);
5206 page_demote_free_pages(rootpp
);
5209 page_unlock(rootpp
);
5211 ASSERT(PP_ISFREE(pp
));
5212 ASSERT(PAGE_EXCL(pp
));
/*
 * Given a constituent page, try to demote the large page.
 *
 * Returns nonzero if the page could be demoted successfully. Returns with
 * the constituent page still locked.
 */
int
page_try_demote_pages(page_t *pp)
{
	page_t	*tpp, *rootpp = pp;
	pfn_t	pfn = page_pptonum(pp);
	spgcnt_t i, npgs;
	uint_t	szc = pp->p_szc;
	vnode_t	*vp = pp->p_vnode;

	ASSERT(PAGE_EXCL(pp));

	VM_STAT_ADD(pagecnt.pc_try_demote_pages[0]);

	if (pp->p_szc == 0) {
		VM_STAT_ADD(pagecnt.pc_try_demote_pages[1]);
		return (1);	/* p_szc is already 0 */
	}

	if (vp != NULL && !IS_SWAPFSVP(vp) && !VN_ISKAS(vp)) {
		VM_STAT_ADD(pagecnt.pc_try_demote_pages[2]);
		page_demote_vp_pages(pp);
		ASSERT(pp->p_szc == 0);
		return (1);
	}

	/*
	 * Adjust rootpp if passed in is not the base
	 * constituent page.
	 */
	npgs = page_get_pagecnt(pp->p_szc);
	if (!IS_P2ALIGNED(pfn, npgs)) {
		pfn = P2ALIGN(pfn, npgs);
		rootpp = page_numtopp_nolock(pfn);
		VM_STAT_ADD(pagecnt.pc_try_demote_pages[3]);
		ASSERT(rootpp->p_vnode != NULL);
		ASSERT(rootpp->p_szc == szc);
	}

	/*
	 * We can't demote kernel pages since we can't hat_unload()
	 * the mappings.
	 */
	if (VN_ISKAS(rootpp->p_vnode))
		return (0);

	/*
	 * Attempt to lock all constituent pages except the page passed
	 * in since it's already locked.
	 */
	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
		ASSERT(!PP_ISFREE(tpp));
		ASSERT(tpp->p_vnode != NULL);

		if (tpp != pp && !page_trylock(tpp, SE_EXCL))
			break;
		ASSERT(tpp->p_szc == rootpp->p_szc);
		ASSERT(page_pptonum(tpp) == page_pptonum(rootpp) + i);
	}

	/*
	 * If we failed to lock them all then unlock what we have
	 * locked so far and bail.
	 */
	if (i < npgs) {
		tpp = rootpp;
		while (i-- > 0) {
			if (tpp != pp)
				page_unlock(tpp);
			tpp++;
		}
		VM_STAT_ADD(pagecnt.pc_try_demote_pages[4]);
		return (0);
	}

	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
		ASSERT(PAGE_EXCL(tpp));
		ASSERT(tpp->p_slckcnt == 0);
		(void) hat_pageunload(tpp, HAT_FORCE_PGUNLOAD);
		tpp->p_szc = 0;
	}

	/*
	 * Unlock all pages except the page passed in.
	 */
	for (tpp = rootpp, i = 0; i < npgs; i++, tpp++) {
		ASSERT(!hat_page_is_mapped(tpp));
		if (tpp != pp)
			page_unlock(tpp);
	}

	VM_STAT_ADD(pagecnt.pc_try_demote_pages[5]);
	return (1);
}
/*
 * Called by page_free() and page_destroy() to demote the page size code
 * (p_szc) to 0 (since we can't just put a single PAGESIZE page with non zero
 * p_szc on the free list, and we can't simply clear p_szc of a single page_t
 * within a large page either, since that would break other code that relies
 * on p_szc being the same for all page_t's of a large page).
 *
 * Anonymous pages should never end up here because anon_map_getpages()
 * cannot deal with p_szc changes after a single constituent page is locked.
 * Anonymous or kernel large pages are demoted or freed an entire large page
 * at a time, with all constituent pages locked EXCL.  For file system pages,
 * however, we have to be able to demote a large page (i.e. decrease the
 * p_szc of all constituent pages) with only an EXCL lock on one of the
 * constituent pages.  The reason anonymous page demotion can be done an
 * entire large page at a time is that those operations originate at the
 * address space level and concern the entire large page region, with the
 * actual demotion only done when the pages are not shared with any other
 * processes (therefore we can always get the EXCL lock on all anonymous
 * constituent pages after clearing the segment page cache).  File system
 * pages, on the other hand, can be truncated or invalidated at a PAGESIZE
 * level from the file system side and end up in page_free() or
 * page_destroy() (we also allow only part of a large page to be SOFTLOCKed,
 * so pageout should be able to demote a large page by EXCL locking any
 * constituent page that is not under SOFTLOCK).  In those cases we cannot
 * rely on being able to lock EXCL all constituent pages.
 *
 * To prevent szc changes on file system pages one has to lock all constituent
 * pages at least SHARED (or call page_szc_lock()).  The only subsystem that
 * doesn't rely on locking all constituent pages (or using page_szc_lock()) to
 * prevent szc changes is the hat layer, which uses its own page level mlist
 * locks.  The hat assumes that szc doesn't change after the mlist lock for a
 * page is taken.  Therefore we need to change szc under hat level locks if we
 * only have an EXCL lock on a single constituent page and the hat still
 * references any of the constituent pages.  (Note we can't "ignore" the hat
 * layer by simply hat_pageunload()ing all constituent pages without having
 * EXCL locks on all of the constituent pages.)  We use the hat_page_demote()
 * call to safely demote the szc of all constituent pages under hat locks when
 * we only have an EXCL lock on one of the constituent pages.
 *
 * This routine calls page_szc_lock() before calling hat_page_demote() to
 * allow segvn in one special case not to lock all constituent pages SHARED
 * before calling hat_memload_array(), which relies on p_szc not changing even
 * before the hat level mlist lock is taken.  In that case segvn uses
 * page_szc_lock() to prevent hat_page_demote() from changing p_szc values.
 *
 * Anonymous or kernel page demotion still has to lock all pages exclusively
 * and do hat_pageunload() on all constituent pages before demoting the page,
 * so there's no need for anonymous or kernel page demotion to use the
 * hat_page_demote() mechanism.
 *
 * hat_page_demote() removes all large mappings that map pp and then decreases
 * p_szc starting from the last constituent page of the large page.  Working
 * from the tail of a large page in decreasing pfn order allows anyone looking
 * at the root page to know that hat_page_demote() is done for the root's szc
 * area.  For example, if a root page has szc 1, one only has to lock all
 * constituent pages within the szc 1 area to prevent szc changes, because any
 * hat_page_demote() that started on this page when it had szc > 1 is done for
 * this szc 1 area.
 *
 * We are guaranteed that all constituent pages of pp's large page belong to
 * the same vnode with consecutive offsets increasing in the direction of
 * increasing pfn, i.e. the identity of constituent pages can't change until
 * their p_szc is decreased.  Therefore it's safe for hat_page_demote() to
 * remove large mappings to pp even though we don't lock any constituent page
 * except pp (i.e. we won't unload e.g. a kernel locked page).
 */
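/*
 * Illustrative sketch (not part of this file): the locking pattern the
 * comment above describes for a caller that only holds one constituent
 * page SHARED and wants p_szc to stay stable.  The caller shown here is
 * hypothetical; page_szc_lock() may return NULL when pp is a small page.
 *
 *	kmutex_t *mtx = page_szc_lock(pp);
 *	if (mtx != NULL) {
 *		... examine pp->p_szc; hat_page_demote() cannot change it ...
 *		mutex_exit(mtx);
 *	}
 */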
void
page_demote_vp_pages(page_t *pp)
{
	kmutex_t *mtx;

	ASSERT(PAGE_EXCL(pp));
	ASSERT(!PP_ISFREE(pp));
	ASSERT(pp->p_vnode != NULL);
	ASSERT(!IS_SWAPFSVP(pp->p_vnode));
	ASSERT(!PP_ISKAS(pp));

	VM_STAT_ADD(pagecnt.pc_demote_pages[0]);

	mtx = page_szc_lock(pp);
	if (mtx != NULL) {
		hat_page_demote(pp);
		mutex_exit(mtx);
	}

	ASSERT(pp->p_szc == 0);
}
/*
 * Mark any existing pages for migration in the given range
 */
void
page_mark_migrate(struct seg *seg, caddr_t addr, size_t len,
    struct anon_map *amp, ulong_t anon_index, vnode_t *vp,
    u_offset_t vnoff, int rflag)
{
	struct anon	*ap;
	vnode_t		*curvp;
	lgrp_t		*from;
	pgcnt_t		i;
	pgcnt_t		nlocked;
	u_offset_t	off;
	pfn_t		pfn;
	size_t		pgsz;
	size_t		segpgsz;
	pgcnt_t		pages;
	uint_t		pszc;
	page_t		*pp0, *pp;
	caddr_t		va;
	ulong_t		an_idx;
	anon_sync_obj_t	cookie;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	/*
	 * Don't do anything if don't need to do lgroup optimizations
	 * on this system
	 */
	if (!lgrp_optimizations())
		return;

	/*
	 * Align address and length to (potentially large) page boundary
	 */
	segpgsz = page_get_pagesize(seg->s_szc);
	addr = (caddr_t)P2ALIGN((uintptr_t)addr, segpgsz);
	if (rflag)
		len = P2ROUNDUP(len, segpgsz);

	/*
	 * Do one (large) page at a time
	 */
	va = addr;
	while (va < addr + len) {
		/*
		 * Lookup (root) page for vnode and offset corresponding to
		 * this virtual address
		 * Try anonmap first since there may be copy-on-write
		 * pages, but initialize vnode pointer and offset using
		 * vnode arguments just in case there isn't an amp.
		 */
		curvp = vp;
		off = vnoff + va - seg->s_base;
		if (amp) {
			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
			an_idx = anon_index + seg_page(seg, va);
			anon_array_enter(amp, an_idx, &cookie);
			ap = anon_get_ptr(amp->ahp, an_idx);
			if (ap)
				swap_xlate(ap, &curvp, &off);
			anon_array_exit(&cookie);
			ANON_LOCK_EXIT(&amp->a_rwlock);
		}

		pp = NULL;
		if (curvp)
			pp = page_lookup(curvp, off, SE_SHARED);

		/*
		 * If there isn't a page at this virtual address,
		 * skip to next page
		 */
		if (pp == NULL) {
			va += PAGESIZE;
			continue;
		}

		/*
		 * Figure out which lgroup this page is in for kstats
		 */
		pfn = page_pptonum(pp);
		from = lgrp_pfn_to_lgrp(pfn);

		/*
		 * Get page size, and round up and skip to next page boundary
		 * if unaligned address
		 */
		pszc = pp->p_szc;
		pgsz = page_get_pagesize(pszc);
		pages = btop(pgsz);
		if (!IS_P2ALIGNED(va, pgsz) ||
		    !IS_P2ALIGNED(pfn, pages) ||
		    pgsz > segpgsz) {
			pgsz = MIN(pgsz, segpgsz);
			page_unlock(pp);
			pages = btop(P2END((uintptr_t)va, pgsz) -
			    (uintptr_t)va);
			va = (caddr_t)P2END((uintptr_t)va, pgsz);
			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS, pages);
			continue;
		}

		/*
		 * Upgrade to exclusive lock on page
		 */
		if (!page_tryupgrade(pp)) {
			page_unlock(pp);
			va += pgsz;
			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
			    btop(pgsz));
			continue;
		}

		pp0 = pp;
		pp++;
		nlocked = 1;

		/*
		 * Lock constituent pages if this is large page
		 */
		if (pages > 1) {
			/*
			 * Lock all constituents except root page, since it
			 * should be locked already.
			 */
			for (; nlocked < pages; nlocked++) {
				if (!page_trylock(pp, SE_EXCL))
					break;
				if (PP_ISFREE(pp) ||
				    pp->p_szc != pszc) {
					/*
					 * hat_page_demote() raced in with us.
					 */
					ASSERT(!IS_SWAPFSVP(curvp));
					page_unlock(pp);
					break;
				}
				pp++;
			}
		}

		/*
		 * If all constituent pages couldn't be locked,
		 * unlock pages locked so far and skip to next page.
		 */
		if (nlocked < pages) {
			for (i = 0; i < nlocked; i++)
				page_unlock(pp0 + i);
			va += pgsz;
			lgrp_stat_add(from->lgrp_id, LGRP_PMM_FAIL_PGS,
			    btop(pgsz));
			continue;
		}

		/*
		 * hat_page_demote() can no longer happen
		 * since last cons page had the right p_szc after
		 * all cons pages were locked. all cons pages
		 * should now have the same p_szc.
		 */

		/*
		 * All constituent pages locked successfully, so mark
		 * large page for migration and unload the mappings of
		 * constituent pages, so a fault will occur on any part of the
		 * large page
		 */
		PP_SETMIGRATE(pp0);
		for (i = 0; i < nlocked; i++, pp0++) {
			(void) hat_pageunload(pp0, HAT_FORCE_PGUNLOAD);
			ASSERT(hat_page_getshare(pp0) == 0);
			page_unlock(pp0);
		}
		lgrp_stat_add(from->lgrp_id, LGRP_PMM_PGS, nlocked);

		va += pgsz;
	}
}
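/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * intended pairing of the two routines in this area.  A segment driver
 * marks a range while it holds the address space lock, and later, once it
 * has the constituent pages locked in ppa[], asks for the actual move.
 * The argument values shown are placeholders.
 *
 *	page_mark_migrate(seg, addr, len, amp, anon_index, vp, vnoff, 1);
 *	...
 *	page_migrate(seg, addr, ppa, npages);
 */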
/*
 * Migrate any pages that have been marked for migration in the given range
 */
void
page_migrate(
	struct seg	*seg,
	caddr_t		addr,
	page_t		**ppa,
	pgcnt_t		npages)
{
	lgrp_t		*from;
	lgrp_t		*to;
	page_t		*newpp;
	page_t		*pp;
	pfn_t		pfn;
	size_t		pgsz;
	spgcnt_t	page_cnt;
	spgcnt_t	i;
	uint_t		pszc;

	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as));

	while (npages > 0) {
		pp = *ppa;
		pszc = pp->p_szc;
		pgsz = page_get_pagesize(pszc);
		page_cnt = btop(pgsz);

		/*
		 * Check to see whether this page is marked for migration
		 *
		 * Assume that root page of large page is marked for
		 * migration and none of the other constituent pages
		 * are marked.  This really simplifies clearing the
		 * migrate bit by not having to clear it from each
		 * constituent page.
		 *
		 * note we don't want to relocate an entire large page if
		 * someone is only using one subpage.
		 */
		if (npages < page_cnt)
			break;

		/*
		 * Is it marked for migration?
		 */
		if (!PP_ISMIGRATE(pp))
			goto next;

		/*
		 * Determine lgroups that page is being migrated between
		 */
		pfn = page_pptonum(pp);
		if (!IS_P2ALIGNED(pfn, page_cnt)) {
			break;
		}
		from = lgrp_pfn_to_lgrp(pfn);
		to = lgrp_mem_choose(seg, addr, pgsz);

		/*
		 * Need to get exclusive lock's to migrate
		 */
		for (i = 0; i < page_cnt; i++) {
			ASSERT(PAGE_LOCKED(ppa[i]));
			if (page_pptonum(ppa[i]) != pfn + i ||
			    ppa[i]->p_szc != pszc) {
				break;
			}
			if (!page_tryupgrade(ppa[i])) {
				lgrp_stat_add(from->lgrp_id,
				    LGRP_PM_FAIL_LOCK_PGS,
				    page_cnt);
				break;
			}

			/*
			 * Check to see whether we are trying to migrate
			 * page to lgroup where it is allocated already.
			 * If so, clear the migrate bit and skip to next
			 * page.
			 */
			if (i == 0 && to == from) {
				PP_CLRMIGRATE(ppa[0]);
				page_downgrade(ppa[0]);
				goto next;
			}
		}

		/*
		 * If all constituent pages couldn't be locked,
		 * unlock pages locked so far and skip to next page.
		 */
		if (i != page_cnt) {
			while (--i != -1)
				page_downgrade(ppa[i]);
			goto next;
		}

		(void) page_create_wait(page_cnt, PG_WAIT);
		newpp = page_get_replacement_page(pp, to, PGR_SAMESZC);
		if (newpp == NULL) {
			page_create_putback(page_cnt);
			for (i = 0; i < page_cnt; i++) {
				page_downgrade(ppa[i]);
			}
			lgrp_stat_add(to->lgrp_id, LGRP_PM_FAIL_ALLOC_PGS,
			    page_cnt);
			goto next;
		}
		ASSERT(newpp->p_szc == pszc);

		/*
		 * Clear migrate bit and relocate page
		 */
		PP_CLRMIGRATE(pp);
		if (page_relocate(&pp, &newpp, 0, 1, &page_cnt, to)) {
			panic("page_migrate: page_relocate failed");
		}
		ASSERT(page_cnt * PAGESIZE == pgsz);

		/*
		 * Keep stats for number of pages migrated from and to
		 * each lgroup
		 */
		lgrp_stat_add(from->lgrp_id, LGRP_PM_SRC_PGS, page_cnt);
		lgrp_stat_add(to->lgrp_id, LGRP_PM_DEST_PGS, page_cnt);

		/*
		 * update the page_t array we were passed in and
		 * unlink constituent pages of a large page.
		 */
		for (i = 0; i < page_cnt; ++i, ++pp) {
			ASSERT(PAGE_EXCL(newpp));
			ASSERT(newpp->p_szc == pszc);
			ppa[i] = newpp;
			pp = newpp;
			page_sub(&newpp, pp);
			page_downgrade(pp);
		}
		ASSERT(newpp == NULL);
next:
		addr += pgsz;
		ppa += page_cnt;
		npages -= page_cnt;
	}
}
uint_t	page_reclaim_maxcnt = 60;	/* max total iterations */
uint_t	page_reclaim_nofree_maxcnt = 3;	/* max iterations without progress */

/*
 * Reclaim/reserve availrmem for npages.
 * If there is not enough memory start reaping seg, kmem caches.
 * Start pageout scanner (via page_needfree()).
 * Exit after ~ MAX_CNT s regardless of how much memory has been released.
 * Note: There is no guarantee that any availrmem will be freed as
 * this memory typically is locked (kernel heap) or reserved for swap.
 * Also due to memory fragmentation kmem allocator may not be able
 * to free any memory (single user allocated buffer will prevent
 * freeing slab or a page).
 */
int
page_reclaim_mem(pgcnt_t npages, pgcnt_t epages, int adjust)
{
	int	i = 0;
	int	i_nofree = 0;
	int	ret = 0;
	pgcnt_t	deficit;
	pgcnt_t old_availrmem = 0;

	mutex_enter(&freemem_lock);
	while (availrmem < tune.t_minarmem + npages + epages &&
	    i++ < page_reclaim_maxcnt) {
		/* ensure we made some progress in the last few iterations */
		if (old_availrmem < availrmem) {
			old_availrmem = availrmem;
			i_nofree = 0;
		} else if (i_nofree++ >= page_reclaim_nofree_maxcnt) {
			break;
		}

		deficit = tune.t_minarmem + npages + epages - availrmem;
		mutex_exit(&freemem_lock);
		page_needfree(deficit);
		kmem_reap();
		delay(hz);
		page_needfree(-(spgcnt_t)deficit);
		mutex_enter(&freemem_lock);
	}

	if (adjust && (availrmem >= tune.t_minarmem + npages + epages)) {
		availrmem -= npages;
		ret = 1;
	}

	mutex_exit(&freemem_lock);

	return (ret);
}
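/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * caller that wants npages of availrmem reserved (and deducted) before
 * proceeding might do something like the following; treating a zero
 * return as "could not reserve" is an assumption about the return value.
 *
 *	if (!page_reclaim_mem(npages, 0, 1))
 *		return (ENOMEM);
 */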
/*
 * Search the memory segments to locate the desired page.  Within a
 * segment, pages increase linearly with one page structure per
 * physical page frame (size PAGESIZE).  The search begins
 * with the segment that was accessed last, to take advantage of locality.
 * If the hint misses, we start from the beginning of the sorted memseg list.
 */

/*
 * Some data structures for pfn to pp lookup.
 */
ulong_t mhash_per_slot;
struct memseg *memseg_hash[N_MEM_SLOTS];
page_t *
page_numtopp_nolock(pfn_t pfnum)
{
	struct memseg *seg;
	page_t *pp;
	vm_cpu_data_t *vc;

	/*
	 * We need to disable kernel preemption while referencing the
	 * cpu_vm_data field in order to prevent us from being switched to
	 * another cpu and trying to reference it after it has been freed.
	 * This will keep us on cpu and prevent it from being removed while
	 * we are still on it.
	 *
	 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg
	 * which is being reused by DR, which will flush those references
	 * before modifying the reused memseg.  See memseg_cpu_vm_flush().
	 */
	kpreempt_disable();
	vc = CPU->cpu_vm_data;
	ASSERT(vc != NULL);

	MEMSEG_STAT_INCR(nsearch);

	/* Try last winner first */
	if (((seg = vc->vc_pnum_memseg) != NULL) &&
	    (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
		MEMSEG_STAT_INCR(nlastwon);
		pp = seg->pages + (pfnum - seg->pages_base);
		if (pp->p_pagenum == pfnum) {
			kpreempt_enable();
			return ((page_t *)pp);
		}
	}

	/* Else Try hash */
	if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) &&
	    (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
		MEMSEG_STAT_INCR(nhashwon);
		vc->vc_pnum_memseg = seg;
		pp = seg->pages + (pfnum - seg->pages_base);
		if (pp->p_pagenum == pfnum) {
			kpreempt_enable();
			return ((page_t *)pp);
		}
	}

	/* Else Brute force */
	for (seg = memsegs; seg != NULL; seg = seg->next) {
		if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
			vc->vc_pnum_memseg = seg;
			pp = seg->pages + (pfnum - seg->pages_base);
			if (pp->p_pagenum == pfnum) {
				kpreempt_enable();
				return ((page_t *)pp);
			}
		}
	}
	vc->vc_pnum_memseg = NULL;
	kpreempt_enable();
	MEMSEG_STAT_INCR(nnotfound);
	return ((page_t *)NULL);
}
struct memseg *
page_numtomemseg_nolock(pfn_t pfnum)
{
	struct memseg *seg;
	page_t *pp;

	/*
	 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg
	 * which is being reused by DR, which will flush those references
	 * before modifying the reused memseg.  See memseg_cpu_vm_flush().
	 */
	kpreempt_disable();

	/* Try hash */
	if (((seg = memseg_hash[MEMSEG_PFN_HASH(pfnum)]) != NULL) &&
	    (pfnum >= seg->pages_base) && (pfnum < seg->pages_end)) {
		pp = seg->pages + (pfnum - seg->pages_base);
		if (pp->p_pagenum == pfnum) {
			kpreempt_enable();
			return (seg);
		}
	}

	/* Else Brute force */
	for (seg = memsegs; seg != NULL; seg = seg->next) {
		if (pfnum >= seg->pages_base && pfnum < seg->pages_end) {
			pp = seg->pages + (pfnum - seg->pages_base);
			if (pp->p_pagenum == pfnum) {
				kpreempt_enable();
				return (seg);
			}
		}
	}
	kpreempt_enable();
	return ((struct memseg *)NULL);
}
/*
 * Given a page and a count return the page struct that is
 * n structs away from the current one in the global page
 * list.
 *
 * This function wraps to the first page upon
 * reaching the end of the memseg list.
 */
page_t *
page_nextn(page_t *pp, ulong_t n)
{
	struct memseg *seg;
	page_t *ppn;
	vm_cpu_data_t *vc;

	/*
	 * We need to disable kernel preemption while referencing the
	 * cpu_vm_data field in order to prevent us from being switched to
	 * another cpu and trying to reference it after it has been freed.
	 * This will keep us on cpu and prevent it from being removed while
	 * we are still on it.
	 *
	 * We may be caching a memseg in vc_pnum_memseg/vc_pnext_memseg
	 * which is being reused by DR, which will flush those references
	 * before modifying the reused memseg.  See memseg_cpu_vm_flush().
	 */
	kpreempt_disable();
	vc = (vm_cpu_data_t *)CPU->cpu_vm_data;
	ASSERT(vc != NULL);

	if (((seg = vc->vc_pnext_memseg) == NULL) ||
	    (seg->pages_base == seg->pages_end) ||
	    !(pp >= seg->pages && pp < seg->epages)) {

		for (seg = memsegs; seg; seg = seg->next) {
			if (pp >= seg->pages && pp < seg->epages)
				break;
		}

		if (seg == NULL) {
			/* Memory delete got in, return something valid. */
			seg = memsegs;
			pp = seg->pages;
		}
	}

	/* check for wraparound - possible if n is large */
	while ((ppn = (pp + n)) >= seg->epages || ppn < pp) {
		n -= seg->epages - pp;
		seg = seg->next;
		if (seg == NULL)
			seg = memsegs;
		pp = seg->pages;
	}
	vc->vc_pnext_memseg = seg;
	kpreempt_enable();
	return (ppn);
}
/*
 * Initialize for a loop using page_next_scan_large().
 */
page_t *
page_next_scan_init(void **cookie)
{
	ASSERT(cookie != NULL);
	*cookie = (void *)memsegs;
	return ((page_t *)memsegs->pages);
}

/*
 * Return the next page in a scan of page_t's, assuming we want
 * to skip over sub-pages within larger page sizes.
 *
 * The cookie is used to keep track of the current memseg.
 */
page_t *
page_next_scan_large(
	page_t		*pp,
	ulong_t		*n,
	void		**cookie)
{
	struct memseg	*seg = (struct memseg *)*cookie;
	page_t		*new_pp;
	ulong_t		cnt;
	pfn_t		pfn;

	/*
	 * get the count of page_t's to skip based on the page size
	 */
	ASSERT(pp != NULL);
	if (pp->p_szc == 0) {
		cnt = 1;
	} else {
		pfn = page_pptonum(pp);
		cnt = page_get_pagecnt(pp->p_szc);
		cnt -= pfn & (cnt - 1);
	}
	*n += cnt;
	new_pp = pp + cnt;

	/*
	 * Catch if we went past the end of the current memory segment. If so,
	 * just move to the next segment with pages.
	 */
	if (new_pp >= seg->epages || seg->pages_base == seg->pages_end) {
		do {
			seg = seg->next;
			if (seg == NULL)
				seg = memsegs;
		} while (seg->pages_base == seg->pages_end);
		new_pp = seg->pages;
		*cookie = (void *)seg;
	}

	return (new_pp);
}
/*
 * Returns next page in list. Note: this function wraps
 * to the first page in the list upon reaching the end
 * of the list. Callers should be aware of this fact.
 */

/* We should change this to be a #define */
page_t *
page_next(page_t *pp)
{
	return (page_nextn(pp, 1));
}

page_t *
page_first()
{
	return ((page_t *)memsegs->pages);
}
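/*
 * Illustrative sketch (not part of this file): because page_next() wraps
 * to the first page instead of returning NULL, a full walk of the page
 * list has to remember its starting point and stop when it comes back
 * around.  The loop below is a hypothetical example of that pattern.
 *
 *	page_t *first = page_first();
 *	page_t *pp = first;
 *	do {
 *		... examine pp ...
 *		pp = page_next(pp);
 *	} while (pp != first);
 */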
/*
 * This routine is called at boot with the initial memory configuration
 * and when memory is added or removed.
 */
void
build_pfn_hash()
{
	pfn_t cur;
	pgcnt_t index;
	struct memseg *pseg;
	int	i;

	/*
	 * Clear memseg_hash array.
	 * Since memory add/delete is designed to operate concurrently
	 * with normal operation, the hash rebuild must be able to run
	 * concurrently with page_numtopp_nolock(). To support this
	 * functionality, assignments to memseg_hash array members must
	 * be done atomically.
	 *
	 * NOTE: bzero() does not currently guarantee this for kernel
	 * threads, and cannot be used here.
	 */
	for (i = 0; i < N_MEM_SLOTS; i++)
		memseg_hash[i] = NULL;

	hat_kpm_mseghash_clear(N_MEM_SLOTS);

	/*
	 * Physmax is the last valid pfn.
	 */
	mhash_per_slot = (physmax + 1) >> MEM_HASH_SHIFT;
	for (pseg = memsegs; pseg != NULL; pseg = pseg->next) {
		index = MEMSEG_PFN_HASH(pseg->pages_base);
		cur = pseg->pages_base;
		do {
			if (index >= N_MEM_SLOTS)
				index = MEMSEG_PFN_HASH(cur);

			if (memseg_hash[index] == NULL ||
			    memseg_hash[index]->pages_base > pseg->pages_base) {
				memseg_hash[index] = pseg;
				hat_kpm_mseghash_update(index, pseg);
			}
			cur += mhash_per_slot;
			index++;
		} while (cur < pseg->pages_end);
	}
}
/*
 * Return the pagenum for the pp
 */
pfn_t
page_pptonum(page_t *pp)
{
	return (pp->p_pagenum);
}

/*
 * interface to the referenced and modified etc bits
 * in the PSM part of the page struct
 * when no locking is desired.
 */
void
page_set_props(page_t *pp, uint_t flags)
{
	ASSERT((flags & ~(P_MOD | P_REF | P_RO)) == 0);
	pp->p_nrm |= (uchar_t)flags;
}

void
page_clr_all_props(page_t *pp)
{
	pp->p_nrm = 0;
}

/*
 * Clear p_lckcnt and p_cowcnt, adjusting freemem if required.
 */
int
page_clear_lck_cow(page_t *pp, int adjust)
{
	int	f_amount;

	ASSERT(PAGE_EXCL(pp));

	/*
	 * The page_struct_lock need not be acquired here since
	 * we require the caller hold the page exclusively locked.
	 */
	f_amount = 0;
	if (pp->p_lckcnt) {
		f_amount = 1;
		pp->p_lckcnt = 0;
	}
	if (pp->p_cowcnt) {
		f_amount += pp->p_cowcnt;
		pp->p_cowcnt = 0;
	}

	if (adjust && f_amount) {
		mutex_enter(&freemem_lock);
		availrmem += f_amount;
		mutex_exit(&freemem_lock);
	}

	return (f_amount);
}

/*
 * The following function is called from free_vp_pages()
 * for an inexact estimate of a newly free'd page...
 */
ulong_t
page_share_cnt(page_t *pp)
{
	return (hat_page_getshare(pp));
}

int
page_isshared(page_t *pp)
{
	return (hat_page_checkshare(pp, 1));
}

int
page_isfree(page_t *pp)
{
	return (PP_ISFREE(pp));
}

int
page_isref(page_t *pp)
{
	return (hat_page_getattr(pp, P_REF));
}

int
page_ismod(page_t *pp)
{
	return (hat_page_getattr(pp, P_MOD));
}
/*
 * The following code all currently relates to the page capture logic:
 *
 * This logic is used for cases where there is a desire to claim a certain
 * physical page in the system for the caller.  As it may not be possible
 * to capture the page immediately, the p_toxic bits are used in the page
 * structure to indicate that someone wants to capture this page.  When the
 * page gets unlocked, the toxic flag will be noted and an attempt to capture
 * the page will be made.  If it is successful, the original caller's callback
 * will be called with the page to do with it what they please.
 *
 * There is also an async thread which wakes up to attempt to capture
 * pages occasionally which have the capture bit set.  All of the pages which
 * need to be captured asynchronously have been inserted into the
 * page_capture_hash and thus this thread walks that hash list.  Items in the
 * hash have an expiration time so this thread handles that as well by removing
 * the item from the hash if it has expired.
 *
 * Some important things to note are:
 * - if the PR_CAPTURE bit is set on a page, then the page is in the
 *   page_capture_hash.  The page_capture_hash_head.pchh_mutex is needed
 *   to set and clear this bit, and while the lock is held is the only time
 *   you can add or remove an entry from the hash.
 * - the PR_CAPTURE bit can only be set and cleared while holding the
 *   page_capture_hash_head.pchh_mutex
 * - the t_flag field of the thread struct is used with the T_CAPTURING
 *   flag to prevent recursion while dealing with large pages.
 * - pages which need to be retired never expire on the page_capture_hash.
 */
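/*
 * Illustrative sketch (hypothetical consumer, not part of this file): the
 * expected call sequence for a capture consumer.  The callback body and
 * the my_* names are placeholders; only the register and trycapture entry
 * points referenced below are real.
 *
 *	static int
 *	my_capture_cb(page_t *pp, void *datap, uint_t flags)
 *	{
 *		... take ownership of pp (SE_EXCL held), or free it ...
 *		return (0);
 *	}
 *
 *	page_capture_register_callback(my_index, my_duration, my_capture_cb);
 *	...
 *	if (page_trycapture(pp, 0, my_flags, my_datap) != 0) {
 *		... the request failed or was queued; retry later ...
 *	}
 */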
static void page_capture_thread(void);
static kthread_t *pc_thread_id;
static kcondvar_t pc_cv;
static kmutex_t pc_thread_mutex;
static clock_t pc_thread_shortwait;
static clock_t pc_thread_longwait;
static int pc_thread_retry;

struct page_capture_callback pc_cb[PC_NUM_CALLBACKS];

/* Note that this is a circular linked list */
typedef struct page_capture_hash_bucket {
	page_t *pp;
	uchar_t szc;
	uchar_t pri;
	uint_t flags;
	clock_t expires;	/* lbolt at which this request expires. */
	void *datap;		/* Cached data passed in for callback */
	struct page_capture_hash_bucket *next;
	struct page_capture_hash_bucket *prev;
} page_capture_hash_bucket_t;

#define	PC_PRI_HI	0	/* capture now */
#define	PC_PRI_LO	1	/* capture later */
#define	PC_NUM_PRI	2

#define	PAGE_CAPTURE_PRIO(pp) (PP_ISRAF(pp) ? PC_PRI_LO : PC_PRI_HI)

/*
 * Each hash bucket will have its own mutex and two lists which are:
 * active (0):	represents requests which have not been processed by
 *		the page_capture async thread yet.
 * walked (1):	represents requests which have been processed by the
 *		page_capture async thread within its given walk of this bucket.
 *
 * These are all needed so that we can synchronize all async page_capture
 * events.  When the async thread moves to a new bucket, it will append the
 * walked list to the active list and walk each item one at a time, moving it
 * from the active list to the walked list.  Thus if there is an async request
 * outstanding for a given page, it will always be in one of the two lists.
 * New requests will always be added to the active list.
 * If we were not able to capture a page before the request expired, we'd free
 * up the request structure which would indicate to page_capture that there is
 * no longer a need for the given page, and clear the PR_CAPTURE flag if
 * necessary.
 */
typedef struct page_capture_hash_head {
	kmutex_t pchh_mutex;
	uint_t num_pages[PC_NUM_PRI];
	page_capture_hash_bucket_t lists[2]; /* sentinel nodes */
} page_capture_hash_head_t;

#ifdef DEBUG
#define	NUM_PAGE_CAPTURE_BUCKETS	4
#else
#define	NUM_PAGE_CAPTURE_BUCKETS	64
#endif

page_capture_hash_head_t page_capture_hash[NUM_PAGE_CAPTURE_BUCKETS];

/* for now use a very simple hash based upon the size of a page struct */
#define	PAGE_CAPTURE_HASH(pp)	\
	((int)(((uintptr_t)pp >> 7) & (NUM_PAGE_CAPTURE_BUCKETS - 1)))
extern pgcnt_t swapfs_minfree;

int page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap);

/*
 * a callback function is required for page capture requests.
 */
void
page_capture_register_callback(uint_t index, clock_t duration,
    int (*cb_func)(page_t *, void *, uint_t))
{
	ASSERT(pc_cb[index].cb_active == 0);
	ASSERT(cb_func != NULL);
	rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER);
	pc_cb[index].duration = duration;
	pc_cb[index].cb_func = cb_func;
	pc_cb[index].cb_active = 1;
	rw_exit(&pc_cb[index].cb_rwlock);
}
void
page_capture_unregister_callback(uint_t index)
{
	int i, j;
	struct page_capture_hash_bucket *bp1;
	struct page_capture_hash_bucket *bp2;
	struct page_capture_hash_bucket *head = NULL;
	uint_t flags = (1 << index);

	rw_enter(&pc_cb[index].cb_rwlock, RW_WRITER);
	ASSERT(pc_cb[index].cb_active == 1);
	pc_cb[index].duration = 0;	/* Paranoia */
	pc_cb[index].cb_func = NULL;	/* Paranoia */
	pc_cb[index].cb_active = 0;
	rw_exit(&pc_cb[index].cb_rwlock);

	/*
	 * Just move all the entries to a private list which we can walk
	 * through without the need to hold any locks.
	 * No more requests can get added to the hash lists for this consumer
	 * as the cb_active field for the callback has been cleared.
	 */
	for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
		mutex_enter(&page_capture_hash[i].pchh_mutex);
		for (j = 0; j < 2; j++) {
			bp1 = page_capture_hash[i].lists[j].next;
			/* walk through all but first (sentinel) element */
			while (bp1 != &page_capture_hash[i].lists[j]) {
				bp2 = bp1;
				if (bp2->flags & flags) {
					bp1 = bp2->next;
					bp1->prev = bp2->prev;
					bp2->prev->next = bp1;
					bp2->next = head;
					head = bp2;
					/*
					 * Clear the PR_CAPTURE bit as we
					 * hold appropriate locks here.
					 */
					page_clrtoxic(head->pp, PR_CAPTURE);
					page_capture_hash[i].
					    num_pages[bp2->pri]--;
					continue;
				}
				bp1 = bp1->next;
			}
		}
		mutex_exit(&page_capture_hash[i].pchh_mutex);
	}

	while (head != NULL) {
		bp1 = head;
		head = head->next;
		kmem_free(bp1, sizeof (*bp1));
	}
}
/*
 * Find pp in the active list and move it to the walked list if it
 * exists.
 * Note that most often pp should be at the front of the active list
 * as it is currently used and thus there is no other sort of optimization
 * being done here as this is a linked list data structure.
 * Returns 1 on successful move or 0 if page could not be found.
 */
static int
page_capture_move_to_walked(page_t *pp)
{
	page_capture_hash_bucket_t *bp;
	int index;

	index = PAGE_CAPTURE_HASH(pp);

	mutex_enter(&page_capture_hash[index].pchh_mutex);
	bp = page_capture_hash[index].lists[0].next;
	while (bp != &page_capture_hash[index].lists[0]) {
		if (bp->pp == pp) {
			/* Remove from old list */
			bp->next->prev = bp->prev;
			bp->prev->next = bp->next;

			/* Add to new list */
			bp->next = page_capture_hash[index].lists[1].next;
			bp->prev = &page_capture_hash[index].lists[1];
			page_capture_hash[index].lists[1].next = bp;
			bp->next->prev = bp;

			/*
			 * There is a small probability of page on a free
			 * list being retired while being allocated
			 * and before P_RAF is set on it. The page may
			 * end up marked as high priority request instead
			 * of low priority request.
			 * If P_RAF page is not marked as low priority request
			 * change it to low priority request.
			 */
			page_capture_hash[index].num_pages[bp->pri]--;
			bp->pri = PAGE_CAPTURE_PRIO(pp);
			page_capture_hash[index].num_pages[bp->pri]++;
			mutex_exit(&page_capture_hash[index].pchh_mutex);
			return (1);
		}
		bp = bp->next;
	}
	mutex_exit(&page_capture_hash[index].pchh_mutex);
	return (0);
}
/*
 * Add a new entry to the page capture hash.  The only case where a new
 * entry is not added is when the page capture consumer is no longer
 * registered.
 * In this case, we'll silently not add the page to the hash.  We know that
 * page retire will always be registered for the case where we are currently
 * unretiring a page and thus there are no conflicts.
 */
static void
page_capture_add_hash(page_t *pp, uint_t szc, uint_t flags, void *datap)
{
	page_capture_hash_bucket_t *bp1;
	page_capture_hash_bucket_t *bp2;
	int index;
	int cb_index;
	int i;
	uchar_t pri;
#ifdef DEBUG
	page_capture_hash_bucket_t *tp1;
	int l;
#endif

	ASSERT(!(flags & CAPTURE_ASYNC));

	bp1 = kmem_alloc(sizeof (struct page_capture_hash_bucket), KM_SLEEP);

	bp1->pp = pp;
	bp1->szc = szc;
	bp1->flags = flags;
	bp1->datap = datap;

	for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
		if ((flags >> cb_index) & 1) {
			break;
		}
	}

	ASSERT(cb_index != PC_NUM_CALLBACKS);

	rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER);
	if (pc_cb[cb_index].cb_active) {
		if (pc_cb[cb_index].duration == -1) {
			bp1->expires = (clock_t)-1;
		} else {
			bp1->expires = ddi_get_lbolt() +
			    pc_cb[cb_index].duration;
		}
	} else {
		/* There's no callback registered so don't add to the hash */
		rw_exit(&pc_cb[cb_index].cb_rwlock);
		kmem_free(bp1, sizeof (*bp1));
		return;
	}

	index = PAGE_CAPTURE_HASH(pp);

	/*
	 * Only allow capture flag to be modified under this mutex.
	 * Prevents multiple entries for same page getting added.
	 */
	mutex_enter(&page_capture_hash[index].pchh_mutex);

	/*
	 * if not already on the hash, set capture bit and add to the hash
	 */
	if (!(pp->p_toxic & PR_CAPTURE)) {
#ifdef DEBUG
		/* Check for duplicate entries */
		for (l = 0; l < 2; l++) {
			tp1 = page_capture_hash[index].lists[l].next;
			while (tp1 != &page_capture_hash[index].lists[l]) {
				if (tp1->pp == pp) {
					panic("page pp 0x%p already on hash "
					    "at 0x%p\n",
					    (void *)pp, (void *)tp1);
				}
				tp1 = tp1->next;
			}
		}
#endif
		page_settoxic(pp, PR_CAPTURE);
		pri = PAGE_CAPTURE_PRIO(pp);
		bp1->pri = pri;
		bp1->next = page_capture_hash[index].lists[0].next;
		bp1->prev = &page_capture_hash[index].lists[0];
		bp1->next->prev = bp1;
		page_capture_hash[index].lists[0].next = bp1;
		page_capture_hash[index].num_pages[pri]++;
		if (flags & CAPTURE_RETIRE) {
			page_retire_incr_pend_count(datap);
		}
		mutex_exit(&page_capture_hash[index].pchh_mutex);
		rw_exit(&pc_cb[cb_index].cb_rwlock);
		return;
	}

	/*
	 * A page retire request will replace any other request.
	 * A second physmem request which is for a different process than
	 * the currently registered one will be dropped as there is
	 * no way to hold the private data for both calls.
	 * In the future, once there are more callers, this will have to
	 * be worked out better as there needs to be private storage for
	 * at least each type of caller (maybe have datap be an array of
	 * *void's so that we can index based upon callers index).
	 */

	/* walk hash list to update expire time */
	for (i = 0; i < 2; i++) {
		bp2 = page_capture_hash[index].lists[i].next;
		while (bp2 != &page_capture_hash[index].lists[i]) {
			if (bp2->pp == pp) {
				if (flags & CAPTURE_RETIRE) {
					if (!(bp2->flags & CAPTURE_RETIRE)) {
						page_retire_incr_pend_count(
						    datap);
					}
					bp2->flags = flags;
					bp2->expires = bp1->expires;
					bp2->datap = datap;
				} else {
					ASSERT(flags & CAPTURE_PHYSMEM);
					if (!(bp2->flags & CAPTURE_RETIRE) &&
					    (datap == bp2->datap)) {
						bp2->expires = bp1->expires;
					}
				}
				mutex_exit(&page_capture_hash[index].
				    pchh_mutex);
				rw_exit(&pc_cb[cb_index].cb_rwlock);
				kmem_free(bp1, sizeof (*bp1));
				return;
			}
			bp2 = bp2->next;
		}
	}

	/*
	 * the PR_CAPTURE flag is protected by the page_capture_hash mutexes
	 * and thus it either has to be set or not set and can't change
	 * while holding the mutex above.
	 */
	panic("page_capture_add_hash, PR_CAPTURE flag set on pp %p\n",
/*
 * We have a page in our hands, lets try and make it ours by turning
 * it into a clean page like it had just come off the freelists.
 *
 * Returns 0 on success, with the page still EXCL locked.
 * On failure, the page will be unlocked, and returns EAGAIN.
 */
static int
page_capture_clean_page(page_t *pp)
{
	page_t *newpp;
	page_t *npp;
	int skip_unlock = 0;
	spgcnt_t count;
	pgcnt_t extra;
	int ret = 0;

	ASSERT(PAGE_EXCL(pp));
	ASSERT(!PP_RETIRED(pp));
	ASSERT(curthread->t_flag & T_CAPTURING);

	if (PP_ISFREE(pp)) {
		if (!page_reclaim(pp, NULL)) {
			skip_unlock = 1;
			ret = EAGAIN;
			goto cleanup;
		}
		ASSERT(pp->p_szc == 0);
		if (pp->p_vnode != NULL) {
			/*
			 * Since this page came from the
			 * cachelist, we must destroy the
			 * old vnode association.
			 */
			page_hashout(pp, NULL);
		}
		goto cleanup;
	}

	/*
	 * If we know page_relocate will fail, skip it
	 * It could still fail due to a UE on another page but we
	 * can't do anything about that.
	 */
	if (pp->p_toxic & PR_UE) {
		goto skip_relocate;
	}

	/*
	 * It's possible that pages can not have a vnode as fsflush comes
	 * through and cleans up these pages. It's ugly but that's how it is.
	 */
	if (pp->p_vnode == NULL) {
		goto skip_relocate;
	}

	/*
	 * Page was not free, so lets try to relocate it.
	 * page_relocate only works with root pages, so if this is not a root
	 * page, we need to demote it to try and relocate it.
	 * Unfortunately this is the best we can do right now.
	 */
	newpp = NULL;
	if ((pp->p_szc > 0) && (pp != PP_PAGEROOT(pp))) {
		if (page_try_demote_pages(pp) == 0) {
			ret = EAGAIN;
			goto cleanup;
		}
	}
	ret = page_relocate(&pp, &newpp, 1, 0, &count, NULL);
	if (ret == 0) {
		/* unlock the new page(s) */
		while (count-- > 0) {
			ASSERT(newpp != NULL);
			npp = newpp;
			page_sub(&newpp, npp);
			page_unlock(npp);
		}
		ASSERT(newpp == NULL);
		/*
		 * Check to see if the page we have is too large.
		 * If so, demote it freeing up the extra pages.
		 */
		if (pp->p_szc > 0) {
			/* For now demote extra pages to szc == 0 */
			extra = page_get_pagecnt(pp->p_szc) - 1;
			while (extra > 0) {
				npp = pp + extra;
				npp->p_szc = 0;
				page_free(npp, 1);
				extra--;
			}
			/* Make sure to set our page to szc 0 as well */
			ASSERT(pp->p_next == pp && pp->p_prev == pp);
			pp->p_szc = 0;
		}
	} else if (ret == EIO) {
		ret = EAGAIN;
		goto cleanup;
	} else {
		/*
		 * Need to reset return type as we failed to relocate the page
		 * but that does not mean that some of the next steps will not
		 * work.
		 */
		ret = 0;
	}

skip_relocate:

	if (pp->p_szc > 0) {
		if (page_try_demote_pages(pp) == 0) {
			ret = EAGAIN;
			goto cleanup;
		}
	}

	ASSERT(pp->p_szc == 0);

	if (hat_ismod(pp)) {
		ret = EAGAIN;
		goto cleanup;
	}
	if (PP_ISKAS(pp)) {
		ret = EAGAIN;
		goto cleanup;
	}
	if (pp->p_lckcnt || pp->p_cowcnt) {
		ret = EAGAIN;
		goto cleanup;
	}

	(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
	ASSERT(!hat_page_is_mapped(pp));

	if (hat_ismod(pp)) {
		/*
		 * This is a semi-odd case as the page is now modified but not
		 * mapped as we just unloaded the mappings above.
		 */
		ret = EAGAIN;
		goto cleanup;
	}
	if (pp->p_vnode != NULL) {
		page_hashout(pp, NULL);
	}

	/*
	 * At this point, the page should be in a clean state and
	 * we can do whatever we want with it.
	 */

cleanup:
	if (ret != 0) {
		if (!skip_unlock) {
			page_unlock(pp);
		}
	} else {
		ASSERT(pp->p_szc == 0);
		ASSERT(PAGE_EXCL(pp));

		pp->p_next = pp;
		pp->p_prev = pp;
	}
	return (ret);
}
/*
 * Various callers of page_trycapture() can have different restrictions upon
 * what memory they have access to.
 * Returns 0 on success, with the following error codes on failure:
 *	EPERM - The requested page is long term locked, and thus repeated
 *		requests to capture this page will likely fail.
 *	ENOMEM - There was not enough free memory in the system to safely
 *		map the requested page.
 *	ENOENT - The requested page was inside the kernel cage, and the
 *		PHYSMEM_CAGE flag was not set.
 */
static int
page_capture_pre_checks(page_t *pp, uint_t flags)
{
	ASSERT(pp != NULL);

#if defined(__sparc)
	if (pp->p_vnode == &promvp) {
		return (EPERM);
	}

	if (PP_ISNORELOC(pp) && !(flags & CAPTURE_GET_CAGE) &&
	    (flags & CAPTURE_PHYSMEM)) {
		return (ENOENT);
	}

	if (PP_ISNORELOCKERNEL(pp)) {
		return (EPERM);
	}
#else
	if (PP_ISKAS(pp)) {
		return (EPERM);
	}
#endif /* __sparc */

	/* only physmem currently has the restrictions checked below */
	if (!(flags & CAPTURE_PHYSMEM)) {
		return (0);
	}

	if (availrmem < swapfs_minfree) {
		/*
		 * We won't try to capture this page as we are
		 * running low on memory.
		 */
		return (ENOMEM);
	}
	return (0);
}
/*
 * Once we have a page in our mitts, go ahead and complete the capture
 * operation.
 * Returns 1 on failure where page is no longer needed
 * Returns 0 on success
 * Returns -1 if there was a transient failure.
 * Failure cases must release the SE_EXCL lock on pp (usually via page_free).
 */
static int
page_capture_take_action(page_t *pp, uint_t flags, void *datap)
{
	int cb_index;
	int ret = 0;
	page_capture_hash_bucket_t *bp1;
	page_capture_hash_bucket_t *bp2;
	int found = 0;
	int i;
	int index;

	ASSERT(PAGE_EXCL(pp));
	ASSERT(curthread->t_flag & T_CAPTURING);

	for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
		if ((flags >> cb_index) & 1) {
			break;
		}
	}
	ASSERT(cb_index < PC_NUM_CALLBACKS);

	/*
	 * Remove the entry from the page_capture hash, but don't free it yet
	 * as we may need to put it back.
	 * Since we own the page at this point in time, we should find it
	 * in the hash if this is an ASYNC call.  If we don't it's likely
	 * that the page_capture_async() thread decided that this request
	 * had expired, in which case we just continue on.
	 */
	if (flags & CAPTURE_ASYNC) {
		index = PAGE_CAPTURE_HASH(pp);

		mutex_enter(&page_capture_hash[index].pchh_mutex);
		for (i = 0; i < 2 && !found; i++) {
			bp1 = page_capture_hash[index].lists[i].next;
			while (bp1 != &page_capture_hash[index].lists[i]) {
				if (bp1->pp == pp) {
					bp1->next->prev = bp1->prev;
					bp1->prev->next = bp1->next;
					page_capture_hash[index].
					    num_pages[bp1->pri]--;
					page_clrtoxic(pp, PR_CAPTURE);
					found = 1;
					break;
				}
				bp1 = bp1->next;
			}
		}
		mutex_exit(&page_capture_hash[index].pchh_mutex);
	}

	/* Synchronize with the unregister func. */
	rw_enter(&pc_cb[cb_index].cb_rwlock, RW_READER);
	if (!pc_cb[cb_index].cb_active) {
		page_free(pp, 1);
		rw_exit(&pc_cb[cb_index].cb_rwlock);
		if (found) {
			kmem_free(bp1, sizeof (*bp1));
		}
		return (1);
	}

	/*
	 * We need to remove the entry from the page capture hash and turn off
	 * the PR_CAPTURE bit before calling the callback.  We'll need to cache
	 * the entry here, and then based upon the return value, cleanup
	 * appropriately or re-add it to the hash, making sure that someone
	 * else hasn't already done so.
	 * It should be rare for the callback to fail and thus it's ok for
	 * the failure path to be a bit complicated as the success path is
	 * cleaner and the locking rules are easier to follow.
	 */

	ret = pc_cb[cb_index].cb_func(pp, datap, flags);

	rw_exit(&pc_cb[cb_index].cb_rwlock);

	/*
	 * If this was an ASYNC request, we need to cleanup the hash if the
	 * callback was successful or if the request was no longer valid.
	 * For non-ASYNC requests, we return failure to map and the caller
	 * will take care of adding the request to the hash.
	 * Note also that the callback itself is responsible for the page
	 * at this point in time in terms of locking ...  The most common
	 * case for the failure path should just be a page_free.
	 */
	if (ret >= 0) {
		if (found) {
			if (bp1->flags & CAPTURE_RETIRE) {
				page_retire_decr_pend_count(datap);
			}
			kmem_free(bp1, sizeof (*bp1));
		}
		return (ret);
	}
	if (!found) {
		return (ret);
	}

	ASSERT(flags & CAPTURE_ASYNC);

	/*
	 * Check for expiration time first as we can just free it up if it's
	 * expired.
	 */
	if (ddi_get_lbolt() > bp1->expires && bp1->expires != -1) {
		kmem_free(bp1, sizeof (*bp1));
		return (ret);
	}

	/*
	 * The callback failed and there used to be an entry in the hash for
	 * this page, so we need to add it back to the hash.
	 */
	mutex_enter(&page_capture_hash[index].pchh_mutex);
	if (!(pp->p_toxic & PR_CAPTURE)) {
		/* just add bp1 back to head of walked list */
		page_settoxic(pp, PR_CAPTURE);
		bp1->next = page_capture_hash[index].lists[1].next;
		bp1->prev = &page_capture_hash[index].lists[1];
		bp1->next->prev = bp1;
		bp1->pri = PAGE_CAPTURE_PRIO(pp);
		page_capture_hash[index].lists[1].next = bp1;
		page_capture_hash[index].num_pages[bp1->pri]++;
		mutex_exit(&page_capture_hash[index].pchh_mutex);
		return (ret);
	}

	/*
	 * Otherwise there was a new capture request added to list
	 * Need to make sure that our original data is represented if
	 * appropriate.
	 */
	for (i = 0; i < 2; i++) {
		bp2 = page_capture_hash[index].lists[i].next;
		while (bp2 != &page_capture_hash[index].lists[i]) {
			if (bp2->pp == pp) {
				if (bp1->flags & CAPTURE_RETIRE) {
					if (!(bp2->flags & CAPTURE_RETIRE)) {
						bp2->szc = bp1->szc;
						bp2->flags = bp1->flags;
						bp2->expires = bp1->expires;
						bp2->datap = bp1->datap;
					}
				} else {
					ASSERT(bp1->flags & CAPTURE_PHYSMEM);
					if (!(bp2->flags & CAPTURE_RETIRE)) {
						bp2->szc = bp1->szc;
						bp2->flags = bp1->flags;
						bp2->expires = bp1->expires;
						bp2->datap = bp1->datap;
					}
				}
				page_capture_hash[index].num_pages[bp2->pri]--;
				bp2->pri = PAGE_CAPTURE_PRIO(pp);
				page_capture_hash[index].num_pages[bp2->pri]++;
				mutex_exit(&page_capture_hash[index].
				    pchh_mutex);
				kmem_free(bp1, sizeof (*bp1));
				return (ret);
			}
			bp2 = bp2->next;
		}
	}
	panic("PR_CAPTURE set but not on hash for pp 0x%p\n", (void *)pp);
	/*NOTREACHED*/
}
/*
 * Try to capture the given page for the caller specified in the flags
 * parameter.  The page will either be captured and handed over to the
 * appropriate callback, or will be queued up in the page capture hash
 * to be captured asynchronously.
 * If the current request is due to an async capture, the page must be
 * exclusively locked before calling this function.
 * Currently szc must be 0 but in the future this should be expandable to
 * other page sizes.
 * Returns 0 on success, with the following error codes on failure:
 *	EPERM - The requested page is long term locked, and thus repeated
 *		requests to capture this page will likely fail.
 *	ENOMEM - There was not enough free memory in the system to safely
 *		map the requested page.
 *	ENOENT - The requested page was inside the kernel cage, and the
 *		CAPTURE_GET_CAGE flag was not set.
 *	EAGAIN - The requested page could not be captured at this point in
 *		time but future requests will likely work.
 *	EBUSY - The requested page is retired and the CAPTURE_GET_RETIRED flag
 *		was not set.
 */
static int
page_itrycapture(page_t *pp, uint_t szc, uint_t flags, void *datap)
{
	int ret;
	int cb_index;

	if (flags & CAPTURE_ASYNC) {
		ASSERT(PAGE_EXCL(pp));
		goto captured;
	}

	/* Make sure there's enough availrmem ... */
	ret = page_capture_pre_checks(pp, flags);
	if (ret != 0) {
		return (ret);
	}

	if (!page_trylock(pp, SE_EXCL)) {
		for (cb_index = 0; cb_index < PC_NUM_CALLBACKS; cb_index++) {
			if ((flags >> cb_index) & 1) {
				break;
			}
		}
		ASSERT(cb_index < PC_NUM_CALLBACKS);

		/* Special case for retired pages */
		if (PP_RETIRED(pp)) {
			if (flags & CAPTURE_GET_RETIRED) {
				if (!page_unretire_pp(pp, PR_UNR_TEMP)) {
					/*
					 * Need to set capture bit and add to
					 * hash so that the page will be
					 * retired when freed.
					 */
					page_capture_add_hash(pp, szc,
					    CAPTURE_RETIRE, NULL);
					return (0);
				}
			}
			return (EBUSY);
		}

		page_capture_add_hash(pp, szc, flags, datap);
		return (EAGAIN);
	}

captured:
	ASSERT(PAGE_EXCL(pp));

	/* Need to check for physmem async requests that availrmem is sane */
	if ((flags & (CAPTURE_ASYNC | CAPTURE_PHYSMEM)) ==
	    (CAPTURE_ASYNC | CAPTURE_PHYSMEM) &&
	    (availrmem < swapfs_minfree)) {
		page_unlock(pp);
		return (ENOMEM);
	}

	ret = page_capture_clean_page(pp);

	if (ret != 0) {
		/* We failed to get the page, so lets add it to the hash */
		if (!(flags & CAPTURE_ASYNC)) {
			page_capture_add_hash(pp, szc, flags, datap);
		}
		return (ret);
	}

	ASSERT(PAGE_EXCL(pp));
	ASSERT(pp->p_szc == 0);

	/* Call the callback */
	ret = page_capture_take_action(pp, flags, datap);

	if (ret == 0) {
		return (0);
	}

	/*
	 * Note that in the failure cases from page_capture_take_action, the
	 * EXCL lock will have already been dropped.
	 */
	if ((ret == -1) && (!(flags & CAPTURE_ASYNC))) {
		page_capture_add_hash(pp, szc, flags, datap);
	}
	return (EAGAIN);
}

int
page_trycapture(page_t *pp, uint_t szc, uint_t flags, void *datap)
{
	int ret;

	curthread->t_flag |= T_CAPTURING;
	ret = page_itrycapture(pp, szc, flags, datap);
	curthread->t_flag &= ~T_CAPTURING; /* xor works as we know it's set */
	return (ret);
}
/*
 * When unlocking a page which has the PR_CAPTURE bit set, this routine
 * gets called to try and capture the page.
 */
void
page_unlock_capture(page_t *pp)
{
	page_capture_hash_bucket_t *bp;
	int index;
	int i;
	uint_t szc;
	uint_t flags = 0;
	void *datap;
	kmutex_t *mp;
	extern vnode_t retired_pages;

	/*
	 * We need to protect against a possible deadlock here where we own
	 * the vnode page hash mutex and want to acquire it again as there
	 * are locations in the code, where we unlock a page while holding
	 * the mutex which can lead to the page being captured and eventually
	 * end up here.  As we may be hashing out the old page and hashing into
	 * the retire vnode, we need to make sure we don't own them.
	 * Other callbacks who do hash operations also need to make sure that
	 * before they hashin to a vnode that they do not currently own the
	 * vphm mutex otherwise there will be a panic.
	 */
	if (mutex_owned(page_vnode_mutex(&retired_pages))) {
		page_unlock_nocapture(pp);
		return;
	}
	if (pp->p_vnode != NULL && mutex_owned(page_vnode_mutex(pp->p_vnode))) {
		page_unlock_nocapture(pp);
		return;
	}

	index = PAGE_CAPTURE_HASH(pp);

	mp = &page_capture_hash[index].pchh_mutex;
	mutex_enter(mp);
	for (i = 0; i < 2; i++) {
		bp = page_capture_hash[index].lists[i].next;
		while (bp != &page_capture_hash[index].lists[i]) {
			if (bp->pp == pp) {
				szc = bp->szc;
				flags = bp->flags | CAPTURE_ASYNC;
				datap = bp->datap;
				mutex_exit(mp);
				(void) page_trycapture(pp, szc, flags, datap);
				return;
			}
			bp = bp->next;
		}
	}

	/* Failed to find page in hash so clear flags and unlock it. */
	page_clrtoxic(pp, PR_CAPTURE);
	page_unlock(pp);

	mutex_exit(mp);
}
void
page_capture_init()
{
	int i;

	for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
		page_capture_hash[i].lists[0].next =
		    &page_capture_hash[i].lists[0];
		page_capture_hash[i].lists[0].prev =
		    &page_capture_hash[i].lists[0];
		page_capture_hash[i].lists[1].next =
		    &page_capture_hash[i].lists[1];
		page_capture_hash[i].lists[1].prev =
		    &page_capture_hash[i].lists[1];
	}

	pc_thread_shortwait = 23 * hz;
	pc_thread_longwait = 1201 * hz;
	pc_thread_retry = 3;
	mutex_init(&pc_thread_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&pc_cv, NULL, CV_DEFAULT, NULL);
	pc_thread_id = thread_create(NULL, 0, page_capture_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}
/*
 * It is necessary to scrub any failing pages prior to reboot in order to
 * prevent a latent error trap from occurring on the next boot.
 */
void
page_retire_mdboot()
{
	page_t *pp;
	int i, j;
	page_capture_hash_bucket_t *bp;
	uchar_t pri;

	/* walk lists looking for pages to scrub */
	for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
		for (pri = 0; pri < PC_NUM_PRI; pri++) {
			if (page_capture_hash[i].num_pages[pri] != 0) {
				break;
			}
		}
		if (pri == PC_NUM_PRI)
			continue;

		mutex_enter(&page_capture_hash[i].pchh_mutex);

		for (j = 0; j < 2; j++) {
			bp = page_capture_hash[i].lists[j].next;
			while (bp != &page_capture_hash[i].lists[j]) {
				pp = bp->pp;
				if (page_trylock(pp, SE_EXCL)) {
					pagescrub(pp, 0, PAGESIZE);
					page_unlock(pp);
				}
				bp = bp->next;
			}
		}
		mutex_exit(&page_capture_hash[i].pchh_mutex);
	}
}
/*
 * Walk the page_capture_hash trying to capture pages and also cleanup old
 * entries which have expired.
 */
void
page_capture_async()
{
	page_t *pp;
	int i;
	int ret;
	page_capture_hash_bucket_t *bp1, *bp2;
	uint_t szc;
	uint_t flags;
	void *datap;
	uchar_t pri;

	/* If there are outstanding pages to be captured, get to work */
	for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
		for (pri = 0; pri < PC_NUM_PRI; pri++) {
			if (page_capture_hash[i].num_pages[pri] != 0)
				break;
		}
		if (pri == PC_NUM_PRI)
			continue;

		/* Append list 1 to list 0 and then walk through list 0 */
		mutex_enter(&page_capture_hash[i].pchh_mutex);
		bp1 = &page_capture_hash[i].lists[1];
		bp2 = bp1->next;
		if (bp1 != bp2) {
			bp1->prev->next = page_capture_hash[i].lists[0].next;
			bp2->prev = &page_capture_hash[i].lists[0];
			page_capture_hash[i].lists[0].next->prev = bp1->prev;
			page_capture_hash[i].lists[0].next = bp2;
			bp1->next = bp1;
			bp1->prev = bp1;
		}

		/* list[1] will be empty now */

		bp1 = page_capture_hash[i].lists[0].next;
		while (bp1 != &page_capture_hash[i].lists[0]) {
			/* Check expiration time */
			if ((ddi_get_lbolt() > bp1->expires &&
			    bp1->expires != -1) ||
			    page_deleted(bp1->pp)) {
				page_capture_hash[i].lists[0].next = bp1->next;
				bp1->next->prev =
				    &page_capture_hash[i].lists[0];
				page_capture_hash[i].num_pages[bp1->pri]--;

				/*
				 * We can safely remove the PR_CAPTURE bit
				 * without holding the EXCL lock on the page
				 * as the PR_CAPTURE bit requires that the
				 * page_capture_hash[].pchh_mutex be held
				 * to modify it.
				 */
				page_clrtoxic(bp1->pp, PR_CAPTURE);
				mutex_exit(&page_capture_hash[i].pchh_mutex);
				kmem_free(bp1, sizeof (*bp1));
				mutex_enter(&page_capture_hash[i].pchh_mutex);
				bp1 = page_capture_hash[i].lists[0].next;
				continue;
			}
			pp = bp1->pp;
			szc = bp1->szc;
			flags = bp1->flags;
			datap = bp1->datap;
			mutex_exit(&page_capture_hash[i].pchh_mutex);
			if (page_trylock(pp, SE_EXCL)) {
				ret = page_trycapture(pp, szc,
				    flags | CAPTURE_ASYNC, datap);
			} else {
				ret = 1;	/* move to walked hash */
			}

			if (ret != 0) {
				/* Move to walked hash */
				(void) page_capture_move_to_walked(pp);
			}
			mutex_enter(&page_capture_hash[i].pchh_mutex);
			bp1 = page_capture_hash[i].lists[0].next;
		}

		mutex_exit(&page_capture_hash[i].pchh_mutex);
	}
}
/*
 * This function is called by the page_capture_thread, and is needed in
 * order to initiate aio cleanup, so that pages used in aio
 * will be unlocked and subsequently retired by page_capture_thread.
 */
static int
do_aio_cleanup(void)
{
	proc_t *procp;
	int (*aio_cleanup_dr_delete_memory)(proc_t *);
	int cleaned = 0;

	if (modload("sys", "kaio") == -1) {
		cmn_err(CE_WARN, "do_aio_cleanup: cannot load kaio");
		return (0);
	}
	/*
	 * We use the aio_cleanup_dr_delete_memory function to
	 * initiate the actual clean up; this function will wake
	 * up the per-process aio_cleanup_thread.
	 */
	aio_cleanup_dr_delete_memory = (int (*)(proc_t *))
	    modgetsymvalue("aio_cleanup_dr_delete_memory", 0);
	if (aio_cleanup_dr_delete_memory == NULL) {
		cmn_err(CE_WARN,
		    "aio_cleanup_dr_delete_memory not found in kaio");
		return (0);
	}
	mutex_enter(&pidlock);
	for (procp = practive; (procp != NULL); procp = procp->p_next) {
		mutex_enter(&procp->p_lock);
		if (procp->p_aio != NULL) {
			/* cleanup proc's outstanding kaio */
			cleaned += (*aio_cleanup_dr_delete_memory)(procp);
		}
		mutex_exit(&procp->p_lock);
	}
	mutex_exit(&pidlock);
	return (cleaned);
}
/*
 * helper function for page_capture_thread
 */
static void
page_capture_handle_outstanding(void)
{
	int ntry;

	/* Reap pages before attempting capture pages */
	kmem_reap();

	if ((page_retire_pend_count() > page_retire_pend_kas_count()) &&
	    hat_supported(HAT_DYNAMIC_ISM_UNMAP, (void *)0)) {
		/*
		 * Note: Purging only for platforms that support
		 * ISM hat_pageunload() - mainly SPARC. On x86/x64
		 * platforms ISM pages SE_SHARED locked until destroyed.
		 */

		/* disable and purge seg_pcache */
		(void) seg_p_disable();
		for (ntry = 0; ntry < pc_thread_retry; ntry++) {
			if (!page_retire_pend_count())
				break;
			if (do_aio_cleanup()) {
				/*
				 * allow the apps cleanup threads
				 * to run
				 */
				delay(pc_thread_shortwait);
			}
			page_capture_async();
		}
		/* reenable seg_pcache */
		seg_p_enable();

		/* completed what can be done.  break out */
		return;
	}

	/*
	 * For kernel pages and/or unsupported HAT_DYNAMIC_ISM_UNMAP, reap
	 * and then attempt to capture.
	 */
	page_capture_async();
}
/*
 * The page_capture_thread loops forever, looking to see if there are
 * pages still waiting to be captured.
 */
static void
page_capture_thread(void)
{
	callb_cpr_t c;
	int i;
	int high_pri_pages;
	int low_pri_pages;
	clock_t timeout;

	CALLB_CPR_INIT(&c, &pc_thread_mutex, callb_generic_cpr, "page_capture");

	mutex_enter(&pc_thread_mutex);
	for (;;) {
		high_pri_pages = 0;
		low_pri_pages = 0;
		for (i = 0; i < NUM_PAGE_CAPTURE_BUCKETS; i++) {
			high_pri_pages +=
			    page_capture_hash[i].num_pages[PC_PRI_HI];
			low_pri_pages +=
			    page_capture_hash[i].num_pages[PC_PRI_LO];
		}

		timeout = pc_thread_longwait;
		if (high_pri_pages != 0) {
			timeout = pc_thread_shortwait;
			page_capture_handle_outstanding();
		} else if (low_pri_pages != 0) {
			page_capture_async();
		}
		CALLB_CPR_SAFE_BEGIN(&c);
		(void) cv_reltimedwait(&pc_cv, &pc_thread_mutex,
		    timeout, TR_CLOCK_TICK);
		CALLB_CPR_SAFE_END(&c, &pc_thread_mutex);
	}
	/*NOTREACHED*/
}
/*
 * Attempt to locate a bucket that has enough pages to satisfy the request.
 * The initial check is done without the lock to avoid unneeded contention.
 * The function returns 1 if enough pages were found, else 0 if it could not
 * find enough pages in a bucket.
 */
static int
pcf_decrement_bucket(pgcnt_t npages)
{
	struct pcf	*p;
	struct pcf	*q;
	int		i;

	p = &pcf[PCF_INDEX()];
	q = &pcf[pcf_fanout];
	for (i = 0; i < pcf_fanout; i++) {
		if (p->pcf_count > npages) {
			/*
			 * a good one to try.
			 */
			mutex_enter(&p->pcf_lock);
			if (p->pcf_count > npages) {
				p->pcf_count -= (uint_t)npages;
				/*
				 * freemem is not protected by any lock.
				 * Thus, we cannot have any assertion
				 * containing freemem here.
				 */
				freemem -= npages;
				mutex_exit(&p->pcf_lock);
				return (1);
			}
			mutex_exit(&p->pcf_lock);
		}
		p++;
		if (p >= q) {
			p = pcf;
		}
	}
	return (0);
}
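/*
 * Illustrative sketch (hypothetical caller, not part of this file): how an
 * allocation path might use the helpers above and below, trying the cheap
 * single-bucket path first and falling back to draining several buckets.
 * The surrounding error handling is a placeholder.
 *
 *	if (!pcf_decrement_bucket(npages)) {
 *		pgcnt_t total;
 *		if (!pcf_decrement_multiple(&total, npages, 1)) {
 *			... not enough memory; wait or fail ...
 *		}
 *	}
 */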
/*
 * pcftotal_ret:	If the value is not NULL and we have walked all the
 *			buckets but did not find enough pages then it will
 *			be set to the total number of pages in all the pcf
 *			buckets.
 * npages:		Is the number of pages we have been requested to
 *			find.
 * unlock:		If set to 0 we will leave the buckets locked if the
 *			requested number of pages are not found.
 *
 * Go and try to satisfy the page request from any number of buckets.
 * This can be a very expensive operation as we have to lock the buckets
 * we are checking (and keep them locked), starting at bucket 0.
 *
 * The function returns 1 if enough pages were found, else 0 if it could not
 * find enough pages in the buckets.
 */
static int
pcf_decrement_multiple(pgcnt_t *pcftotal_ret, pgcnt_t npages, int unlock)
{
	struct pcf	*p;
	pgcnt_t		pcftotal;
	int		i;

	p = pcf;
	/* try to collect pages from several pcf bins */
	for (pcftotal = 0, i = 0; i < pcf_fanout; i++) {
		mutex_enter(&p->pcf_lock);
		pcftotal += p->pcf_count;
		if (pcftotal >= npages) {
			/*
			 * Wow!  There are enough pages laying around
			 * to satisfy the request.  Do the accounting,
			 * drop the locks we acquired, and go back.
			 *
			 * freemem is not protected by any lock. So,
			 * we cannot have any assertion containing
			 * freemem.
			 */
			freemem -= npages;
			while (p >= pcf) {
				if (p->pcf_count <= npages) {
					npages -= p->pcf_count;
					p->pcf_count = 0;
				} else {
					p->pcf_count -= (uint_t)npages;
					npages = 0;
				}
				mutex_exit(&p->pcf_lock);
				p--;
			}
			ASSERT(npages == 0);
			return (1);
		}
		p++;
	}
	if (unlock) {
		/* failed to collect pages - release the locks */
		while (--p >= pcf) {
			mutex_exit(&p->pcf_lock);
		}
	}
	if (pcftotal_ret != NULL)
		*pcftotal_ret = pcftotal;