/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/thread.h>
#include <sys/proc.h>
#include <sys/callb.h>
#include <sys/vnode.h>
#include <sys/debug.h>
#include <sys/systm.h>		/* for bzero */
#include <sys/memlist.h>
#include <sys/cmn_err.h>
#include <sys/sysmacros.h>
#include <sys/vmsystm.h>	/* for NOMEMWAIT() */
#include <sys/atomic.h>		/* used to update kcage_freemem */
#include <sys/kmem.h>		/* for kmem_reap */
#include <sys/errno.h>
#include <sys/mem_cage.h>
#include <vm/seg_kmem.h>
#include <vm/page.h>
#include <vm/hat.h>
#include <vm/vm_dep.h>
#include <sys/mem_config.h>
#include <sys/lgrp.h>
#include <sys/rwlock.h>
#include <sys/cpupart.h>

extern pri_t maxclsyspri;

#ifdef DEBUG
#define	KCAGE_STATS
#endif

#ifdef KCAGE_STATS

#define	KCAGE_STATS_VERSION 9	/* can help report generators */
#define	KCAGE_STATS_NSCANS 256	/* depth of scan statistics buffer */

struct kcage_stats_scan {
	/* managed by KCAGE_STAT_* macros */
	clock_t	scan_lbolt;
	uint_t	scan_id;

	/* set in kcage_cageout() */
	uint_t	kt_passes;
	clock_t	kt_ticks;
	pgcnt_t	kt_kcage_freemem_start;
	pgcnt_t	kt_kcage_freemem_end;
	pgcnt_t	kt_freemem_start;
	pgcnt_t	kt_freemem_end;
	uint_t	kt_examined;
	uint_t	kt_cantlock;
	uint_t	kt_gotone;
	uint_t	kt_gotonefree;
	uint_t	kt_skipshared;
	uint_t	kt_skiprefd;
	uint_t	kt_destroy;

	/* set in kcage_invalidate_page() */
	uint_t	kip_reloclocked;
	uint_t	kip_relocmod;
	uint_t	kip_destroy;
	uint_t	kip_nomem;
	uint_t	kip_demotefailed;

	/* set in kcage_expand() */
	uint_t	ke_wanted;
	uint_t	ke_examined;
	uint_t	ke_lefthole;
	uint_t	ke_gotone;
	uint_t	ke_gotonefree;
};

struct kcage_stats {
	/* managed by KCAGE_STAT_* macros */
	uint_t	version;
	uint_t	size;

	/* set in kcage_cageout */
	uint_t	kt_wakeups;
	uint_t	kt_scans;
	uint_t	kt_cageout_break;

	/* set in kcage_expand */
	uint_t	ke_calls;
	uint_t	ke_nopfn;
	uint_t	ke_nopaget;
	uint_t	ke_isnoreloc;
	uint_t	ke_deleting;
	uint_t	ke_lowfreemem;
	uint_t	ke_terminate;

	/* set in kcage_freemem_add() */
	uint_t	kfa_trottlewake;

	/* set in kcage_freemem_sub() */
	uint_t	kfs_cagewake;

	/* set in kcage_create_throttle */
	uint_t	kct_calls;
	uint_t	kct_cageout;
	uint_t	kct_critical;
	uint_t	kct_exempt;
	uint_t	kct_cagewake;
	uint_t	kct_wait;
	uint_t	kct_progress;
	uint_t	kct_noprogress;
	uint_t	kct_timeout;

	/* set in kcage_cageout_wakeup */
	uint_t	kcw_expandearly;

	/* managed by KCAGE_STAT_* macros */
	uint_t	scan_array_size;
	uint_t	scan_index;
	struct kcage_stats_scan scans[KCAGE_STATS_NSCANS];
};

static struct kcage_stats kcage_stats;
static struct kcage_stats_scan kcage_stats_scan_zero;

/*
 * No real need for atomics here. For the most part the incs and sets are
 * done by the kernel cage thread. There are a few that are done by any
 * number of other threads. Those cases are noted by comments.
 */
#define	KCAGE_STAT_INCR(m)	kcage_stats.m++

#define	KCAGE_STAT_NINCR(m, v) kcage_stats.m += (v)

#define	KCAGE_STAT_INCR_SCAN(m)	\
	KCAGE_STAT_INCR(scans[kcage_stats.scan_index].m)

#define	KCAGE_STAT_NINCR_SCAN(m, v) \
	KCAGE_STAT_NINCR(scans[kcage_stats.scan_index].m, v)

#define	KCAGE_STAT_SET(m, v)	kcage_stats.m = (v)

#define	KCAGE_STAT_SETZ(m, v)	\
	if (kcage_stats.m == 0) kcage_stats.m = (v)

#define	KCAGE_STAT_SET_SCAN(m, v)	\
	KCAGE_STAT_SET(scans[kcage_stats.scan_index].m, v)

#define	KCAGE_STAT_SETZ_SCAN(m, v) \
	KCAGE_STAT_SETZ(scans[kcage_stats.scan_index].m, v)

#define	KCAGE_STAT_INC_SCAN_INDEX \
	KCAGE_STAT_SET_SCAN(scan_lbolt, ddi_get_lbolt()); \
	KCAGE_STAT_SET_SCAN(scan_id, kcage_stats.scan_index); \
	kcage_stats.scan_index = \
	    (kcage_stats.scan_index + 1) % KCAGE_STATS_NSCANS; \
	kcage_stats.scans[kcage_stats.scan_index] = kcage_stats_scan_zero

#define	KCAGE_STAT_INIT_SCAN_INDEX \
	kcage_stats.version = KCAGE_STATS_VERSION; \
	kcage_stats.size = sizeof (kcage_stats); \
	kcage_stats.scan_array_size = KCAGE_STATS_NSCANS; \
	kcage_stats.scan_index = 0

#else /* KCAGE_STATS */

#define	KCAGE_STAT_INCR(v)
#define	KCAGE_STAT_NINCR(m, v)
#define	KCAGE_STAT_INCR_SCAN(v)
#define	KCAGE_STAT_NINCR_SCAN(m, v)
#define	KCAGE_STAT_SET(m, v)
#define	KCAGE_STAT_SETZ(m, v)
#define	KCAGE_STAT_SET_SCAN(m, v)
#define	KCAGE_STAT_SETZ_SCAN(m, v)
#define	KCAGE_STAT_INC_SCAN_INDEX
#define	KCAGE_STAT_INIT_SCAN_INDEX

#endif /* KCAGE_STATS */

static kmutex_t kcage_throttle_mutex;	/* protects kcage_throttle_cv */
static kcondvar_t kcage_throttle_cv;

static kmutex_t kcage_cageout_mutex;	/* protects cv and ready flag */
static kcondvar_t kcage_cageout_cv;	/* cageout thread naps here */
static int kcage_cageout_ready;		/* nonzero when cageout thread ready */
kthread_id_t kcage_cageout_thread;	/* to aid debugging */

static krwlock_t kcage_range_rwlock;	/* protects kcage_glist elements */

/*
 * Cage expansion happens within a range.
 */
struct kcage_glist {
	struct kcage_glist	*next;

	pfn_t	base;
	pfn_t	lim;
	pfn_t	curr;
	int	decr;
};

static struct kcage_glist *kcage_glist;
static struct kcage_glist *kcage_current_glist;

/*
 * The firstfree element is provided so that kmem_alloc can be avoided
 * until that cage has somewhere to go. This is not currently a problem
 * as early kmem_alloc's use BOP_ALLOC instead of page_create_va.
 */
static vmem_t *kcage_arena;
static struct kcage_glist kcage_glist_firstfree;
static struct kcage_glist *kcage_glist_freelist = &kcage_glist_firstfree;

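/*
 * Each kcage_glist element covers one physical range the cage can grow
 * into: [base, lim) bounds the range, curr is the next pfn to be handed
 * out, and decr is nonzero when the cage grows downward from lim toward
 * base.  Elements from kcage_glist up to kcage_current_glist hold cage
 * memory; elements past kcage_current_glist are not yet in use.
 */
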
/*
 * Miscellaneous forward references
 */
static struct kcage_glist *kcage_glist_alloc(void);
static int kcage_glist_delete(pfn_t, pfn_t, struct kcage_glist **);
static void kcage_cageout(void);
static int kcage_invalidate_page(page_t *, pgcnt_t *);
static int kcage_setnoreloc_pages(page_t *, se_t);
static int kcage_range_add_internal(pfn_t base, pgcnt_t npgs, kcage_dir_t);
static void kcage_init(pgcnt_t preferred_size);
static int kcage_range_delete_internal(pfn_t base, pgcnt_t npgs);

/*
 * Kernel Memory Cage counters and thresholds.
 */
int kcage_on = 0;
pgcnt_t kcage_freemem;
pgcnt_t kcage_needfree;
pgcnt_t kcage_lotsfree;
pgcnt_t kcage_desfree;
pgcnt_t kcage_minfree;
pgcnt_t kcage_throttlefree;
pgcnt_t kcage_reserve;
int kcage_maxwait = 10;	/* in seconds */

/* when we use lp for kmem we start the cage at a higher initial value */
pgcnt_t kcage_kmemlp_mincage;

#ifdef DEBUG
pgcnt_t	kcage_pagets;
#define	KCAGEPAGETS_INC()	kcage_pagets++
#else
#define	KCAGEPAGETS_INC()
#endif

/* kstats to export what pages are currently caged */
kmutex_t kcage_kstat_lock;
static int kcage_kstat_update(kstat_t *ksp, int rw);
static int kcage_kstat_snapshot(kstat_t *ksp, void *buf, int rw);

/*
 * Startup and Dynamic Reconfiguration interfaces.
 * kcage_range_add()
 * kcage_range_del()
 * kcage_range_delete_post_mem_del()
 * kcage_range_init()
 * kcage_set_thresholds()
 */

/*
 * Called from page_get_contig_pages to get the approximate kcage pfn range
 * for exclusion from search for contiguous pages. This routine is called
 * without kcage_range lock (kcage routines can call page_get_contig_pages
 * through page_relocate) and with the assumption, based on kcage_range_add,
 * that kcage_current_glist always contains a valid pointer.
 */
int
kcage_current_pfn(pfn_t *pfncur)
{
	struct kcage_glist *lp = kcage_current_glist;

	ASSERT(kcage_on);

	ASSERT(lp != NULL);

	*pfncur = lp->curr;

	return (lp->decr);
}

/*
 * Called from vm_pagelist.c during coalesce to find kernel cage regions
 * within an mnode. Looks for the lowest range between lo and hi.
 *
 * Kernel cage memory is defined between kcage_glist and kcage_current_glist.
 * Non-cage memory is defined between kcage_current_glist and list end.
 *
 * If incage is set, returns the lowest kcage range. Otherwise returns lowest
 * non-cage range.
 *
 * Returns zero on success and nlo, nhi:
 *	lo <= nlo < nhi <= hi
 * Returns non-zero if no overlapping range is found.
 */
int
kcage_next_range(int incage, pfn_t lo, pfn_t hi,
    pfn_t *nlo, pfn_t *nhi)
{
	struct kcage_glist *lp;
	pfn_t tlo = hi;
	pfn_t thi = hi;

	ASSERT(lo <= hi);

	/*
	 * Reader lock protects the list, but kcage_get_pfn
	 * running concurrently may advance kcage_current_glist
	 * and also update kcage_current_glist->curr. Page
	 * coalesce can handle this race condition.
	 */
	rw_enter(&kcage_range_rwlock, RW_READER);

	for (lp = incage ? kcage_glist : kcage_current_glist;
	    lp != NULL; lp = lp->next) {

		pfn_t klo, khi;

		/* find the range limits in this element */
		if ((incage && lp->decr) || (!incage && !lp->decr)) {
			klo = lp->curr;
			khi = lp->lim;
		} else {
			klo = lp->base;
			khi = lp->curr;
		}

		/* handle overlap */
		if (klo < tlo && klo < khi && lo < khi && klo < hi) {
			tlo = MAX(lo, klo);
			thi = MIN(hi, khi);
			if (tlo == lo)
				break;
		}

		/* check end of kcage */
		if (incage && lp == kcage_current_glist) {
			break;
		}
	}

	rw_exit(&kcage_range_rwlock);

	/* return non-zero if no overlapping range found */
	if (tlo == thi)
		return (1);

	ASSERT(lo <= tlo && tlo < thi && thi <= hi);

	/* return overlapping range */
	*nlo = tlo;
	*nhi = thi;
	return (0);
}

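/*
 * Build the growth list from the platform's memlist (walked low-to-high,
 * or high-to-low when d is KCAGE_DOWN) and then construct the cage via
 * kcage_init().
 */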
void
kcage_range_init(struct memlist *ml, kcage_dir_t d, pgcnt_t preferred_size)
{
	int ret = 0;

	ASSERT(kcage_arena == NULL);
	kcage_arena = vmem_create("kcage_arena", NULL, 0, sizeof (uint64_t),
	    segkmem_alloc, segkmem_free, heap_arena, 0, VM_SLEEP);
	ASSERT(kcage_arena != NULL);

	if (d == KCAGE_DOWN) {
		while (ml->ml_next != NULL)
			ml = ml->ml_next;
	}

	rw_enter(&kcage_range_rwlock, RW_WRITER);

	while (ml != NULL) {
		ret = kcage_range_add_internal(btop(ml->ml_address),
		    btop(ml->ml_size), d);
		if (ret)
			panic("kcage_range_add_internal failed: "
			    "ml=%p, ret=0x%x\n", (void *)ml, ret);

		ml = (d == KCAGE_DOWN ? ml->ml_prev : ml->ml_next);
	}

	rw_exit(&kcage_range_rwlock);

	if (ret == 0)
		kcage_init(preferred_size);
}

/*
 * Third arg controls direction of growth: 0: increasing pfns,
 * 1: decreasing.
 */
static int
kcage_range_add_internal(pfn_t base, pgcnt_t npgs, kcage_dir_t d)
{
	struct kcage_glist *new, **lpp;
	pfn_t lim;

	ASSERT(rw_write_held(&kcage_range_rwlock));

	ASSERT(npgs != 0);
	if (npgs == 0)
		return (EINVAL);

	lim = base + npgs;

	ASSERT(lim > base);
	if (lim <= base)
		return (EINVAL);

	new = kcage_glist_alloc();
	if (new == NULL) {
		return (ENOMEM);
	}

	new->base = base;
	new->lim = lim;
	new->decr = (d == KCAGE_DOWN);
	if (new->decr != 0)
		new->curr = new->lim;
	else
		new->curr = new->base;
	/*
	 * Any overlapping existing ranges are removed by deleting
	 * from the new list as we search for the tail.
	 */
	lpp = &kcage_glist;
	while (*lpp != NULL) {
		int ret;
		ret = kcage_glist_delete((*lpp)->base, (*lpp)->lim, &new);
		if (ret != 0)
			return (ret);
		lpp = &(*lpp)->next;
	}

	*lpp = new;

	if (kcage_current_glist == NULL) {
		kcage_current_glist = kcage_glist;
	}

	return (0);
}

int
kcage_range_add(pfn_t base, pgcnt_t npgs, kcage_dir_t d)
{
	int ret;

	rw_enter(&kcage_range_rwlock, RW_WRITER);
	ret = kcage_range_add_internal(base, npgs, d);
	rw_exit(&kcage_range_rwlock);
	return (ret);
}

/*
 * Calls to add and delete must be protected by kcage_range_rwlock
 */
static int
kcage_range_delete_internal(pfn_t base, pgcnt_t npgs)
{
	struct kcage_glist *lp;
	pfn_t lim;

	ASSERT(rw_write_held(&kcage_range_rwlock));

	ASSERT(npgs != 0);
	if (npgs == 0)
		return (EINVAL);

	lim = base + npgs;

	ASSERT(lim > base);
	if (lim <= base)
		return (EINVAL);

	/*
	 * Check if the delete is OK first as a number of elements
	 * might be involved and it will be difficult to go
	 * back and undo (can't just add the range back in).
	 */
	for (lp = kcage_glist; lp != NULL; lp = lp->next) {
		/*
		 * If there have been no pages allocated from this
		 * element, we don't need to check it.
		 */
		if ((lp->decr == 0 && lp->curr == lp->base) ||
		    (lp->decr != 0 && lp->curr == lp->lim))
			continue;
		/*
		 * If the element does not overlap, it's OK.
		 */
		if (base >= lp->lim || lim <= lp->base)
			continue;
		/*
		 * Overlapping element: Does the range to be deleted
		 * overlap the area already used? If so fail.
		 */
		if (lp->decr == 0 && base < lp->curr && lim >= lp->base) {
			return (EBUSY);
		}
		if (lp->decr != 0 && base < lp->lim && lim >= lp->curr) {
			return (EBUSY);
		}
	}
	return (kcage_glist_delete(base, lim, &kcage_glist));
}

int
kcage_range_delete(pfn_t base, pgcnt_t npgs)
{
	int ret;

	rw_enter(&kcage_range_rwlock, RW_WRITER);
	ret = kcage_range_delete_internal(base, npgs);
	rw_exit(&kcage_range_rwlock);
	return (ret);
}

/*
 * Calls to add and delete must be protected by kcage_range_rwlock.
 * This routine gets called after successful Solaris memory
 * delete operation from DR post memory delete routines.
 */
static int
kcage_range_delete_post_mem_del_internal(pfn_t base, pgcnt_t npgs)
{
	pfn_t lim;

	ASSERT(rw_write_held(&kcage_range_rwlock));

	ASSERT(npgs != 0);
	if (npgs == 0)
		return (EINVAL);

	lim = base + npgs;

	ASSERT(lim > base);
	if (lim <= base)
		return (EINVAL);

	return (kcage_glist_delete(base, lim, &kcage_glist));
}

int
kcage_range_delete_post_mem_del(pfn_t base, pgcnt_t npgs)
{
	int ret;

	rw_enter(&kcage_range_rwlock, RW_WRITER);
	ret = kcage_range_delete_post_mem_del_internal(base, npgs);
	rw_exit(&kcage_range_rwlock);
	return (ret);
}

/*
 * No locking is required here as the whole operation is covered
 * by kcage_range_rwlock writer lock.
 */
static struct kcage_glist *
kcage_glist_alloc(void)
{
	struct kcage_glist *new;

	if ((new = kcage_glist_freelist) != NULL) {
		kcage_glist_freelist = new->next;
	} else if (kernel_cage_enable) {
		new = vmem_alloc(kcage_arena, sizeof (*new), VM_NOSLEEP);
	} else {
		/*
		 * On DR supported platforms we allow memory add
		 * even when kernel cage is disabled. "kcage_arena" is
		 * created only when kernel cage is enabled.
		 */
		new = kmem_zalloc(sizeof (*new), KM_NOSLEEP);
	}

	if (new != NULL)
		bzero(new, sizeof (*new));

	return (new);
}

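/*
 * Glist elements are never handed back to vmem/kmem; they are kept on an
 * internal freelist that kcage_glist_alloc() reuses first.
 */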
static void
kcage_glist_free(struct kcage_glist *lp)
{
	lp->next = kcage_glist_freelist;
	kcage_glist_freelist = lp;
}

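/*
 * Remove the range [base, lim) from every element of the list at *lpp,
 * splitting an element in two when the deleted range falls strictly
 * inside it.
 */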
static int
kcage_glist_delete(pfn_t base, pfn_t lim, struct kcage_glist **lpp)
{
	struct kcage_glist *lp, *prev = *lpp;

	while ((lp = *lpp) != NULL) {
		if (lim > lp->base && base < lp->lim) {
			/* The delete range overlaps this element. */
			if (base <= lp->base && lim >= lp->lim) {
				/* Delete whole element. */
				*lpp = lp->next;
				if (lp == kcage_current_glist) {
					/* This can never happen. */
					ASSERT(kcage_current_glist != prev);
					kcage_current_glist = prev;
				}
				kcage_glist_free(lp);
				continue;
			}

			/* Partial delete. */
			if (base > lp->base && lim < lp->lim) {
				struct kcage_glist *new;

				/*
				 * Remove a section from the middle,
				 * need to allocate a new element.
				 */
				new = kcage_glist_alloc();
				if (new == NULL) {
					return (ENOMEM);
				}

				/*
				 * Transfer unused range to new.
				 * Edit lp in place to preserve
				 * kcage_current_glist.
				 */
				new->decr = lp->decr;
				if (new->decr != 0) {
					new->base = lp->base;
					new->lim = base;
					new->curr = base;

					lp->base = lim;
				} else {
					new->base = lim;
					new->lim = lp->lim;
					new->curr = new->base;

					lp->lim = base;
				}

				/* Insert new. */
				new->next = lp->next;
				lp->next = new;
				lpp = &lp->next;
			} else {
				/* Delete part of current block. */
				if (base > lp->base) {
					ASSERT(lim >= lp->lim);
					ASSERT(base < lp->lim);
					if (lp->decr != 0 &&
					    lp->curr == lp->lim)
						lp->curr = base;
					lp->lim = base;
				} else {
					ASSERT(base <= lp->base);
					ASSERT(lim > lp->base);
					if (lp->decr == 0 &&
					    lp->curr == lp->base)
						lp->curr = lim;
					lp->base = lim;
				}
			}
		}
		prev = *lpp;
		lpp = &(*lpp)->next;
	}

	return (0);
}

/*
 * If lockit is 1, kcage_get_pfn holds the
 * reader lock for kcage_range_rwlock.
 * Changes to lp->curr can cause race conditions, but
 * they are handled by higher level code (see kcage_next_range.)
 */
static pfn_t
kcage_get_pfn(int lockit)
{
	struct kcage_glist *lp;
	pfn_t pfn = PFN_INVALID;

	if (lockit && !rw_tryenter(&kcage_range_rwlock, RW_READER))
		return (pfn);

	lp = kcage_current_glist;
	while (lp != NULL) {
		if (lp->decr != 0) {
			if (lp->curr != lp->base) {
				pfn = --lp->curr;
				break;
			}
		} else {
			if (lp->curr != lp->lim) {
				pfn = lp->curr++;
				break;
			}
		}

		lp = lp->next;
		if (lp)
			kcage_current_glist = lp;
	}

	if (lockit)
		rw_exit(&kcage_range_rwlock);
	return (pfn);
}

/*
 * Walk the physical address space of the cage.
 * This routine does not guarantee to return PFNs in the order
 * in which they were allocated to the cage. Instead, it walks
 * each range as it appears on the growth list, returning the PFNs
 * of each range in ascending order.
 *
 * To begin scanning at lower edge of cage, reset should be nonzero.
 * To step through cage, reset should be zero.
 *
 * PFN_INVALID will be returned when the upper end of the cage is
 * reached -- indicating a full scan of the cage has been completed since
 * previous reset. PFN_INVALID will continue to be returned until
 * kcage_walk_cage is reset.
 *
 * It is possible to receive a PFN_INVALID result on reset if a growth
 * list is not installed or if none of the PFNs in the installed list have
 * been allocated to the cage. In other words, there is no cage.
 *
 * Caller need not hold kcage_range_rwlock while calling this function
 * as the front part of the list is static - pages never come out of
 * the cage.
 *
 * The caller is expected to only be kcage_cageout().
 */
static pfn_t
kcage_walk_cage(int reset)
{
	static struct kcage_glist *lp = NULL;
	static pfn_t pfn;

	if (reset)
		lp = NULL;
	if (lp == NULL) {
		lp = kcage_glist;
		pfn = PFN_INVALID;
	}
again:
	if (pfn == PFN_INVALID) {
		if (lp == NULL)
			return (PFN_INVALID);

		if (lp->decr != 0) {
			/*
			 * In this range the cage grows from the highest
			 * address towards the lowest.
			 * Arrange to return pfns from curr to lim-1,
			 * inclusive, in ascending order.
			 */

			pfn = lp->curr;
		} else {
			/*
			 * In this range the cage grows from the lowest
			 * address towards the highest.
			 * Arrange to return pfns from base to curr,
			 * inclusive, in ascending order.
			 */

			pfn = lp->base;
		}
	}

	if (lp->decr != 0) {		/* decrementing pfn */
		if (pfn == lp->lim) {
			/* Don't go beyond the static part of the glist. */
			if (lp == kcage_current_glist)
				lp = NULL;
			else
				lp = lp->next;
			pfn = PFN_INVALID;
			goto again;
		}

		ASSERT(pfn >= lp->curr && pfn < lp->lim);
	} else {			/* incrementing pfn */
		if (pfn == lp->curr) {
			/* Don't go beyond the static part of the glist. */
			if (lp == kcage_current_glist)
				lp = NULL;
			else
				lp = lp->next;
			pfn = PFN_INVALID;
			goto again;
		}

		ASSERT(pfn >= lp->base && pfn < lp->curr);
	}

	return (pfn++);
}

/*
 * Callback functions to recalculate cage thresholds after
 * Kphysm memory add/delete operations.
 */
/*ARGSUSED*/
static void
kcage_kphysm_postadd_cb(void *arg, pgcnt_t delta_pages)
{
	kcage_recalc_thresholds();
}

/*ARGSUSED*/
static int
kcage_kphysm_predel_cb(void *arg, pgcnt_t delta_pages)
{
	/* TODO: when should cage refuse memory delete requests? */
	return (0);
}

/*ARGSUSED*/
static void
kcage_kphysm_postdel_cb(void *arg, pgcnt_t delta_pages, int cancelled)
{
	kcage_recalc_thresholds();
}

static kphysm_setup_vector_t kcage_kphysm_vectors = {
	KPHYSM_SETUP_VECTOR_VERSION,
	kcage_kphysm_postadd_cb,
	kcage_kphysm_predel_cb,
	kcage_kphysm_postdel_cb
};

/*
 * This is called before a CPR suspend and after a CPR resume.  We have to
 * turn off kcage_cageout_ready before a suspend, and turn it back on after a
 * restart.
 */
/*ARGSUSED*/
static boolean_t
kcage_cageout_cpr(void *arg, int code)
{
	if (code == CB_CODE_CPR_CHKPT) {
		ASSERT(kcage_cageout_ready);
		kcage_cageout_ready = 0;
		return (B_TRUE);
	} else if (code == CB_CODE_CPR_RESUME) {
		ASSERT(kcage_cageout_ready == 0);
		kcage_cageout_ready = 1;
		return (B_TRUE);
	}
	return (B_FALSE);
}

/*
 * kcage_recalc_preferred_size() increases initial cage size to improve large
 * page availability when lp for kmem is enabled and kpr is disabled
 */
static pgcnt_t
kcage_recalc_preferred_size(pgcnt_t preferred_size)
{
	if (SEGKMEM_USE_LARGEPAGES && segkmem_reloc == 0) {
		pgcnt_t lpmincage = kcage_kmemlp_mincage;
		if (lpmincage == 0) {
			lpmincage = MIN(P2ROUNDUP(((physmem * PAGESIZE) / 8),
			    segkmem_heaplp_quantum), 0x40000000UL) / PAGESIZE;
		}
		kcage_kmemlp_mincage = MIN(lpmincage,
		    (segkmem_kmemlp_max / PAGESIZE));
		preferred_size = MAX(kcage_kmemlp_mincage, preferred_size);
	}
	return (preferred_size);
}

/*
 * Kcage_init() builds the cage and initializes the cage thresholds.
 * The size of the cage is determined by the argument preferred_size,
 * or the actual amount of memory, whichever is smaller.
 */
static void
kcage_init(pgcnt_t preferred_size)
{
	pgcnt_t wanted;
	pfn_t pfn;
	page_t *pp;
	kstat_t *ksp;

	extern void page_list_noreloc_startup(page_t *);

	ASSERT(!kcage_on);

	/* increase preferred cage size for lp for kmem */
	preferred_size = kcage_recalc_preferred_size(preferred_size);

	/* Debug note: initialize this now so early expansions can stat */
	KCAGE_STAT_INIT_SCAN_INDEX;

	/*
	 * Initialize cage thresholds and install kphysm callback.
	 * If we can't arrange to have the thresholds track with
	 * available physical memory, then the cage thresholds may
	 * end up over time at levels that adversely affect system
	 * performance; so, bail out.
	 */
	kcage_recalc_thresholds();
	if (kphysm_setup_func_register(&kcage_kphysm_vectors, NULL)) {
		ASSERT(0);		/* Catch this in DEBUG kernels. */
		return;
	}

	/*
	 * Limit startup cage size within the range of kcage_minfree
	 * and availrmem, inclusively.
	 */
	wanted = MIN(MAX(preferred_size, kcage_minfree), availrmem);

	/*
	 * Construct the cage. PFNs are allocated from the glist. It
	 * is assumed that the list has been properly ordered for the
	 * platform by the platform code. Typically, this is as simple
	 * as calling kcage_range_init(phys_avail, decr), where decr is
	 * 1 if the kernel has been loaded into upper end of physical
	 * memory, or 0 if the kernel has been loaded at the low end.
	 *
	 * Note: it is assumed that we are in the startup flow, so there
	 * is no reason to grab the page lock.
	 */
	kcage_freemem = 0;
	pfn = PFN_INVALID;		/* prime for alignment test */
	while (wanted != 0) {
		if ((pfn = kcage_get_pfn(0)) == PFN_INVALID)
			break;

		if ((pp = page_numtopp_nolock(pfn)) != NULL) {
			KCAGEPAGETS_INC();
			/*
			 * Set the noreloc state on the page.
			 * If the page is free and not already
			 * on the noreloc list then move it.
			 */
			if (PP_ISFREE(pp)) {
				if (PP_ISNORELOC(pp) == 0)
					page_list_noreloc_startup(pp);
			} else {
				ASSERT(pp->p_szc == 0);
				PP_SETNORELOC(pp);
			}
		}
		PLCNT_XFER_NORELOC(pp);
		wanted -= 1;
	}

	/*
	 * Need to go through and find kernel allocated pages
	 * and capture them into the Cage.  These will primarily
	 * be pages gotten through boot_alloc().
	 */
	for (pp = vmobject_get_head(&kvp.v_object);
	    pp != NULL;
	    pp = vmobject_get_next(&kvp.v_object, pp)) {
		ASSERT(!PP_ISFREE(pp));
		ASSERT(pp->p_szc == 0);
		if (PP_ISNORELOC(pp) == 0) {
			PP_SETNORELOC(pp);
			PLCNT_XFER_NORELOC(pp);
		}
	}

	kcage_on = 1;

	/*
	 * CB_CL_CPR_POST_KERNEL is the class that executes from cpr_suspend()
	 * after the cageout thread is blocked, and executes from cpr_resume()
	 * before the cageout thread is restarted.  By executing in this class,
	 * we are assured that the kernel cage thread won't miss wakeup calls
	 * and also CPR's larger kmem_alloc requests will not fail after
	 * CPR shuts down the cageout kernel thread.
	 */
	(void) callb_add(kcage_cageout_cpr, NULL, CB_CL_CPR_POST_KERNEL,
	    "cageout");

	/*
	 * Coalesce pages to improve large page availability. A better fix
	 * would be to coalesce pages as they are included in the cage
	 */
	if (SEGKMEM_USE_LARGEPAGES) {
		extern void page_freelist_coalesce_all(int mnode);
		page_freelist_coalesce_all(-1);	/* do all mnodes */
	}

	ksp = kstat_create("kcage", 0, "kcage_page_list", "misc",
	    KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VAR_SIZE | KSTAT_FLAG_VIRTUAL);
	if (ksp != NULL) {
		ksp->ks_update = kcage_kstat_update;
		ksp->ks_snapshot = kcage_kstat_snapshot;
		ksp->ks_lock = &kcage_kstat_lock; /* XXX - not really needed */
		kstat_install(ksp);
	}
}

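/*
 * kstat update: count the glist elements that currently contain caged
 * pages so the snapshot buffer can be sized (one address/size pair,
 * i.e. two uint64_t's, per element).
 */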
static int
kcage_kstat_update(kstat_t *ksp, int rw)
{
	struct kcage_glist *lp;
	uint_t count;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	count = 0;
	rw_enter(&kcage_range_rwlock, RW_WRITER);
	for (lp = kcage_glist; lp != NULL; lp = lp->next) {
		if (lp->decr) {
			if (lp->curr != lp->lim) {
				count++;
			}
		} else {
			if (lp->curr != lp->base) {
				count++;
			}
		}
	}
	rw_exit(&kcage_range_rwlock);

	ksp->ks_ndata = count;
	ksp->ks_data_size = count * 2 * sizeof (uint64_t);

	return (0);
}

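/*
 * kstat snapshot: emit an (address, size) pair, in bytes, describing the
 * caged portion of each glist element.
 */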
static int
kcage_kstat_snapshot(kstat_t *ksp, void *buf, int rw)
{
	struct kcage_glist *lp;
	struct memunit {
		uint64_t address;
		uint64_t size;
	} *kspmem;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ksp->ks_snaptime = gethrtime();

	kspmem = (struct memunit *)buf;
	rw_enter(&kcage_range_rwlock, RW_WRITER);
	for (lp = kcage_glist; lp != NULL; lp = lp->next, kspmem++) {
		if ((caddr_t)kspmem >= (caddr_t)buf + ksp->ks_data_size)
			break;

		if (lp->decr) {
			if (lp->curr != lp->lim) {
				kspmem->address = ptob(lp->curr);
				kspmem->size = ptob(lp->lim - lp->curr);
			}
		} else {
			if (lp->curr != lp->base) {
				kspmem->address = ptob(lp->base);
				kspmem->size = ptob(lp->curr - lp->base);
			}
		}
	}
	rw_exit(&kcage_range_rwlock);

	return (0);
}

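/*
 * Recompute the cage thresholds from total_pages.  Values that were
 * preset (e.g. via /etc/system) are remembered on the first call and
 * reapplied afterwards; thresholds that are still zero get defaults.
 */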
void
kcage_recalc_thresholds()
{
	static int first = 1;
	static pgcnt_t init_lotsfree;
	static pgcnt_t init_desfree;
	static pgcnt_t init_minfree;
	static pgcnt_t init_throttlefree;
	static pgcnt_t init_reserve;

	/* TODO: any reason to take more care than this with live editing? */
	mutex_enter(&kcage_cageout_mutex);
	mutex_enter(&freemem_lock);

	if (first) {
		first = 0;
		init_lotsfree = kcage_lotsfree;
		init_desfree = kcage_desfree;
		init_minfree = kcage_minfree;
		init_throttlefree = kcage_throttlefree;
		init_reserve = kcage_reserve;
	} else {
		kcage_lotsfree = init_lotsfree;
		kcage_desfree = init_desfree;
		kcage_minfree = init_minfree;
		kcage_throttlefree = init_throttlefree;
		kcage_reserve = init_reserve;
	}

	if (kcage_lotsfree == 0)
		kcage_lotsfree = MAX(32, total_pages / 256);

	if (kcage_minfree == 0)
		kcage_minfree = MAX(32, kcage_lotsfree / 2);

	if (kcage_desfree == 0)
		kcage_desfree = MAX(32, kcage_minfree);

	if (kcage_throttlefree == 0)
		kcage_throttlefree = MAX(32, kcage_minfree / 2);

	if (kcage_reserve == 0)
		kcage_reserve = MIN(32, kcage_throttlefree / 2);

	mutex_exit(&freemem_lock);
	mutex_exit(&kcage_cageout_mutex);

	if (kcage_cageout_ready) {
		if (kcage_freemem < kcage_desfree)
			kcage_cageout_wakeup();

		if (kcage_needfree) {
			mutex_enter(&kcage_throttle_mutex);
			cv_broadcast(&kcage_throttle_cv);
			mutex_exit(&kcage_throttle_mutex);
		}
	}
}

/*
 * Pageout interface:
 * kcage_cageout_init()
 */
void
kcage_cageout_init()
{
	if (kcage_on) {
		(void) lwp_kernel_create(proc_pageout, kcage_cageout, NULL,
		    TS_RUN, maxclsyspri - 1);
	}
}

/*
 * VM Interfaces:
 * kcage_create_throttle()
 * kcage_freemem_add()
 * kcage_freemem_sub()
 */

/*
 * Wakeup cageout thread and throttle waiting for the number of pages
 * requested to become available.  For non-critical requests, a
 * timeout is added, since freemem accounting is separate from cage
 * freemem accounting: it's possible for us to get stuck and not make
 * forward progress even though there was sufficient freemem before
 * arriving here.
 */
int
kcage_create_throttle(pgcnt_t npages, int flags)
{
	KCAGE_STAT_INCR(kct_calls);		/* unprotected incr. */

	/*
	 * Obviously, we can't throttle the cageout thread since
	 * we depend on it.  We also can't throttle the panic thread.
	 */
	if (curthread == kcage_cageout_thread || panicstr) {
		KCAGE_STAT_INCR(kct_cageout);	/* unprotected incr. */
		return (KCT_CRIT);
	}

	/*
	 * Don't throttle threads which are critical for proper
	 * vm management if we're above kcage_throttlefree or
	 * if freemem is very low.
	 */
	if (NOMEMWAIT()) {
		if (kcage_freemem > kcage_throttlefree + npages) {
			KCAGE_STAT_INCR(kct_exempt);	/* unprotected incr. */
			return (KCT_CRIT);
		} else if (freemem < minfree) {
			KCAGE_STAT_INCR(kct_critical);	/* unprotected incr. */
			return (KCT_CRIT);
		}
	}

	/*
	 * Don't throttle real-time threads if kcage_freemem > kcage_reserve.
	 */
	if (DISP_PRIO(curthread) > maxclsyspri &&
	    kcage_freemem > kcage_reserve) {
		KCAGE_STAT_INCR(kct_exempt);	/* unprotected incr. */
		return (KCT_CRIT);
	}

	/*
	 * Cause all other threads (which are assumed to not be
	 * critical to cageout) to wait here until their request
	 * can be satisfied. Be a little paranoid and wake the
	 * kernel cage on each loop through this logic.
	 */
	while (kcage_freemem < kcage_throttlefree + npages) {
		ASSERT(kcage_on);
		if (kcage_cageout_ready) {
			mutex_enter(&kcage_throttle_mutex);

			kcage_needfree += npages;
			KCAGE_STAT_INCR(kct_wait);

			kcage_cageout_wakeup();
			KCAGE_STAT_INCR(kct_cagewake);

			cv_wait(&kcage_throttle_cv, &kcage_throttle_mutex);

			kcage_needfree -= npages;

			mutex_exit(&kcage_throttle_mutex);
		} else {
			/*
			 * NOTE: atomics are used just in case we enter
			 * mp operation before the cageout thread is ready.
			 */
			atomic_add_long(&kcage_needfree, npages);

			kcage_cageout_wakeup();
			KCAGE_STAT_INCR(kct_cagewake);	/* unprotected incr. */

			atomic_add_long(&kcage_needfree, -npages);
		}

		if (NOMEMWAIT() && freemem < minfree) {
			return (KCT_CRIT);
		}
		if ((flags & PG_WAIT) == 0) {
			pgcnt_t limit = (flags & PG_NORMALPRI) ?
			    throttlefree : pageout_reserve;

			if ((kcage_freemem < kcage_throttlefree + npages) &&
			    (freemem < limit + npages)) {
				return (KCT_FAILURE);
			} else {
				return (KCT_NONCRIT);
			}
		}
	}
	return (KCT_NONCRIT);
}

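/*
 * Credit npages to kcage_freemem.  Wakes pcgs() waiters and, once
 * kcage_freemem reaches kcage_throttlefree + kcage_needfree, the threads
 * throttled in kcage_create_throttle().
 */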
void
kcage_freemem_add(pgcnt_t npages)
{
	extern void wakeup_pcgs(void);

	atomic_add_long(&kcage_freemem, npages);

	wakeup_pcgs();  /* wakeup threads in pcgs() */

	if (kcage_needfree != 0 &&
	    kcage_freemem >= (kcage_throttlefree + kcage_needfree)) {

		mutex_enter(&kcage_throttle_mutex);
		cv_broadcast(&kcage_throttle_cv);
		KCAGE_STAT_INCR(kfa_trottlewake);
		mutex_exit(&kcage_throttle_mutex);
	}
}

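/*
 * Debit npages from kcage_freemem and kick the cageout thread when the
 * cage drops below kcage_desfree.
 */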
void
kcage_freemem_sub(pgcnt_t npages)
{
	atomic_add_long(&kcage_freemem, -npages);

	if (kcage_freemem < kcage_desfree) {
		kcage_cageout_wakeup();
		KCAGE_STAT_INCR(kfs_cagewake); /* unprotected incr. */
	}
}

/*
 * return 0 on failure and 1 on success.
 */
static int
kcage_setnoreloc_pages(page_t *rootpp, se_t se)
{
	pgcnt_t npgs, i;
	page_t *pp;
	pfn_t rootpfn = page_pptonum(rootpp);
	uint_t szc;

	ASSERT(!PP_ISFREE(rootpp));
	ASSERT(PAGE_LOCKED_SE(rootpp, se));
	if (!group_page_trylock(rootpp, se)) {
		return (0);
	}
	szc = rootpp->p_szc;
	if (szc == 0) {
		/*
		 * The szc of a locked page can only change for pages that are
		 * non-swapfs (i.e. anonymous memory) file system pages.
		 */
		VERIFY(rootpp->p_object != NULL);
		ASSERT(rootpp->p_vnode != NULL);
		ASSERT(!PP_ISKAS(rootpp));
		ASSERT(!IS_SWAPFSVP(rootpp->p_vnode));
		PP_SETNORELOC(rootpp);
		return (1);
	}
	npgs = page_get_pagecnt(szc);
	ASSERT(IS_P2ALIGNED(rootpfn, npgs));
	pp = rootpp;
	for (i = 0; i < npgs; i++, pp++) {
		ASSERT(PAGE_LOCKED_SE(pp, se));
		ASSERT(!PP_ISFREE(pp));
		ASSERT(pp->p_szc == szc);
		PP_SETNORELOC(pp);
	}
	group_page_unlock(rootpp);
	return (1);
}

/*
 * Attempt to convert page to a caged page (set the P_NORELOC flag).
 * If successful and the page is free, move the page to the tail of
 * whichever list it is on.
 * Returns:
 *   EBUSY  page already locked, assimilated but not free.
 *   ENOMEM page assimilated, but memory too low to relocate. Page not free.
 *   EAGAIN page not assimilated. Page not free.
 *   ERANGE page assimilated. Page not root.
 *   0      page assimilated. Page free.
 * *nfreedp number of pages freed.
 * NOTE: With error codes ENOMEM, EBUSY, and 0 (zero), there is no way
 * to distinguish between a page that was already a NORELOC page from
 * those newly converted to NORELOC pages by this invocation of
 * kcage_assimilate_page.
 */
static int
kcage_assimilate_page(page_t *pp, pgcnt_t *nfreedp)
{
	if (page_trylock(pp, SE_EXCL)) {
		if (PP_ISNORELOC(pp)) {
check_free_and_return:
			if (PP_ISFREE(pp)) {
				page_unlock(pp);
				*nfreedp = 0;
				return (0);
			} else {
				page_unlock(pp);
				return (EBUSY);
			}
			/*NOTREACHED*/
		}
	} else {
		if (page_trylock(pp, SE_SHARED)) {
			if (PP_ISNORELOC(pp))
				goto check_free_and_return;
		} else {
			return (EAGAIN);
		}
		if (!PP_ISFREE(pp)) {
			page_unlock(pp);
			return (EAGAIN);
		}

		/*
		 * Need to upgrade the lock on it and set the NORELOC
		 * bit. If it is free then remove it from the free
		 * list so that the platform free list code can keep
		 * NORELOC pages where they should be.
		 */

		/*
		 * Before doing anything, get the exclusive lock.
		 * This may fail (eg ISM pages are left shared locked).
		 * If the page is free this will leave a hole in the
		 * cage. There is no solution yet to this.
		 */
		if (!page_tryupgrade(pp)) {
			page_unlock(pp);
			return (EAGAIN);
		}
	}

	ASSERT(PAGE_EXCL(pp));

	if (PP_ISFREE(pp)) {
		int which = PP_ISAGED(pp) ? PG_FREE_LIST : PG_CACHE_LIST;

		page_list_sub(pp, which);
		ASSERT(pp->p_szc == 0);
		PP_SETNORELOC(pp);
		PLCNT_XFER_NORELOC(pp);
		page_list_add(pp, which | PG_LIST_TAIL);

		page_unlock(pp);
		*nfreedp = 1;
		return (0);
	} else {
		if (pp->p_szc != 0) {
			if (!kcage_setnoreloc_pages(pp, SE_EXCL)) {
				page_unlock(pp);
				return (EAGAIN);
			}
			ASSERT(PP_ISNORELOC(pp));
		} else {
			PP_SETNORELOC(pp);
		}
		PLCNT_XFER_NORELOC(pp);
		return (kcage_invalidate_page(pp, nfreedp));
	}
	/*NOTREACHED*/
}

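/*
 * Grow the cage: pull pfns from the growth list (via kcage_get_pfn())
 * and assimilate their pages until the free-page deficit is covered or
 * no more pfns are available.  Returns nonzero if any page was
 * assimilated.
 */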
static int
kcage_expand()
{
	int did_something = 0;

	spgcnt_t wanted;
	pfn_t pfn;
	page_t *pp;
	/* TODO: we don't really need n any more? */
	pgcnt_t n;
	pgcnt_t nf, nfreed;

	/*
	 * Expand the cage if available cage memory is really low. Calculate
	 * the amount required to return kcage_freemem to the level of
	 * kcage_lotsfree, or to satisfy throttled requests, whichever is
	 * more.  It is rare for their sum to create an artificial threshold
	 * above kcage_lotsfree, but it is possible.
	 *
	 * Exit early if expansion amount is equal to or less than zero.
	 * (<0 is possible if kcage_freemem rises suddenly.)
	 *
	 * Exit early when freemem drops below pageout_reserve plus the request.
	 */
	wanted = MAX(kcage_lotsfree, kcage_throttlefree + kcage_needfree)
	    - kcage_freemem;
	if (wanted <= 0) {
		return (0);
	} else if (freemem < pageout_reserve + wanted) {
		KCAGE_STAT_INCR(ke_lowfreemem);
		return (0);
	}

	KCAGE_STAT_INCR(ke_calls);
	KCAGE_STAT_SET_SCAN(ke_wanted, (uint_t)wanted);

	/*
	 * Assimilate more pages from the global page pool into the cage.
	 */
	n = 0;				/* number of pages PP_SETNORELOC'd */
	nf = 0;				/* number of those actually free */
	while (kcage_on && nf < wanted) {
		pfn = kcage_get_pfn(1);
		if (pfn == PFN_INVALID) {	/* eek! nowhere to grow */
			KCAGE_STAT_INCR(ke_nopfn);
			goto terminate;
		}

		KCAGE_STAT_INCR_SCAN(ke_examined);

		if ((pp = page_numtopp_nolock(pfn)) == NULL) {
			KCAGE_STAT_INCR(ke_nopaget);
			continue;
		}
		KCAGEPAGETS_INC();
		/*
		 * Sanity check. Skip this pfn if it is
		 * being deleted.
		 */
		if (pfn_is_being_deleted(pfn)) {
			KCAGE_STAT_INCR(ke_deleting);
			continue;
		}

		if (PP_ISNORELOC(pp)) {
			KCAGE_STAT_INCR(ke_isnoreloc);
			continue;
		}

		switch (kcage_assimilate_page(pp, &nfreed)) {
		case 0:		/* assimilated, page is free */
			KCAGE_STAT_NINCR_SCAN(ke_gotonefree, nfreed);
			did_something = 1;
			nf += nfreed;
			n++;
			break;

		case EBUSY:	/* assimilated, page not free */
		case ERANGE:	/* assimilated, page not root */
			KCAGE_STAT_INCR_SCAN(ke_gotone);
			did_something = 1;
			n++;
			break;

		case ENOMEM:	/* assimilated, but no mem */
			KCAGE_STAT_INCR(ke_terminate);
			did_something = 1;
			n++;
			goto terminate;

		case EAGAIN:	/* can't assimilate */
			KCAGE_STAT_INCR_SCAN(ke_lefthole);
			break;

		default:	/* catch this with debug kernels */
			ASSERT(0);
			break;
		}
	}

	/*
	 * Realign cage edge with the nearest physical address
	 * boundary for big pages. This is done to give us a
	 * better chance of actually getting usable big pages
	 * in the cage.
	 */

terminate:

	return (did_something);
}

/*
 * Relocate page opp (Original Page Pointer) from cage pool to page rpp
 * (Replacement Page Pointer) in the global pool. Page opp will be freed
 * if relocation is successful, otherwise it is only unlocked.
 * On entry, page opp must be exclusively locked and not free.
 * *nfreedp: number of pages freed.
 */
static int
kcage_relocate_page(page_t *pp, pgcnt_t *nfreedp)
{
	page_t *opp = pp;
	page_t *rpp = NULL;
	spgcnt_t npgs;
	int result;

	ASSERT(!PP_ISFREE(opp));
	ASSERT(PAGE_EXCL(opp));

	result = page_relocate(&opp, &rpp, 1, 1, &npgs, NULL);
	*nfreedp = npgs;
	if (result == 0) {
		while (npgs-- > 0) {
			page_t *tpp;

			ASSERT(rpp != NULL);
			tpp = rpp;
			page_sub(&rpp, tpp);
			page_unlock(tpp);
		}

		ASSERT(rpp == NULL);

		return (0);		/* success */
	}

	page_unlock(opp);
	return (result);
}

/*
 * Based on page_invalidate_pages()
 *
 * Kcage_invalidate_page() uses page_relocate() twice. Both instances
 * of use must be updated to match the new page_relocate() when it
 * becomes available.
 *
 * Return result of kcage_relocate_page or zero if page was directly freed.
 * *nfreedp: number of pages freed.
 */
static int
kcage_invalidate_page(page_t *pp, pgcnt_t *nfreedp)
{
	int result;

#if defined(__sparc)
	VERIFY(pp->p_object != &promvp.v_object);
	ASSERT(pp->p_vnode != &promvp);
#endif /* __sparc */
	ASSERT(!PP_ISFREE(pp));
	ASSERT(PAGE_EXCL(pp));

	/*
	 * Is this page involved in some I/O? shared?
	 * The page_struct_lock need not be acquired to
	 * examine these fields since the page has an
	 * "exclusive" lock.
	 */
	if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
		result = kcage_relocate_page(pp, nfreedp);
#ifdef KCAGE_STATS
		if (result == 0)
			KCAGE_STAT_INCR_SCAN(kip_reloclocked);
		else if (result == ENOMEM)
			KCAGE_STAT_INCR_SCAN(kip_nomem);
#endif
		return (result);
	}

	ASSERT(pp->p_vnode->v_type != VCHR);

	/*
	 * Unload the mappings and check if mod bit is set.
	 */
	(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);

	if (hat_ismod(pp)) {
		result = kcage_relocate_page(pp, nfreedp);
#ifdef KCAGE_STATS
		if (result == 0)
			KCAGE_STAT_INCR_SCAN(kip_relocmod);
		else if (result == ENOMEM)
			KCAGE_STAT_INCR_SCAN(kip_nomem);
#endif
		return (result);
	}

	if (!page_try_demote_pages(pp)) {
		KCAGE_STAT_INCR_SCAN(kip_demotefailed);
		page_unlock(pp);
		return (EAGAIN);
	}

	VN_DISPOSE(pp, B_INVAL, 0, kcred);
	KCAGE_STAT_INCR_SCAN(kip_destroy);
	*nfreedp = 1;
	return (0);
}

/*
 * Expand cage only if there is not enough memory to satisfy
 * current request. We only do one (complete) scan of the cage.
 * Dirty pages and pages with shared mappings are skipped;
 * Locked pages (p_lckcnt and p_cowcnt) are also skipped.
 * All other pages are freed (if they can be locked).
 * This may affect caching of user pages which are in cage by freeing/
 * reclaiming them more often. However cage is mainly for kernel (heap)
 * pages and we want to keep user pages outside of cage. The above policy
 * should also reduce cage expansion plus it should speed up cage mem
 * allocations.
 */
static void
kcage_cageout()
{
	pfn_t pfn;
	page_t *pp;
	callb_cpr_t cprinfo;
	int did_something;
	pfn_t start_pfn;
	ulong_t shared_level = 8;
	pgcnt_t nfreed;
#ifdef KCAGE_STATS
	clock_t scan_start;
#endif

	CALLB_CPR_INIT(&cprinfo, &kcage_cageout_mutex,
	    callb_generic_cpr, "cageout");

	mutex_enter(&kcage_cageout_mutex);
	kcage_cageout_thread = curthread;

	pfn = PFN_INVALID;		/* force scan reset */
	start_pfn = PFN_INVALID;	/* force init with 1st cage pfn */
	kcage_cageout_ready = 1;	/* switch kcage_cageout_wakeup mode */

loop:
	/*
	 * Wait here. Sooner or later, kcage_freemem_sub() will notice
	 * that kcage_freemem is less than kcage_desfree. When it does
	 * notice, kcage_freemem_sub() will wake us up via call to
	 * kcage_cageout_wakeup().
	 */
	CALLB_CPR_SAFE_BEGIN(&cprinfo);
	cv_wait(&kcage_cageout_cv, &kcage_cageout_mutex);
	CALLB_CPR_SAFE_END(&cprinfo, &kcage_cageout_mutex);

	KCAGE_STAT_INCR(kt_wakeups);
	KCAGE_STAT_SET_SCAN(kt_freemem_start, freemem);
	KCAGE_STAT_SET_SCAN(kt_kcage_freemem_start, kcage_freemem);
#ifdef KCAGE_STATS
	scan_start = ddi_get_lbolt();
#endif
	if (!kcage_on)
		goto loop;

	KCAGE_STAT_INCR(kt_scans);
	KCAGE_STAT_INCR_SCAN(kt_passes);

	did_something = 0;
	while (kcage_freemem < kcage_lotsfree + kcage_needfree) {

		if ((pfn = kcage_walk_cage(pfn == PFN_INVALID)) ==
		    PFN_INVALID) {
			break;
		}

		if (start_pfn == PFN_INVALID)
			start_pfn = pfn;
		else if (start_pfn == pfn) {
			/*
			 * Did a complete walk of kernel cage, but didn't free
			 * any pages.  If only one cpu is active then
			 * stop kernel cage walk and try expanding.
			 */
			if (cp_default.cp_ncpus == 1 && did_something == 0) {
				KCAGE_STAT_INCR(kt_cageout_break);
				break;
			}
		}

		pp = page_numtopp_nolock(pfn);
		if (pp == NULL) {
			continue;
		}

		KCAGE_STAT_INCR_SCAN(kt_examined);

		/*
		 * Do a quick PP_ISNORELOC() and PP_ISFREE test outside
		 * of the lock. If one is missed it will be seen next
		 * time through.
		 *
		 * Skip non-caged-pages. These pages can exist in the cage
		 * because, if during cage expansion a page is encountered
		 * that is long-term locked, the lock prevents the
		 * expansion logic from setting the P_NORELOC flag. Hence,
		 * non-caged-pages surrounded by caged-pages.
		 */
		if (!PP_ISNORELOC(pp)) {
			switch (kcage_assimilate_page(pp, &nfreed)) {
			case 0:
				did_something = 1;
				KCAGE_STAT_NINCR_SCAN(kt_gotonefree,
				    nfreed);
				break;

			case EBUSY:
			case ERANGE:
				did_something = 1;
				KCAGE_STAT_INCR_SCAN(kt_gotone);
				break;

			case EAGAIN:
			case ENOMEM:
				break;

			default:
				/* catch this with debug kernels */
				ASSERT(0);
				break;
			}

			continue;
		} else {
			if (PP_ISFREE(pp)) {
				continue;
			}

			if ((PP_ISKAS(pp) && pp->p_lckcnt > 0) ||
			    !page_trylock(pp, SE_EXCL)) {
				KCAGE_STAT_INCR_SCAN(kt_cantlock);
				continue;
			}

			/* P_NORELOC bit should not have gone away. */
			ASSERT(PP_ISNORELOC(pp));
			if (PP_ISFREE(pp) || (PP_ISKAS(pp) &&
			    pp->p_lckcnt > 0)) {
				page_unlock(pp);
				continue;
			}

			if (hat_page_checkshare(pp, shared_level)) {
				page_unlock(pp);
				KCAGE_STAT_INCR_SCAN(kt_skipshared);
				continue;
			}

			if (kcage_invalidate_page(pp, &nfreed) == 0) {
				did_something = 1;
				KCAGE_STAT_NINCR_SCAN(kt_gotonefree, nfreed);
			}

			/*
			 * No need to drop the page lock here.
			 * Kcage_invalidate_page has done that for us
			 * either explicitly or through a page_free.
			 */
		}
	}

	if (kcage_freemem < kcage_throttlefree + kcage_needfree)
		(void) kcage_expand();

	if (kcage_on && kcage_cageout_ready)
		cv_broadcast(&kcage_throttle_cv);

	KCAGE_STAT_SET_SCAN(kt_freemem_end, freemem);
	KCAGE_STAT_SET_SCAN(kt_kcage_freemem_end, kcage_freemem);
	KCAGE_STAT_SET_SCAN(kt_ticks, ddi_get_lbolt() - scan_start);
	KCAGE_STAT_INC_SCAN_INDEX;
	goto loop;
	/*NOTREACHED*/
}

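/*
 * Nudge the cageout thread (called e.g. from kcage_freemem_sub() and
 * kcage_recalc_thresholds()).  Until that thread is ready, the caller
 * expands the cage itself when cage memory is critically low.
 */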
void
kcage_cageout_wakeup()
{
	if (mutex_tryenter(&kcage_cageout_mutex)) {
		if (kcage_cageout_ready) {
			cv_signal(&kcage_cageout_cv);
		} else if (kcage_freemem < kcage_minfree || kcage_needfree) {
			/*
			 * Available cage memory is really low. Time to
			 * start expanding the cage. However, the
			 * kernel cage thread is not yet ready to
			 * do the work. Use *this* thread, which is
			 * most likely to be t0, to do the work.
			 */
			KCAGE_STAT_INCR(kcw_expandearly);
			(void) kcage_expand();
			KCAGE_STAT_INC_SCAN_INDEX;
		}

		mutex_exit(&kcage_cageout_mutex);
	}
	/* else, kernel cage thread is already running */
}

void
kcage_tick()
{
	/*
	 * Once per second we wake up all the threads throttled
	 * waiting for cage memory, in case we've become stuck
	 * and haven't made forward progress expanding the cage.
	 */
	if (kcage_on && kcage_cageout_ready)
		cv_broadcast(&kcage_throttle_cv);
}