/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Kernel Physical Mapping (segkpm) hat interface routines for sun4u.
 */
#include <sys/types.h>
#include <vm/hat_sfmmu.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/machsystm.h>
#include <vm/seg_kpm.h>
#include <sys/cpu_module.h>
#include <vm/mach_kpm.h>
/* kpm prototypes */
static caddr_t	sfmmu_kpm_mapin(page_t *);
static void	sfmmu_kpm_mapout(page_t *, caddr_t);
static int	sfmmu_kpme_lookup(struct kpme *, page_t *);
static void	sfmmu_kpme_add(struct kpme *, page_t *);
static void	sfmmu_kpme_sub(struct kpme *, page_t *);
static caddr_t	sfmmu_kpm_getvaddr(page_t *, int *);
static int	sfmmu_kpm_fault(caddr_t, struct memseg *, page_t *);
static int	sfmmu_kpm_fault_small(caddr_t, struct memseg *, page_t *);
static void	sfmmu_kpm_vac_conflict(page_t *, caddr_t);
void	sfmmu_kpm_pageunload(page_t *);
void	sfmmu_kpm_vac_unload(page_t *, caddr_t);
static void	sfmmu_kpm_demap_large(caddr_t);
static void	sfmmu_kpm_demap_small(caddr_t);
static void	sfmmu_kpm_demap_tlbs(caddr_t);
void	sfmmu_kpm_hme_unload(page_t *);
kpm_hlk_t *sfmmu_kpm_kpmp_enter(page_t *, pgcnt_t);
void	sfmmu_kpm_kpmp_exit(kpm_hlk_t *kpmp);
void	sfmmu_kpm_page_cache(page_t *, int, int);

extern uint_t vac_colors;
/*
 * Kernel Physical Mapping (kpm) facility
 */

/* -- hat_kpm interface section -- */
/*
 * Mapin a locked page and return the vaddr.
 * When a kpme is provided by the caller it is added to
 * the page p_kpmelist. The page to be mapped in must
 * be at least read locked (p_selock).
 */
caddr_t
hat_kpm_mapin(struct page *pp, struct kpme *kpme)
{
	kmutex_t	*pml;
	caddr_t		vaddr;

	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set");
		return ((caddr_t)NULL);
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked");
		return ((caddr_t)NULL);
	}

	pml = sfmmu_mlist_enter(pp);
	ASSERT(pp->p_kpmref >= 0);

	vaddr = (pp->p_kpmref == 0) ?
	    sfmmu_kpm_mapin(pp) : hat_kpm_page2va(pp, 1);

	if (kpme != NULL) {
		/*
		 * Tolerate multiple mapins for the same kpme to avoid
		 * the need for an extra serialization.
		 */
		if ((sfmmu_kpme_lookup(kpme, pp)) == 0)
			sfmmu_kpme_add(kpme, pp);

		ASSERT(pp->p_kpmref > 0);

	} else {
		pp->p_kpmref++;
	}

	sfmmu_mlist_exit(pml);
	return (vaddr);
}
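
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * segkpm client maps a locked page, accesses it through the returned
 * kpm address, and maps it out again while still holding the page lock.
 *
 *	caddr_t va = hat_kpm_mapin(pp, NULL);
 *	bcopy(buf, va, PAGESIZE);
 *	hat_kpm_mapout(pp, NULL, va);
 */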
/*
 * Mapout a locked page.
 * When a kpme is provided by the caller it is removed from
 * the page p_kpmelist. The page to be mapped out must be at
 * least read locked (p_selock).
 * Note: The seg_kpm layer provides a mapout interface for the
 * case that a kpme is used and the underlying page is unlocked.
 * This can be used instead of calling this function directly.
 */
void
hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
{
	kmutex_t	*pml;

	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set");
		return;
	}

	if (IS_KPM_ADDR(vaddr) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address");
		return;
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked");
		return;
	}

	if (kpme != NULL) {
		ASSERT(pp == kpme->kpe_page);
		pp = kpme->kpe_page;
		pml = sfmmu_mlist_enter(pp);

		if (sfmmu_kpme_lookup(kpme, pp) == 0)
			panic("hat_kpm_mapout: kpme not found pp=%p",
			    (void *)pp);

		ASSERT(pp->p_kpmref > 0);
		sfmmu_kpme_sub(kpme, pp);

	} else {
		pml = sfmmu_mlist_enter(pp);
		pp->p_kpmref--;
	}

	ASSERT(pp->p_kpmref >= 0);
	if (pp->p_kpmref == 0)
		sfmmu_kpm_mapout(pp, vaddr);

	sfmmu_mlist_exit(pml);
}
/*
 * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
 * memory addresses that are not described by a page_t. It can
 * only be supported if vac_colors=1, because there is no page_t
 * and corresponding kpm_page_t to track VAC conflicts. Currently,
 * this may not be used on pfn's backed by page_t's, because the
 * kpm state may not be consistent in hat_kpm_fault if the page is
 * mapped using both this routine and hat_kpm_mapin. KPM should be
 * cleaned up on sun4u/vac_colors=1 to be minimal as on sun4v.
 * The caller must only pass pfn's for valid physical addresses; violation
 * of this rule will cause panic.
 */
caddr_t
hat_kpm_mapin_pfn(pfn_t pfn)
{
	caddr_t paddr, vaddr;
	tte_t tte;
	uint_t szc = kpm_smallpages ? TTE8K : TTE4M;
	uint_t shift = kpm_smallpages ? MMU_PAGESHIFT : MMU_PAGESHIFT4M;

	if (kpm_enable == 0 || vac_colors > 1 ||
	    page_numtomemseg_nolock(pfn) != NULL)
		return ((caddr_t)NULL);

	paddr = (caddr_t)ptob(pfn);
	vaddr = (uintptr_t)kpm_vbase + paddr;

	KPM_TTE_VCACHED(tte.ll, pfn, szc);
	sfmmu_kpm_load_tsb(vaddr, &tte, shift);

	return (vaddr);
}
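
/*
 * Sketch of the intended use (an assumption drawn from the comment
 * above, not from a caller in this file): mapping physical memory that
 * has no page_t, on a vac_colors == 1 configuration.
 *
 *	caddr_t va = hat_kpm_mapin_pfn(pfn);
 *	if (va != NULL) {
 *		... access ptob(pfn) through va ...
 *		hat_kpm_mapout_pfn(pfn);
 *	}
 */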
/*ARGSUSED*/
void
hat_kpm_mapout_pfn(pfn_t pfn)
{
}
/*
 * Return the kpm virtual address for the page at pp.
 * If checkswap is non zero and the page is backed by a
 * swap vnode the physical address is used rather than
 * p_offset to determine the kpm region.
 * Note: The function has to be used w/ extreme care. The
 * stability of the page identity is in the responsibility
 * of the caller.
 */
caddr_t
hat_kpm_page2va(struct page *pp, int checkswap)
{
	int		vcolor, vcolor_pa;
	uintptr_t	paddr, vaddr;

	ASSERT(kpm_enable);

	paddr = ptob(pp->p_pagenum);
	vcolor_pa = addr_to_vcolor(paddr);

	if (checkswap && pp->p_vnode && IS_SWAPFSVP(pp->p_vnode))
		vcolor = (PP_ISNC(pp)) ? vcolor_pa : PP_GET_VCOLOR(pp);
	else
		vcolor = addr_to_vcolor(pp->p_offset);

	vaddr = (uintptr_t)kpm_vbase + paddr;

	if (vcolor_pa != vcolor) {
		vaddr += ((uintptr_t)(vcolor - vcolor_pa) << MMU_PAGESHIFT);
		vaddr += (vcolor_pa > vcolor) ?
		    ((uintptr_t)vcolor_pa << kpm_size_shift) :
		    ((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift);
	}

	return ((caddr_t)vaddr);
}
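
/*
 * Note on the offset arithmetic above: when the page's virtual color
 * does not match the color of its physical address, the kpm address is
 * first shifted by (vcolor - vcolor_pa) pages and then moved out of the
 * primary kpm range into one of the alias ranges (each kpm_size bytes,
 * selected via kpm_size_shift), so that the resulting virtual address
 * has the wanted color.
 */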
/*
 * Return the page for the kpm virtual address vaddr.
 * Caller is responsible for the kpm mapping and lock
 * regarding the page.
 */
page_t *
hat_kpm_vaddr2page(caddr_t vaddr)
{
	uintptr_t	paddr;
	pfn_t		pfn;

	ASSERT(IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);

	return (page_numtopp_nolock(pfn));
}
/* page to kpm_page */
#define	PP2KPMPG(pp, kp) {						\
	struct memseg	*mseg;						\
	pgcnt_t		inx;						\
	pfn_t		pfn;						\
									\
	pfn = pp->p_pagenum;						\
	mseg = page_numtomemseg_nolock(pfn);				\
	ASSERT(mseg);							\
	inx = ptokpmp(kpmptop(ptokpmp(pfn)) - mseg->kpm_pbase);		\
	ASSERT(inx < mseg->kpm_nkpmpgs);				\
	kp = &mseg->kpm_pages[inx];					\
}

/* page to kpm_spage */
#define	PP2KPMSPG(pp, ksp) {						\
	struct memseg	*mseg;						\
	pgcnt_t		inx;						\
	pfn_t		pfn;						\
									\
	pfn = pp->p_pagenum;						\
	mseg = page_numtomemseg_nolock(pfn);				\
	ASSERT(mseg);							\
	inx = pfn - mseg->kpm_pbase;					\
	ksp = &mseg->kpm_spages[inx];					\
}
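
/*
 * Both macros expect that the page's memseg can be found via
 * page_numtomemseg_nolock(). Typical use:
 *
 *	kpm_page_t *kp;
 *	kpm_spage_t *ksp;
 *
 *	PP2KPMPG(pp, kp);	(large-page kpm metadata)
 *	PP2KPMSPG(pp, ksp);	(kpm_smallpages metadata)
 */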
/*
 * hat_kpm_fault is called from segkpm_fault when a kpm tsbmiss occurred
 * which could not be resolved by the trap level tsbmiss handler for the
 * following reasons:
 * . The vaddr is in VAC alias range (always PAGESIZE mapping size).
 * . The kpm (s)page range of vaddr is in a VAC alias prevention state.
 * . tsbmiss handling at trap level is not desired (DEBUG kernel only,
 *   kpm_tsbmtl == 0).
 */
int
hat_kpm_fault(struct hat *hat, caddr_t vaddr)
{
	int		error;
	uintptr_t	paddr;
	pfn_t		pfn;
	struct memseg	*mseg;
	page_t	*pp;

	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_fault: kpm_enable not set");
		return (ENOTSUP);
	}

	ASSERT(hat == ksfmmup);
	ASSERT(IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);
	if ((mseg = page_numtomemseg_nolock(pfn)) != NULL) {
		pp = &mseg->pages[(pgcnt_t)(pfn - mseg->pages_base)];
		ASSERT((pfn_t)pp->p_pagenum == pfn);
	}

	/*
	 * hat_kpm_mapin_pfn may add a kpm translation for memory that falls
	 * outside of memsegs. Check for this case and provide the translation
	 * here.
	 */
	if (vac_colors == 1 && mseg == NULL) {
		tte_t tte;
		uint_t szc = kpm_smallpages ? TTE8K : TTE4M;
		uint_t shift = kpm_smallpages ? MMU_PAGESHIFT : MMU_PAGESHIFT4M;

		ASSERT(address_in_memlist(phys_install, paddr, 1));
		KPM_TTE_VCACHED(tte.ll, pfn, szc);
		sfmmu_kpm_load_tsb(vaddr, &tte, shift);
		error = 0;
	} else if (mseg == NULL || !PAGE_LOCKED(pp))
		error = EFAULT;
	else if (kpm_smallpages == 0)
		error = sfmmu_kpm_fault(vaddr, mseg, pp);
	else
		error = sfmmu_kpm_fault_small(vaddr, mseg, pp);

	return (error);
}
/*
 * memseg_hash[] was cleared, need to clear memseg_phash[] too.
 */
void
hat_kpm_mseghash_clear(int nentries)
{
	pgcnt_t i;

	if (kpm_enable == 0)
		return;

	for (i = 0; i < nentries; i++)
		memseg_phash[i] = MSEG_NULLPTR_PA;
}
/*
 * Update memseg_phash[inx] when memseg_hash[inx] was changed.
 */
void
hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
{
	if (kpm_enable == 0)
		return;

	memseg_phash[inx] = (msp) ? va_to_pa(msp) : MSEG_NULLPTR_PA;
}
/*
 * Update kpm memseg members from basic memseg info.
 */
void
hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
	offset_t kpm_pages_off)
{
	if (kpm_enable == 0)
		return;

	msp->kpm_pages = (kpm_page_t *)((caddr_t)msp->pages + kpm_pages_off);
	msp->kpm_nkpmpgs = nkpmpgs;
	msp->kpm_pbase = kpmptop(ptokpmp(msp->pages_base));
	msp->pagespa = va_to_pa(msp->pages);
	msp->epagespa = va_to_pa(msp->epages);
	msp->kpm_pagespa = va_to_pa(msp->kpm_pages);
}
/*
 * Setup nextpa when a memseg is inserted.
 * Assumes that the memsegslock is already held.
 */
void
hat_kpm_addmem_mseg_insert(struct memseg *msp)
{
	if (kpm_enable == 0)
		return;

	ASSERT(memsegs_lock_held());
	msp->nextpa = (memsegs) ? va_to_pa(memsegs) : MSEG_NULLPTR_PA;
}
/*
 * Setup memsegspa when a memseg is (head) inserted.
 * Called before memsegs is updated to complete a
 * memseg insert operation.
 * Assumes that the memsegslock is already held.
 */
void
hat_kpm_addmem_memsegs_update(struct memseg *msp)
{
	if (kpm_enable == 0)
		return;

	ASSERT(memsegs_lock_held());
	ASSERT(memsegs);
	memsegspa = va_to_pa(msp);
}
/*
 * Return end of metadata for an already setup memseg.
 *
 * Note: kpm_pages and kpm_spages are aliases and the underlying
 * member of struct memseg is a union, therefore they always have
 * the same address within a memseg. They must be differentiated
 * when pointer arithmetic is used with them.
 */
caddr_t
hat_kpm_mseg_reuse(struct memseg *msp)
{
	caddr_t end;

	if (kpm_smallpages == 0)
		end = (caddr_t)(msp->kpm_pages + msp->kpm_nkpmpgs);
	else
		end = (caddr_t)(msp->kpm_spages + msp->kpm_nkpmpgs);

	return (end);
}
/*
 * Update memsegspa (when first memseg in list
 * is deleted) or nextpa when a memseg is deleted.
 * Assumes that the memsegslock is already held.
 */
void
hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
{
	struct memseg *lmsp;

	if (kpm_enable == 0)
		return;

	ASSERT(memsegs_lock_held());

	if (mspp == &memsegs) {
		memsegspa = (msp->next) ?
		    va_to_pa(msp->next) : MSEG_NULLPTR_PA;
	} else {
		lmsp = (struct memseg *)
		    ((uint64_t)mspp - offsetof(struct memseg, next));
		lmsp->nextpa = (msp->next) ?
		    va_to_pa(msp->next) : MSEG_NULLPTR_PA;
	}
}
/*
 * Update kpm members for all memseg's involved in a split operation
 * and do the atomic update of the physical memseg chain.
 *
 * Note: kpm_pages and kpm_spages are aliases and the underlying member
 * of struct memseg is a union, therefore they always have the same
 * address within a memseg. With that the direct assignments and
 * va_to_pa conversions below don't have to be distinguished wrt. to
 * kpm_smallpages. They must be differentiated when pointer arithmetic
 * is used with them.
 *
 * Assumes that the memsegslock is already held.
 */
void
hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
	struct memseg *lo, struct memseg *mid, struct memseg *hi)
{
	pgcnt_t start, end, kbase, kstart, num;
	struct memseg *lmsp;

	if (kpm_enable == 0)
		return;

	ASSERT(memsegs_lock_held());
	ASSERT(msp && mid && msp->kpm_pages);

	kbase = ptokpmp(msp->kpm_pbase);

	if (lo) {
		num = lo->pages_end - lo->pages_base;
		start = kpmptop(ptokpmp(lo->pages_base));
		/* align end to kpm page size granularity */
		end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs;
		lo->kpm_pbase = start;
		lo->kpm_nkpmpgs = ptokpmp(end - start);
		lo->kpm_pages = msp->kpm_pages;
		lo->kpm_pagespa = va_to_pa(lo->kpm_pages);
		lo->pagespa = va_to_pa(lo->pages);
		lo->epagespa = va_to_pa(lo->epages);
		lo->nextpa = va_to_pa(lo->next);
	}

	/* mid */
	num = mid->pages_end - mid->pages_base;
	kstart = ptokpmp(mid->pages_base);
	start = kpmptop(kstart);
	/* align end to kpm page size granularity */
	end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs;
	mid->kpm_pbase = start;
	mid->kpm_nkpmpgs = ptokpmp(end - start);
	if (kpm_smallpages == 0) {
		mid->kpm_pages = msp->kpm_pages + (kstart - kbase);
	} else {
		mid->kpm_spages = msp->kpm_spages + (kstart - kbase);
	}
	mid->kpm_pagespa = va_to_pa(mid->kpm_pages);
	mid->pagespa = va_to_pa(mid->pages);
	mid->epagespa = va_to_pa(mid->epages);
	mid->nextpa = (mid->next) ? va_to_pa(mid->next) : MSEG_NULLPTR_PA;

	if (hi) {
		num = hi->pages_end - hi->pages_base;
		kstart = ptokpmp(hi->pages_base);
		start = kpmptop(kstart);
		/* align end to kpm page size granularity */
		end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs;
		hi->kpm_pbase = start;
		hi->kpm_nkpmpgs = ptokpmp(end - start);
		if (kpm_smallpages == 0) {
			hi->kpm_pages = msp->kpm_pages + (kstart - kbase);
		} else {
			hi->kpm_spages = msp->kpm_spages + (kstart - kbase);
		}
		hi->kpm_pagespa = va_to_pa(hi->kpm_pages);
		hi->pagespa = va_to_pa(hi->pages);
		hi->epagespa = va_to_pa(hi->epages);
		hi->nextpa = (hi->next) ? va_to_pa(hi->next) : MSEG_NULLPTR_PA;
	}

	/*
	 * Atomic update of the physical memseg chain
	 */
	if (mspp == &memsegs) {
		memsegspa = (lo) ? va_to_pa(lo) : va_to_pa(mid);
	} else {
		lmsp = (struct memseg *)
		    ((uint64_t)mspp - offsetof(struct memseg, next));
		lmsp->nextpa = (lo) ? va_to_pa(lo) : va_to_pa(mid);
	}
}
/*
 * Walk the memsegs chain, applying func to each memseg span and vcolor.
 */
void
hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
{
	pfn_t	pbase, pend;
	int	vcolor;
	void	*base;
	size_t	size;
	struct memseg *msp;

	for (msp = memsegs; msp; msp = msp->next) {
		pbase = msp->pages_base;
		pend = msp->pages_end;
		for (vcolor = 0; vcolor < vac_colors; vcolor++) {
			base = ptob(pbase) + kpm_vbase + kpm_size * vcolor;
			size = ptob(pend - pbase);
			func(arg, base, size);
		}
	}
}
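
/*
 * Example callback shape (illustrative sketch, not a caller in this
 * file): func receives the kpm base address and byte size of one memseg
 * span per vac color, so a walker that just sums the mapped bytes could
 * look like:
 *
 *	static void
 *	count_span(void *arg, void *base, size_t size)
 *	{
 *		*(size_t *)arg += size;
 *	}
 *
 *	size_t total = 0;
 *	hat_kpm_walk(count_span, &total);
 */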
/* -- sfmmu_kpm internal section -- */

/*
 * Return the page frame number if a valid segkpm mapping exists
 * for vaddr, otherwise return PFN_INVALID. No locks are grabbed.
 * Should only be used by other sfmmu routines.
 */
pfn_t
sfmmu_kpm_vatopfn(caddr_t vaddr)
{
	uintptr_t	paddr;
	pfn_t		pfn;
	page_t	*pp;

	ASSERT(kpm_enable && IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);
	pp = page_numtopp_nolock(pfn);
	if (pp && pp->p_kpmref)
		return (pfn);
	else
		return ((pfn_t)PFN_INVALID);
}
/*
 * Lookup a kpme in the p_kpmelist.
 */
static int
sfmmu_kpme_lookup(struct kpme *kpme, page_t *pp)
{
	struct kpme	*p;

	for (p = pp->p_kpmelist; p; p = p->kpe_next) {
		if (p == kpme)
			return (1);
	}
	return (0);
}
/*
 * Insert a kpme into the p_kpmelist and increment
 * the per page kpm reference count.
 */
static void
sfmmu_kpme_add(struct kpme *kpme, page_t *pp)
{
	ASSERT(pp->p_kpmref >= 0);

	/* head insert */
	kpme->kpe_prev = NULL;
	kpme->kpe_next = pp->p_kpmelist;

	if (pp->p_kpmelist)
		pp->p_kpmelist->kpe_prev = kpme;

	pp->p_kpmelist = kpme;
	kpme->kpe_page = pp;
	pp->p_kpmref++;
}
/*
 * Remove a kpme from the p_kpmelist and decrement
 * the per page kpm reference count.
 */
static void
sfmmu_kpme_sub(struct kpme *kpme, page_t *pp)
{
	ASSERT(pp->p_kpmref > 0);

	if (kpme->kpe_prev) {
		ASSERT(pp->p_kpmelist != kpme);
		ASSERT(kpme->kpe_prev->kpe_page == pp);
		kpme->kpe_prev->kpe_next = kpme->kpe_next;
	} else {
		ASSERT(pp->p_kpmelist == kpme);
		pp->p_kpmelist = kpme->kpe_next;
	}

	if (kpme->kpe_next) {
		ASSERT(kpme->kpe_next->kpe_page == pp);
		kpme->kpe_next->kpe_prev = kpme->kpe_prev;
	}

	kpme->kpe_next = kpme->kpe_prev = NULL;
	kpme->kpe_page = NULL;
	pp->p_kpmref--;
}
/*
 * Mapin a single page, it is called every time a page changes its state
 * from kpm-unmapped to kpm-mapped. It may not be called when only a new
 * kpm instance does a mapin and wants to share the mapping.
 * Assumes that the mlist mutex is already grabbed.
 */
static caddr_t
sfmmu_kpm_mapin(page_t *pp)
{
	kpm_page_t	*kp;
	kpm_hlk_t	*kpmp;
	caddr_t		vaddr;
	int		kpm_vac_range;
	pfn_t		pfn;
	tte_t		tte;
	kmutex_t	*pmtx;
	int		uncached;
	kpm_spage_t	*ksp;
	kpm_shlk_t	*kpmsp;
	int		oldval;

	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(pp->p_kpmref == 0);

	vaddr = sfmmu_kpm_getvaddr(pp, &kpm_vac_range);

	ASSERT(IS_KPM_ADDR(vaddr));
	uncached = PP_ISNC(pp);
	pfn = pp->p_pagenum;

	if (kpm_smallpages)
		goto smallpages_mapin;

	PP2KPMPG(pp, kp);

	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	ASSERT(PP_ISKPMC(pp) == 0);
	ASSERT(PP_ISKPMS(pp) == 0);

	if (uncached) {
		/* ASSERT(pp->p_share); XXX use hat_page_getshare */
		if (kpm_vac_range == 0) {
			if (kp->kp_refcnts == 0) {
				/*
				 * Must remove large page mapping if it exists.
				 * Pages in uncached state can only be mapped
				 * small (PAGESIZE) within the regular kpm
				 * range.
				 */
				if (kp->kp_refcntc == -1) {
					/* remove go indication */
					sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
					    &kpmp->khl_lock, KPMTSBM_STOP);
				}
				if (kp->kp_refcnt > 0 && kp->kp_refcntc == 0)
					sfmmu_kpm_demap_large(vaddr);
			}
			ASSERT(kp->kp_refcntc >= 0);
			kp->kp_refcntc++;
		}
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);
	}

	if ((kp->kp_refcntc > 0 || kp->kp_refcnts > 0) && kpm_vac_range == 0) {
		/*
		 * Have to do a small (PAGESIZE) mapin within this kpm_page
		 * range since it is marked to be in VAC conflict mode or
		 * when there are still other small mappings around.
		 */

		/* tte assembly */
		if (uncached == 0)
			KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
		else
			KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);

		/* tsb dropin */
		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMS(pp);
		sfmmu_page_exit(pmtx);

		kp->kp_refcnts++;
		ASSERT(kp->kp_refcnts > 0);
		goto exit;
	}

	if (kpm_vac_range == 0) {
		/*
		 * Fast path / regular case, no VAC conflict handling
		 * in progress within this kpm_page range.
		 */
		if (kp->kp_refcnt == 0) {

			/* tte assembly */
			KPM_TTE_VCACHED(tte.ll, pfn, TTE4M);

			/* tsb dropin */
			sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT4M);

			/* Set go flag for TL tsbmiss handler */
			if (kp->kp_refcntc == 0)
				sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
				    &kpmp->khl_lock, KPMTSBM_START);

			ASSERT(kp->kp_refcntc == -1);
		}
		kp->kp_refcnt++;
		ASSERT(kp->kp_refcnt);

	} else {
		/*
		 * The page is not setup according to the common VAC
		 * prevention rules for the regular and kpm mapping layer.
		 * E.g. the page layer was not able to deliver a right
		 * vcolor'ed page for a given vaddr corresponding to
		 * the wanted p_offset. It has to be mapped in small
		 * within the corresponding kpm vac range in order to
		 * prevent VAC alias conflicts.
		 */

		/* tte assembly */
		if (uncached == 0) {
			KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
		} else {
			KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
		}

		/* tsb dropin */
		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

		kp->kp_refcnta++;
		if (kp->kp_refcntc == -1) {
			ASSERT(kp->kp_refcnt > 0);

			/* remove go indication */
			sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock,
			    KPMTSBM_STOP);
		}
		ASSERT(kp->kp_refcntc >= 0);
	}
exit:
	mutex_exit(&kpmp->khl_mutex);
	return (vaddr);

smallpages_mapin:
	if (uncached == 0) {
		/* tte assembly */
		KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
	} else {
		/*
		 * Just in case this same page was mapped cacheable prior to
		 * this and the old tte remains in the tlb.
		 */
		sfmmu_kpm_demap_small(vaddr);

		/* ASSERT(pp->p_share); XXX use hat_page_getshare */
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);

		/* tte assembly */
		KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
	}

	/* tsb dropin */
	sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

	PP2KPMSPG(pp, ksp);
	kpmsp = KPMP_SHASH(ksp);

	oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag, &kpmsp->kshl_lock,
	    (uncached) ? (KPM_MAPPED_GO | KPM_MAPPEDSC) :
	    (KPM_MAPPED_GO | KPM_MAPPEDS));

	if (oldval != 0)
		panic("sfmmu_kpm_mapin: stale smallpages mapping");

	return (vaddr);
}
/*
 * Mapout a single page, it is called every time a page changes its state
 * from kpm-mapped to kpm-unmapped. It may not be called when only a kpm
 * instance calls mapout and there are still other instances mapping the
 * page. Assumes that the mlist mutex is already grabbed.
 *
 * Note: In normal mode (no VAC conflict prevention pending) TLB's are
 * not flushed. This is the core segkpm behavior to avoid xcalls. It is
 * no problem because a translation from a segkpm virtual address to a
 * physical address is always the same. The only downside is a slightly
 * increased window of vulnerability for misbehaving _kernel_ modules.
 */
static void
sfmmu_kpm_mapout(page_t *pp, caddr_t vaddr)
{
	kpm_page_t	*kp;
	kpm_hlk_t	*kpmp;
	int		alias_range;
	kmutex_t	*pmtx;
	kpm_spage_t	*ksp;
	kpm_shlk_t	*kpmsp;
	int		oldval;

	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(pp->p_kpmref == 0);

	alias_range = IS_KPM_ALIAS_RANGE(vaddr);

	if (kpm_smallpages)
		goto smallpages_mapout;

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	if (alias_range) {
		ASSERT(PP_ISKPMS(pp) == 0);
		if (kp->kp_refcnta <= 0) {
			panic("sfmmu_kpm_mapout: bad refcnta kp=%p",
			    (void *)kp);
		}

		if (PP_ISTNC(pp))  {
			if (PP_ISKPMC(pp) == 0) {
				/*
				 * Uncached kpm mappings must always have
				 * forced "small page" mode.
				 */
				panic("sfmmu_kpm_mapout: uncached page not "
				    "kpm marked");
			}
			sfmmu_kpm_demap_small(vaddr);

			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMC(pp);
			sfmmu_page_exit(pmtx);

			/*
			 * Check if we can resume cached mode. This might
			 * be the case if the kpm mapping was the only
			 * mapping in conflict with other non rule
			 * compliant mappings. The page is no longer marked
			 * as kpm mapped, so the conv_tnc path will not
			 * change the kpm state.
			 */
			conv_tnc(pp, TTE8K);

		} else if (PP_ISKPMC(pp) == 0) {
			/* remove TSB entry only */
			sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);

		} else {
			/* already demapped */
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMC(pp);
			sfmmu_page_exit(pmtx);
		}
		kp->kp_refcnta--;
		goto exit;
	}

	if (kp->kp_refcntc <= 0 && kp->kp_refcnts == 0) {
		/*
		 * Fast path / regular case.
		 */
		ASSERT(kp->kp_refcntc >= -1);
		ASSERT(!(pp->p_nrm & (P_KPMC | P_KPMS | P_TNC | P_PNC)));

		if (kp->kp_refcnt <= 0)
			panic("sfmmu_kpm_mapout: bad refcnt kp=%p", (void *)kp);

		if (--kp->kp_refcnt == 0) {
			/* remove go indication */
			if (kp->kp_refcntc == -1) {
				sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
				    &kpmp->khl_lock, KPMTSBM_STOP);
			}
			ASSERT(kp->kp_refcntc == 0);

			/* remove TSB entry */
			sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT4M);
#ifdef	DEBUG
			if (kpm_tlb_flush)
				sfmmu_kpm_demap_tlbs(vaddr);
#endif
		}

	} else {
		/*
		 * The VAC alias path.
		 * We come here if the kpm vaddr is not in any alias_range
		 * and we are unmapping a page within the regular kpm_page
		 * range. The kpm_page either holds conflict pages and/or
		 * is in "small page" mode. If the page is not marked
		 * P_KPMS it couldn't have a valid PAGESIZE sized TSB
		 * entry. Dcache flushing is done lazily and follows the
		 * rules of the regular virtual page coloring scheme.
		 *
		 * Per page states and required actions:
		 *   P_KPMC: remove a kpm mapping that is conflicting.
		 *   P_KPMS: remove a small kpm mapping within a kpm_page.
		 *   P_TNC:  check if we can re-cache the page.
		 *   P_PNC:  we cannot re-cache, sorry.
		 * Per kpm_page:
		 *   kp_refcntc > 0: page is part of a kpm_page with conflicts.
		 *   kp_refcnts > 0: rm a small mapped page within a kpm_page.
		 */

		if (PP_ISKPMS(pp)) {
			if (kp->kp_refcnts < 1) {
				panic("sfmmu_kpm_mapout: bad refcnts kp=%p",
				    (void *)kp);
			}
			sfmmu_kpm_demap_small(vaddr);

			/*
			 * Check if we can resume cached mode. This might
			 * be the case if the kpm mapping was the only
			 * mapping in conflict with other non rule
			 * compliant mappings. The page is no longer marked
			 * as kpm mapped, so the conv_tnc path will not
			 * change the kpm state.
			 */
			if (PP_ISTNC(pp))  {
				if (!PP_ISKPMC(pp)) {
					/*
					 * Uncached kpm mappings must always
					 * have forced "small page" mode.
					 */
					panic("sfmmu_kpm_mapout: uncached "
					    "page not kpm marked");
				}
				conv_tnc(pp, TTE8K);
			}
			kp->kp_refcnts--;
			kp->kp_refcnt++;
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMS(pp);
			sfmmu_page_exit(pmtx);
		}

		if (PP_ISKPMC(pp)) {
			if (kp->kp_refcntc < 1) {
				panic("sfmmu_kpm_mapout: bad refcntc kp=%p",
				    (void *)kp);
			}
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMC(pp);
			sfmmu_page_exit(pmtx);
			kp->kp_refcntc--;
		}

		if (kp->kp_refcnt-- < 1)
			panic("sfmmu_kpm_mapout: bad refcnt kp=%p", (void *)kp);
	}
exit:
	mutex_exit(&kpmp->khl_mutex);
	return;

smallpages_mapout:
	PP2KPMSPG(pp, ksp);
	kpmsp = KPMP_SHASH(ksp);

	if (PP_ISKPMC(pp) == 0) {
		oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
		    &kpmsp->kshl_lock, 0);

		if (oldval != KPM_MAPPEDS) {
			/*
			 * When we're called after sfmmu_kpm_hme_unload,
			 * KPM_MAPPEDSC is valid too.
			 */
			if (oldval != KPM_MAPPEDSC)
				panic("sfmmu_kpm_mapout: incorrect mapping");
		}

		/* remove TSB entry */
		sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);
#ifdef	DEBUG
		if (kpm_tlb_flush)
			sfmmu_kpm_demap_tlbs(vaddr);
#endif

	} else if (PP_ISTNC(pp)) {
		oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
		    &kpmsp->kshl_lock, 0);

		if (oldval != KPM_MAPPEDSC || PP_ISKPMC(pp) == 0)
			panic("sfmmu_kpm_mapout: inconsistent TNC mapping");

		sfmmu_kpm_demap_small(vaddr);

		pmtx = sfmmu_page_enter(pp);
		PP_CLRKPMC(pp);
		sfmmu_page_exit(pmtx);

		/*
		 * Check if we can resume cached mode. This might be
		 * the case if the kpm mapping was the only mapping
		 * in conflict with other non rule compliant mappings.
		 * The page is no longer marked as kpm mapped, so the
		 * conv_tnc path will not change the kpm state.
		 */
		conv_tnc(pp, TTE8K);

	} else {
		oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
		    &kpmsp->kshl_lock, 0);

		if (oldval != KPM_MAPPEDSC)
			panic("sfmmu_kpm_mapout: inconsistent mapping");

		pmtx = sfmmu_page_enter(pp);
		PP_CLRKPMC(pp);
		sfmmu_page_exit(pmtx);
	}
}
#define	abs(x)  ((x) < 0 ? -(x) : (x))
/*
 * Determine appropriate kpm mapping address and handle any kpm/hme
 * conflicts. Page mapping list and its vcolor parts must be protected.
 */
static caddr_t
sfmmu_kpm_getvaddr(page_t *pp, int *kpm_vac_rangep)
{
	int		vcolor, vcolor_pa;
	caddr_t		vaddr;
	uintptr_t	paddr;


	ASSERT(sfmmu_mlist_held(pp));

	paddr = ptob(pp->p_pagenum);
	vcolor_pa = addr_to_vcolor(paddr);

	if (pp->p_vnode && IS_SWAPFSVP(pp->p_vnode)) {
		vcolor = (PP_NEWPAGE(pp) || PP_ISNC(pp)) ?
		    vcolor_pa : PP_GET_VCOLOR(pp);
	} else {
		vcolor = addr_to_vcolor(pp->p_offset);
	}

	vaddr = kpm_vbase + paddr;
	*kpm_vac_rangep = 0;

	if (vcolor_pa != vcolor) {
		*kpm_vac_rangep = abs(vcolor - vcolor_pa);
		vaddr += ((uintptr_t)(vcolor - vcolor_pa) << MMU_PAGESHIFT);
		vaddr += (vcolor_pa > vcolor) ?
		    ((uintptr_t)vcolor_pa << kpm_size_shift) :
		    ((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift);

		ASSERT(!PP_ISMAPPED_LARGE(pp));
	}

	if (PP_ISNC(pp))
		return (vaddr);

	if (PP_NEWPAGE(pp)) {
		PP_SET_VCOLOR(pp, vcolor);
		return (vaddr);
	}

	if (PP_GET_VCOLOR(pp) == vcolor)
		return (vaddr);

	ASSERT(!PP_ISMAPPED_KPM(pp));
	sfmmu_kpm_vac_conflict(pp, vaddr);

	return (vaddr);
}
/*
 * VAC conflict state bit values.
 * The following defines are used to make the handling of the
 * various input states more concise. For that the kpm states
 * per kpm_page and per page are combined in a summary state.
 * Each single state has a corresponding bit value in the
 * summary state. These defines only apply for kpm large page
 * mappings. Within comments the abbreviations "kc, c, ks, s"
 * are used as short form of the actual state, e.g. "kc" for
 * "kp_refcntc > 0", etc.
 */
#define	KPM_KC	0x00000008	/* kpm_page: kp_refcntc > 0 */
#define	KPM_C	0x00000004	/* page: P_KPMC set */
#define	KPM_KS	0x00000002	/* kpm_page: kp_refcnts > 0 */
#define	KPM_S	0x00000001	/* page: P_KPMS set */

/*
 * Summary states used in sfmmu_kpm_fault (KPM_TSBM_*).
 * See also more detailed comments within the sfmmu_kpm_fault switch.
 * Abbreviations used:
 * CONFL: VAC conflict(s) within a kpm_page.
 * MAPS:  Mapped small: Page mapped in using a regular page size kpm mapping.
 * RASM:  Re-assembling of a large page mapping possible.
 * RPLS:  Replace: TSB miss due to TSB replacement only.
 * BRKO:  Breakup Other: A large kpm mapping has to be broken because another
 *        page within the kpm_page is already involved in a VAC conflict.
 * BRKT:  Breakup This: A large kpm mapping has to be broken, this page is
 *        involved in a VAC conflict.
 */
#define	KPM_TSBM_CONFL_GONE	(0)
#define	KPM_TSBM_MAPS_RASM	(KPM_KS)
#define	KPM_TSBM_RPLS_RASM	(KPM_KS | KPM_S)
#define	KPM_TSBM_MAPS_BRKO	(KPM_KC)
#define	KPM_TSBM_MAPS		(KPM_KC | KPM_KS)
#define	KPM_TSBM_RPLS		(KPM_KC | KPM_KS | KPM_S)
#define	KPM_TSBM_MAPS_BRKT	(KPM_KC | KPM_C)
#define	KPM_TSBM_MAPS_CONFL	(KPM_KC | KPM_C | KPM_KS)
#define	KPM_TSBM_RPLS_CONFL	(KPM_KC | KPM_C | KPM_KS | KPM_S)
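
/*
 * Example: a tsbmiss on a page with P_KPMC set (KPM_C) inside a
 * kpm_page with kp_refcntc > 0 (KPM_KC) and kp_refcnts > 0 (KPM_KS)
 * yields the summary state KPM_KC | KPM_C | KPM_KS == KPM_TSBM_MAPS_CONFL,
 * i.e. the page must be mapped in small while the conflict persists.
 */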
/*
 * kpm fault handler for mappings with large page size.
 */
static int
sfmmu_kpm_fault(caddr_t vaddr, struct memseg *mseg, page_t *pp)
{
	int		error;
	pgcnt_t		inx;
	kpm_page_t	*kp;
	tte_t		tte;
	pfn_t		pfn = pp->p_pagenum;
	kpm_hlk_t	*kpmp;
	kmutex_t	*pml;
	int		alias_range;
	int		uncached = 0;
	kmutex_t	*pmtx;
	int		badstate;
	uint_t		tsbmcase;

	alias_range = IS_KPM_ALIAS_RANGE(vaddr);

	inx = ptokpmp(kpmptop(ptokpmp(pfn)) - mseg->kpm_pbase);
	if (inx >= mseg->kpm_nkpmpgs) {
		cmn_err(CE_PANIC, "sfmmu_kpm_fault: kpm overflow in memseg "
		    "0x%p  pp 0x%p", (void *)mseg, (void *)pp);
	}

	kp = &mseg->kpm_pages[inx];
	kpmp = KPMP_HASH(kp);

	pml = sfmmu_mlist_enter(pp);

	if (!PP_ISMAPPED_KPM(pp)) {
		sfmmu_mlist_exit(pml);
		return (EFAULT);
	}

	mutex_enter(&kpmp->khl_mutex);

	if (alias_range) {
		ASSERT(!PP_ISMAPPED_LARGE(pp));
		if (kp->kp_refcnta > 0) {
			if (PP_ISKPMC(pp)) {
				pmtx = sfmmu_page_enter(pp);
				PP_CLRKPMC(pp);
				sfmmu_page_exit(pmtx);
			}
			/*
			 * Check for vcolor conflicts. Return here
			 * w/ either no conflict (fast path), removed hme
			 * mapping chains (unload conflict) or uncached
			 * (uncache conflict). VACaches are cleaned and
			 * p_vcolor and PP_TNC are set accordingly for the
			 * conflict cases. Drop kpmp for uncache conflict
			 * cases since it will be grabbed within
			 * sfmmu_kpm_page_cache in case of an uncache
			 * conflict.
			 */
			mutex_exit(&kpmp->khl_mutex);
			sfmmu_kpm_vac_conflict(pp, vaddr);
			mutex_enter(&kpmp->khl_mutex);

			if (PP_ISNC(pp)) {
				uncached = 1;
				pmtx = sfmmu_page_enter(pp);
				PP_SETKPMC(pp);
				sfmmu_page_exit(pmtx);
			}
			goto smallexit;

		} else {
			/*
			 * We got a tsbmiss on a not active kpm_page range.
			 * Let segkpm_fault decide how to panic.
			 */
			error = EFAULT;
		}
		goto exit;
	}

	badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
	if (kp->kp_refcntc == -1) {
		/*
		 * We should come here only if trap level tsb miss
		 * handler is disabled.
		 */
		badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
		    PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));

		if (badstate == 0)
			goto largeexit;
	}

	if (badstate || kp->kp_refcntc < 0)
		goto badstate_exit;

	/*
	 * Combine the per kpm_page and per page kpm VAC states to
	 * a summary state in order to make the kpm fault handling
	 * more concise.
	 */
	tsbmcase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
	    ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
	    (PP_ISKPMC(pp) ? KPM_C : 0) |
	    (PP_ISKPMS(pp) ? KPM_S : 0));

	switch (tsbmcase) {
	case KPM_TSBM_CONFL_GONE:		/* - - - - */
		/*
		 * That's fine, we either have no more vac conflict in
		 * this kpm page or someone raced in and has solved the
		 * vac conflict for us -- call sfmmu_kpm_vac_conflict
		 * to take care for correcting the vcolor and flushing
		 * the dcache if required.
		 */
		mutex_exit(&kpmp->khl_mutex);
		sfmmu_kpm_vac_conflict(pp, vaddr);
		mutex_enter(&kpmp->khl_mutex);

		if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
			panic("sfmmu_kpm_fault: inconsistent CONFL_GONE "
			    "state, pp=%p", (void *)pp);
		}
		goto largeexit;

	case KPM_TSBM_MAPS_RASM:		/* - - ks - */
		/*
		 * All conflicts in this kpm page are gone but there are
		 * already small mappings around, so we also map this
		 * page small. This could be the trigger case for a
		 * small mapping reaper, if this is really needed.
		 * For now fall thru to the KPM_TSBM_MAPS handling.
		 */
		/* FALLTHRU */

	case KPM_TSBM_MAPS:			/* kc - ks - */
		/*
		 * Large page mapping is already broken, this page is not
		 * conflicting, so map it small. Call sfmmu_kpm_vac_conflict
		 * to take care for correcting the vcolor and flushing
		 * the dcache if required.
		 */
		mutex_exit(&kpmp->khl_mutex);
		sfmmu_kpm_vac_conflict(pp, vaddr);
		mutex_enter(&kpmp->khl_mutex);

		if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
			panic("sfmmu_kpm_fault: inconsistent MAPS state, "
			    "pp=%p", (void *)pp);
		}
		kp->kp_refcnt--;
		kp->kp_refcnts++;
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMS(pp);
		sfmmu_page_exit(pmtx);
		goto smallexit;

	case KPM_TSBM_RPLS_RASM:		/* - - ks s */
		/*
		 * All conflicts in this kpm page are gone but this page
		 * is mapped small. This could be the trigger case for a
		 * small mapping reaper, if this is really needed.
		 * For now we drop it in small again. Fall thru to the
		 * KPM_TSBM_RPLS handling.
		 */
		/* FALLTHRU */

	case KPM_TSBM_RPLS:			/* kc - ks s */
		/*
		 * Large page mapping is already broken, this page is not
		 * conflicting but already mapped small, so drop it in
		 * small again.
		 */
		if (PP_ISNC(pp) ||
		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
			panic("sfmmu_kpm_fault: inconsistent RPLS state, "
			    "pp=%p", (void *)pp);
		}
		goto smallexit;

	case KPM_TSBM_MAPS_BRKO:		/* kc - - - */
		/*
		 * The kpm page where we live in is marked conflicting
		 * but this page is not conflicting. So we have to map it
		 * in small. Call sfmmu_kpm_vac_conflict to take care for
		 * correcting the vcolor and flushing the dcache if required.
		 */
		mutex_exit(&kpmp->khl_mutex);
		sfmmu_kpm_vac_conflict(pp, vaddr);
		mutex_enter(&kpmp->khl_mutex);

		if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
		    addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
			panic("sfmmu_kpm_fault: inconsistent MAPS_BRKO state, "
			    "pp=%p", (void *)pp);
		}
		kp->kp_refcnt--;
		kp->kp_refcnts++;
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMS(pp);
		sfmmu_page_exit(pmtx);
		goto smallexit;

	case KPM_TSBM_MAPS_BRKT:		/* kc c - - */
	case KPM_TSBM_MAPS_CONFL:		/* kc c ks - */
		if (!PP_ISMAPPED(pp)) {
			/*
			 * We got a tsbmiss on kpm large page range that is
			 * marked to contain vac conflicting pages introduced
			 * by hme mappings. The hme mappings are all gone and
			 * must have bypassed the kpm alias prevention logic.
			 */
			panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p",
			    (void *)pp);
		}

		/*
		 * Check for vcolor conflicts. Return here w/ either no
		 * conflict (fast path), removed hme mapping chains
		 * (unload conflict) or uncached (uncache conflict).
		 * Dcache is cleaned and p_vcolor and P_TNC are set
		 * accordingly. Drop kpmp for uncache conflict cases
		 * since it will be grabbed within sfmmu_kpm_page_cache
		 * in case of an uncache conflict.
		 */
		mutex_exit(&kpmp->khl_mutex);
		sfmmu_kpm_vac_conflict(pp, vaddr);
		mutex_enter(&kpmp->khl_mutex);

		if (kp->kp_refcnt <= 0)
			panic("sfmmu_kpm_fault: bad refcnt kp=%p", (void *)kp);

		if (PP_ISNC(pp)) {
			uncached = 1;
		} else {
			/*
			 * When an unload conflict is solved and there are
			 * no other small mappings around, we can resume
			 * largepage mode. Otherwise we have to map or drop
			 * in small. This could be a trigger for a small
			 * mapping reaper when this was the last conflict
			 * within the kpm page and when there are only
			 * other small mappings around.
			 */
			ASSERT(addr_to_vcolor(vaddr) == PP_GET_VCOLOR(pp));
			ASSERT(kp->kp_refcntc > 0);
			kp->kp_refcntc--;
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMC(pp);
			sfmmu_page_exit(pmtx);
			ASSERT(PP_ISKPMS(pp) == 0);
			if (kp->kp_refcntc == 0 && kp->kp_refcnts == 0)
				goto largeexit;
		}

		kp->kp_refcnt--;
		kp->kp_refcnts++;
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMS(pp);
		sfmmu_page_exit(pmtx);
		goto smallexit;

	case KPM_TSBM_RPLS_CONFL:		/* kc c ks s */
		if (!PP_ISMAPPED(pp)) {
			/*
			 * We got a tsbmiss on kpm large page range that is
			 * marked to contain vac conflicting pages introduced
			 * by hme mappings. They are all gone and must have
			 * somehow bypassed the kpm alias prevention logic.
			 */
			panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p",
			    (void *)pp);
		}

		/*
		 * This state is only possible for an uncached mapping.
		 */
		if (!PP_ISNC(pp)) {
			panic("sfmmu_kpm_fault: page not uncached, pp=%p",
			    (void *)pp);
		}
		uncached = 1;
		goto smallexit;

	default:
badstate_exit:
		panic("sfmmu_kpm_fault: inconsistent VAC state, vaddr=%p kp=%p "
		    "pp=%p", (void *)vaddr, (void *)kp, (void *)pp);
	}

smallexit:
	/* tte assembly */
	if (uncached == 0)
		KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
	else
		KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);

	/* tsb dropin */
	sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

	error = 0;
	goto exit;

largeexit:
	if (kp->kp_refcnt > 0) {

		/* tte assembly */
		KPM_TTE_VCACHED(tte.ll, pfn, TTE4M);

		/* tsb dropin */
		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT4M);

		if (kp->kp_refcntc == 0) {
			/* Set "go" flag for TL tsbmiss handler */
			sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock,
			    KPMTSBM_START);
		}
		ASSERT(kp->kp_refcntc == -1);
		error = 0;
	} else
		error = EFAULT;
exit:
	mutex_exit(&kpmp->khl_mutex);
	sfmmu_mlist_exit(pml);
	return (error);
}
/*
 * kpm fault handler for mappings with small page size.
 */
static int
sfmmu_kpm_fault_small(caddr_t vaddr, struct memseg *mseg, page_t *pp)
{
	int		error = 0;
	pgcnt_t		inx;
	kpm_spage_t	*ksp;
	kpm_shlk_t	*kpmsp;
	kmutex_t	*pml;
	pfn_t		pfn = pp->p_pagenum;
	tte_t		tte;
	kmutex_t	*pmtx;
	int		oldval;

	inx = pfn - mseg->kpm_pbase;
	ksp = &mseg->kpm_spages[inx];
	kpmsp = KPMP_SHASH(ksp);

	pml = sfmmu_mlist_enter(pp);

	if (!PP_ISMAPPED_KPM(pp)) {
		sfmmu_mlist_exit(pml);
		return (EFAULT);
	}

	/*
	 * kp_mapped lookup protected by mlist mutex
	 */
	if (ksp->kp_mapped == KPM_MAPPEDS) {
		/*
		 * Fast path tsbmiss
		 */
		ASSERT(!PP_ISKPMC(pp));
		ASSERT(!PP_ISNC(pp));

		/* tte assembly */
		KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);

		/* tsb dropin */
		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

	} else if (ksp->kp_mapped == KPM_MAPPEDSC) {
		/*
		 * Got here due to existing or gone kpm/hme VAC conflict.
		 * Recheck for vcolor conflicts. Return here w/ either
		 * no conflict, removed hme mapping chain (unload
		 * conflict) or uncached (uncache conflict). VACaches
		 * are cleaned and p_vcolor and PP_TNC are set accordingly
		 * for the conflict cases.
		 */
		sfmmu_kpm_vac_conflict(pp, vaddr);

		if (PP_ISNC(pp)) {
			/* ASSERT(pp->p_share); XXX use hat_page_getshare */

			/* tte assembly */
			KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);

			/* tsb dropin */
			sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

			oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
			    &kpmsp->kshl_lock, (KPM_MAPPED_GO | KPM_MAPPEDSC));

			if (oldval != KPM_MAPPEDSC)
				panic("sfmmu_kpm_fault_small: "
				    "stale smallpages mapping");
		} else {
			if (PP_ISKPMC(pp)) {
				pmtx = sfmmu_page_enter(pp);
				PP_CLRKPMC(pp);
				sfmmu_page_exit(pmtx);
			}

			/* tte assembly */
			KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);

			/* tsb dropin */
			sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

			oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
			    &kpmsp->kshl_lock, (KPM_MAPPED_GO | KPM_MAPPEDS));

			if (oldval != KPM_MAPPEDSC)
				panic("sfmmu_kpm_fault_small: "
				    "stale smallpages mapping");
		}

	} else {
		/*
		 * We got a tsbmiss on a not active kpm_page range.
		 * Let segkpm_fault decide how to panic.
		 */
		error = EFAULT;
	}

	sfmmu_mlist_exit(pml);
	return (error);
}
/*
 * Check/handle potential hme/kpm mapping conflicts
 */
static void
sfmmu_kpm_vac_conflict(page_t *pp, caddr_t vaddr)
{
	sfmmu_t		*tmphat;
	struct sf_hment	*sfhmep;
	struct sf_hment	*tmphme = NULL;
	struct hme_blk	*hmeblkp;
	tte_t		tte;
	int		vcolor;

	ASSERT(sfmmu_mlist_held(pp));

	vcolor = addr_to_vcolor(vaddr);
	if (PP_GET_VCOLOR(pp) == vcolor)
		return;

	/*
	 * There could be no vcolor conflict between a large cached
	 * hme page and a non alias range kpm page (neither large nor
	 * small mapped). So if a hme conflict already exists between
	 * a constituent page of a large hme mapping and a shared small
	 * conflicting hme mapping, both mappings must be already
	 * uncached at this point.
	 */
	ASSERT(!PP_ISMAPPED_LARGE(pp));

	if (!PP_ISMAPPED(pp)) {
		/*
		 * Previous hme user of page had a different color
		 * but since there are no current users
		 * we just flush the cache and change the color.
		 */
		SFMMU_STAT(sf_pgcolor_conflict);
		sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
		PP_SET_VCOLOR(pp, vcolor);
		return;
	}

	/*
	 * If we get here we have a vac conflict with a current hme
	 * mapping. This must have been established by forcing a wrong
	 * colored mapping, e.g. by using mmap(2) with MAP_FIXED.
	 */

	/*
	 * Check if any mapping is in same as or if it is locked
	 * since in that case we need to uncache.
	 */
	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
		tmphme = sfhmep->hme_next;
		if (IS_PAHME(sfhmep))
			continue;
		hmeblkp = sfmmu_hmetohblk(sfhmep);
		tmphat = hblktosfmmu(hmeblkp);
		sfmmu_copytte(&sfhmep->hme_tte, &tte);
		ASSERT(TTE_IS_VALID(&tte));
		if ((tmphat == ksfmmup) || hmeblkp->hblk_lckcnt) {
			/*
			 * We have an uncache conflict
			 */
			SFMMU_STAT(sf_uncache_conflict);
			sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
			return;
		}
	}

	/*
	 * We have an unload conflict
	 */
	SFMMU_STAT(sf_unload_conflict);

	for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
		tmphme = sfhmep->hme_next;
		if (IS_PAHME(sfhmep))
			continue;
		hmeblkp = sfmmu_hmetohblk(sfhmep);
		(void) sfmmu_pageunload(pp, sfhmep, TTE8K);
	}

	/*
	 * Unloads only do tlb flushes so we need to flush the
	 * dcache vcolor here.
	 */
	sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
	PP_SET_VCOLOR(pp, vcolor);
}
/*
 * Remove all kpm mappings using kpme's for pp and check that
 * all kpm mappings (w/ and w/o kpme's) are gone.
 */
void
sfmmu_kpm_pageunload(page_t *pp)
{
	caddr_t		vaddr;
	struct kpme	*kpme, *nkpme;

	ASSERT(pp != NULL);
	ASSERT(pp->p_kpmref);
	ASSERT(sfmmu_mlist_held(pp));

	vaddr = hat_kpm_page2va(pp, 1);

	for (kpme = pp->p_kpmelist; kpme; kpme = nkpme) {
		ASSERT(kpme->kpe_page == pp);

		if (pp->p_kpmref == 0)
			panic("sfmmu_kpm_pageunload: stale p_kpmref pp=%p "
			    "kpme=%p", (void *)pp, (void *)kpme);

		nkpme = kpme->kpe_next;

		/* Add instance callback here if needed later */
		sfmmu_kpme_sub(kpme, pp);
	}

	/*
	 * Also correct after mixed kpme/nonkpme mappings. If nonkpme
	 * segkpm clients have unlocked the page and forgot to mapout
	 * we panic here.
	 */
	if (pp->p_kpmref != 0)
		panic("sfmmu_kpm_pageunload: bad refcnt pp=%p", (void *)pp);

	sfmmu_kpm_mapout(pp, vaddr);
}
/*
 * Remove a large kpm mapping from kernel TSB and all TLB's.
 */
static void
sfmmu_kpm_demap_large(caddr_t vaddr)
{
	sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT4M);
	sfmmu_kpm_demap_tlbs(vaddr);
}

/*
 * Remove a small kpm mapping from kernel TSB and all TLB's.
 */
static void
sfmmu_kpm_demap_small(caddr_t vaddr)
{
	sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);
	sfmmu_kpm_demap_tlbs(vaddr);
}
/*
 * Demap a kpm mapping in all TLB's.
 */
static void
sfmmu_kpm_demap_tlbs(caddr_t vaddr)
{
	cpuset_t cpuset;

	kpreempt_disable();
	cpuset = ksfmmup->sfmmu_cpusran;
	CPUSET_AND(cpuset, cpu_ready_set);
	CPUSET_DEL(cpuset, CPU->cpu_id);
	SFMMU_XCALL_STATS(ksfmmup);

	xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)vaddr,
	    (uint64_t)ksfmmup);
	vtag_flushpage(vaddr, (uint64_t)ksfmmup);

	kpreempt_enable();
}
/*
 * Summary states used in sfmmu_kpm_vac_unload (KPM_VUL_*).
 * See also more detailed comments within the sfmmu_kpm_vac_unload switch.
 * Abbreviations used:
 * BIG:   Large page kpm mapping in use.
 * CONFL: VAC conflict(s) within a kpm_page.
 * INCR:  Count of conflicts within a kpm_page is going to be incremented.
 * DECR:  Count of conflicts within a kpm_page is going to be decremented.
 * UNMAP_SMALL: A small (regular page size) mapping is going to be unmapped.
 * TNC:   Temporary non cached: a kpm mapped page is mapped in TNC state.
 */
#define	KPM_VUL_BIG		(0)
#define	KPM_VUL_CONFL_INCR1	(KPM_KS)
#define	KPM_VUL_UNMAP_SMALL1	(KPM_KS | KPM_S)
#define	KPM_VUL_CONFL_INCR2	(KPM_KC)
#define	KPM_VUL_CONFL_INCR3	(KPM_KC | KPM_KS)
#define	KPM_VUL_UNMAP_SMALL2	(KPM_KC | KPM_KS | KPM_S)
#define	KPM_VUL_CONFL_DECR1	(KPM_KC | KPM_C)
#define	KPM_VUL_CONFL_DECR2	(KPM_KC | KPM_C | KPM_KS)
#define	KPM_VUL_TNC		(KPM_KC | KPM_C | KPM_KS | KPM_S)
/*
 * Handle VAC unload conflicts introduced by hme mappings or vice
 * versa when a hme conflict mapping is replaced by a non conflict
 * one. Perform actions and state transitions according to the
 * various page and kpm_page entry states. VACache flushes are in
 * the responsibility of the caller. We still hold the mlist lock.
 */
void
sfmmu_kpm_vac_unload(page_t *pp, caddr_t vaddr)
{
	kpm_page_t	*kp;
	kpm_hlk_t	*kpmp;
	caddr_t		kpmvaddr = hat_kpm_page2va(pp, 1);
	int		newcolor;
	kmutex_t	*pmtx;
	uint_t		vacunlcase;
	int		badstate = 0;
	kpm_spage_t	*ksp;
	kpm_shlk_t	*kpmsp;

	ASSERT(PAGE_LOCKED(pp));
	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(!PP_ISNC(pp));

	newcolor = addr_to_vcolor(kpmvaddr) != addr_to_vcolor(vaddr);
	if (kpm_smallpages)
		goto smallpages_vac_unload;

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	if (IS_KPM_ALIAS_RANGE(kpmvaddr)) {
		if (kp->kp_refcnta < 1) {
			panic("sfmmu_kpm_vac_unload: bad refcnta kpm_page=%p\n",
			    (void *)kp);
		}

		if (PP_ISKPMC(pp) == 0) {
			if (newcolor == 0)
				goto exit;
			sfmmu_kpm_demap_small(kpmvaddr);
			pmtx = sfmmu_page_enter(pp);
			PP_SETKPMC(pp);
			sfmmu_page_exit(pmtx);

		} else if (newcolor == 0) {
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMC(pp);
			sfmmu_page_exit(pmtx);

		} else {
			badstate++;
		}

		goto exit;
	}

	badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
	if (kp->kp_refcntc == -1) {
		/*
		 * We should come here only if trap level tsb miss
		 * handler is disabled.
		 */
		badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
		    PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
	} else {
		badstate |= (kp->kp_refcntc < 0);
	}

	if (badstate)
		goto exit;

	if (PP_ISKPMC(pp) == 0 && newcolor == 0) {
		ASSERT(PP_ISKPMS(pp) == 0);
		goto exit;
	}

	/*
	 * Combine the per kpm_page and per page kpm VAC states
	 * to a summary state in order to make the vac unload
	 * handling more concise.
	 */
	vacunlcase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
	    ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
	    (PP_ISKPMC(pp) ? KPM_C : 0) |
	    (PP_ISKPMS(pp) ? KPM_S : 0));

	switch (vacunlcase) {
	case KPM_VUL_BIG:				/* - - - - */
		/*
		 * Have to breakup the large page mapping to be
		 * able to handle the conflicting hme vaddr.
		 */
		if (kp->kp_refcntc == -1) {
			/* remove go indication */
			sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
			    &kpmp->khl_lock, KPMTSBM_STOP);
		}
		sfmmu_kpm_demap_large(kpmvaddr);

		ASSERT(kp->kp_refcntc == 0);
		kp->kp_refcntc++;
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);
		break;

	case KPM_VUL_UNMAP_SMALL1:			/* -  - ks s */
	case KPM_VUL_UNMAP_SMALL2:			/* kc - ks s */
		/*
		 * New conflict w/ an active kpm page, actually mapped
		 * in by small TSB/TLB entries. Remove the mapping and
		 * update states.
		 */
		ASSERT(newcolor);
		sfmmu_kpm_demap_small(kpmvaddr);
		kp->kp_refcnts--;
		kp->kp_refcnt++;
		kp->kp_refcntc++;
		pmtx = sfmmu_page_enter(pp);
		PP_CLRKPMS(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);
		break;

	case KPM_VUL_CONFL_INCR1:			/* -  - ks - */
	case KPM_VUL_CONFL_INCR2:			/* kc - -  - */
	case KPM_VUL_CONFL_INCR3:			/* kc - ks - */
		/*
		 * New conflict on an active kpm mapped page not yet in
		 * TSB/TLB. Mark page and increment the kpm_page conflict
		 * count.
		 */
		ASSERT(newcolor);
		kp->kp_refcntc++;
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);
		break;

	case KPM_VUL_CONFL_DECR1:			/* kc c -  - */
	case KPM_VUL_CONFL_DECR2:			/* kc c ks - */
		/*
		 * A conflicting hme mapping is removed for an active
		 * kpm page not yet in TSB/TLB. Unmark page and decrement
		 * the kpm_page conflict count.
		 */
		ASSERT(newcolor == 0);
		kp->kp_refcntc--;
		pmtx = sfmmu_page_enter(pp);
		PP_CLRKPMC(pp);
		sfmmu_page_exit(pmtx);
		break;

	case KPM_VUL_TNC:				/* kc c ks s */
		cmn_err(CE_NOTE, "sfmmu_kpm_vac_unload: "
		    "page not in NC state");
		/* FALLTHRU */

	default:
		badstate++;
	}
exit:
	if (badstate) {
		panic("sfmmu_kpm_vac_unload: inconsistent VAC state, "
		    "kpmvaddr=%p kp=%p pp=%p",
		    (void *)kpmvaddr, (void *)kp, (void *)pp);
	}
	mutex_exit(&kpmp->khl_mutex);

	return;

smallpages_vac_unload:
	if (newcolor == 0)
		return;

	PP2KPMSPG(pp, ksp);
	kpmsp = KPMP_SHASH(ksp);

	if (PP_ISKPMC(pp) == 0) {
		if (ksp->kp_mapped == KPM_MAPPEDS) {
			/*
			 * Stop TL tsbmiss handling
			 */
			(void) sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
			    &kpmsp->kshl_lock, KPM_MAPPEDSC);

			sfmmu_kpm_demap_small(kpmvaddr);

		} else if (ksp->kp_mapped != KPM_MAPPEDSC) {
			panic("sfmmu_kpm_vac_unload: inconsistent mapping");
		}

		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);

	} else {
		if (ksp->kp_mapped != KPM_MAPPEDSC)
			panic("sfmmu_kpm_vac_unload: inconsistent mapping");
	}
}
/*
 * Page is marked to be in VAC conflict to an existing kpm mapping
 * or is kpm mapped using only the regular pagesize. Called from
 * sfmmu_hblk_unload when a mlist is completely removed.
 */
void
sfmmu_kpm_hme_unload(page_t *pp)
{
	kpm_page_t	*kp;
	kpm_hlk_t	*kpmp;
	caddr_t		vaddr;
	kmutex_t	*pmtx;
	uint_t		flags;
	kpm_spage_t	*ksp;

	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(PP_ISMAPPED_KPM(pp));

	flags = pp->p_nrm & (P_KPMC | P_KPMS);
	if (kpm_smallpages)
		goto smallpages_hme_unload;

	if (flags == (P_KPMC | P_KPMS)) {
		panic("sfmmu_kpm_hme_unload: page should be uncached");

	} else if (flags == P_KPMS) {
		/*
		 * Page mapped small but not involved in VAC conflict
		 */
		return;
	}

	vaddr = hat_kpm_page2va(pp, 1);

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	if (IS_KPM_ALIAS_RANGE(vaddr)) {
		if (kp->kp_refcnta < 1) {
			panic("sfmmu_kpm_hme_unload: bad refcnta kpm_page=%p\n",
			    (void *)kp);
		}
	} else {
		if (kp->kp_refcntc < 1) {
			panic("sfmmu_kpm_hme_unload: bad refcntc kpm_page=%p\n",
			    (void *)kp);
		}
		kp->kp_refcntc--;
	}

	pmtx = sfmmu_page_enter(pp);
	PP_CLRKPMC(pp);
	sfmmu_page_exit(pmtx);

	mutex_exit(&kpmp->khl_mutex);
	return;

smallpages_hme_unload:
	if (flags != P_KPMC)
		panic("sfmmu_kpm_hme_unload: page should be uncached");

	vaddr = hat_kpm_page2va(pp, 1);
	PP2KPMSPG(pp, ksp);

	if (ksp->kp_mapped != KPM_MAPPEDSC)
		panic("sfmmu_kpm_hme_unload: inconsistent mapping");

	/*
	 * Keep KPM_MAPPEDSC until the next kpm tsbmiss where it
	 * prevents TL tsbmiss handling and forces a hat_kpm_fault.
	 * There we can start over again.
	 */

	pmtx = sfmmu_page_enter(pp);
	PP_CLRKPMC(pp);
	sfmmu_page_exit(pmtx);
}
/*
 * Special hooks for sfmmu_page_cache_array() when changing the
 * cacheability of a page. It is used to obey the hat_kpm lock
 * ordering (mlist -> kpmp -> spl, and back).
 */
kpm_hlk_t *
sfmmu_kpm_kpmp_enter(page_t *pp, pgcnt_t npages)
{
	kpm_page_t	*kp;
	kpm_hlk_t	*kpmp;

	ASSERT(sfmmu_mlist_held(pp));

	if (kpm_smallpages || PP_ISMAPPED_KPM(pp) == 0)
		return (NULL);

	ASSERT(npages <= kpmpnpgs);

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	return (kpmp);
}

void
sfmmu_kpm_kpmp_exit(kpm_hlk_t *kpmp)
{
	if (kpm_smallpages || kpmp == NULL)
		return;

	mutex_exit(&kpmp->khl_mutex);
}
/*
 * Summary states used in sfmmu_kpm_page_cache (KPM_*).
 * See also more detailed comments within the sfmmu_kpm_page_cache switch.
 * Abbreviations used:
 * UNC:     Input state for an uncache request.
 *   BIG:     Large page kpm mapping in use.
 *   SMALL:   Page has a small kpm mapping within a kpm_page range.
 *   NODEMAP: No demap needed.
 *   NOP:     No operation needed on this input state.
 * CACHE:   Input state for a re-cache request.
 *   MAPS:    Page is in TNC and kpm VAC conflict state and kpm mapped small.
 *   NOMAP:   Page is in TNC and kpm VAC conflict state, but not small kpm
 *            mapped.
 *   NOMAPO:  Page is in TNC and kpm VAC conflict state, but not small kpm
 *            mapped. There are also other small kpm mappings within this
 *            kpm_page.
 */
#define	KPM_UNC_BIG		(0)
#define	KPM_UNC_NODEMAP1	(KPM_KS)
#define	KPM_UNC_SMALL1		(KPM_KS | KPM_S)
#define	KPM_UNC_NODEMAP2	(KPM_KC)
#define	KPM_UNC_NODEMAP3	(KPM_KC | KPM_KS)
#define	KPM_UNC_SMALL2		(KPM_KC | KPM_KS | KPM_S)
#define	KPM_UNC_NOP1		(KPM_KC | KPM_C)
#define	KPM_UNC_NOP2		(KPM_KC | KPM_C | KPM_KS)
#define	KPM_CACHE_NOMAP		(KPM_KC | KPM_C)
#define	KPM_CACHE_NOMAPO	(KPM_KC | KPM_C | KPM_KS)
#define	KPM_CACHE_MAPS		(KPM_KC | KPM_C | KPM_KS | KPM_S)
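
/*
 * Example: an uncache request (flags == HAT_TMPNC) for a page that is
 * small kpm mapped (P_KPMS, KPM_S) within a kpm_page that has other
 * small mappings (kp_refcnts > 0, KPM_KS) gives KPM_KS | KPM_S ==
 * KPM_UNC_SMALL1: the small mapping must be demapped before the page
 * goes to TNC state.
 */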
/*
 * This function is called when the virtual cacheability of a page
 * is changed and the page has an active kpm mapping. The mlist mutex,
 * the spl hash lock and the kpmp mutex (if needed) are already grabbed.
 */
/*ARGSUSED2*/
void
sfmmu_kpm_page_cache(page_t *pp, int flags, int cache_flush_tag)
{
	kpm_page_t	*kp;
	kpm_hlk_t	*kpmp;
	caddr_t		kpmvaddr;
	kmutex_t	*pmtx;
	uint_t		pgcacase;
	kpm_spage_t	*ksp;
	kpm_shlk_t	*kpmsp;
	int		oldval;
	int		badstate = 0;

	ASSERT(PP_ISMAPPED_KPM(pp));
	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(sfmmu_page_spl_held(pp));

	if (flags != HAT_TMPNC && flags != HAT_CACHE)
		panic("sfmmu_kpm_page_cache: bad flags");

	kpmvaddr = hat_kpm_page2va(pp, 1);

	if (flags == HAT_TMPNC && cache_flush_tag == CACHE_FLUSH) {
		pfn_t pfn = pp->p_pagenum;
		int vcolor = addr_to_vcolor(kpmvaddr);
		cpuset_t cpuset = cpu_ready_set;

		/* Flush vcolor in DCache */
		CPUSET_DEL(cpuset, CPU->cpu_id);
		SFMMU_XCALL_STATS(ksfmmup);
		xt_some(cpuset, vac_flushpage_tl1, pfn, vcolor);
		vac_flushpage(pfn, vcolor);
	}

	if (kpm_smallpages)
		goto smallpages_page_cache;

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	ASSERT(MUTEX_HELD(&kpmp->khl_mutex));

	if (IS_KPM_ALIAS_RANGE(kpmvaddr)) {
		if (kp->kp_refcnta < 1) {
			panic("sfmmu_kpm_page_cache: bad refcnta "
			    "kpm_page=%p\n", (void *)kp);
		}
		sfmmu_kpm_demap_small(kpmvaddr);
		if (flags == HAT_TMPNC) {
			PP_SETKPMC(pp);
			ASSERT(!PP_ISKPMS(pp));
		} else {
			ASSERT(PP_ISKPMC(pp));
			PP_CLRKPMC(pp);
		}
		goto exit;
	}

	badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
	if (kp->kp_refcntc == -1) {
		/*
		 * We should come here only if trap level tsb miss
		 * handler is disabled.
		 */
		badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
		    PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
	} else {
		badstate |= (kp->kp_refcntc < 0);
	}

	if (badstate)
		goto exit;

	/*
	 * Combine the per kpm_page and per page kpm VAC states to
	 * a summary state in order to make the VAC cache/uncache
	 * handling more concise.
	 */
	pgcacase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
	    ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
	    (PP_ISKPMC(pp) ? KPM_C : 0) |
	    (PP_ISKPMS(pp) ? KPM_S : 0));

	if (flags == HAT_CACHE) {
		switch (pgcacase) {
		case KPM_CACHE_MAPS:			/* kc c ks s */
			sfmmu_kpm_demap_small(kpmvaddr);
			if (kp->kp_refcnts < 1) {
				panic("sfmmu_kpm_page_cache: bad refcnts "
				    "kpm_page=%p\n", (void *)kp);
			}
			kp->kp_refcnts--;
			kp->kp_refcnt++;
			PP_CLRKPMS(pp);
			/* FALLTHRU */

		case KPM_CACHE_NOMAP:			/* kc c -  - */
		case KPM_CACHE_NOMAPO:			/* kc c ks - */
			kp->kp_refcntc--;
			PP_CLRKPMC(pp);
			break;

		default:
			badstate++;
		}
		goto exit;
	}

	switch (pgcacase) {
	case KPM_UNC_BIG:				/* - - - - */
		if (kp->kp_refcnt < 1) {
			panic("sfmmu_kpm_page_cache: bad refcnt "
			    "kpm_page=%p\n", (void *)kp);
		}

		/*
		 * Have to breakup the large page mapping in preparation
		 * to the upcoming TNC mode handled by small mappings.
		 * The demap can already be done due to another conflict
		 * within the kpm_page.
		 */
		if (kp->kp_refcntc == -1) {
			/* remove go indication */
			sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
			    &kpmp->khl_lock, KPMTSBM_STOP);
		}
		ASSERT(kp->kp_refcntc == 0);
		sfmmu_kpm_demap_large(kpmvaddr);
		kp->kp_refcntc++;
		PP_SETKPMC(pp);
		break;

	case KPM_UNC_SMALL1:				/* -  - ks s */
	case KPM_UNC_SMALL2:				/* kc - ks s */
		/*
		 * Have to demap an already small kpm mapping in preparation
		 * to the upcoming TNC mode. The demap can already be done
		 * due to another conflict within the kpm_page.
		 */
		sfmmu_kpm_demap_small(kpmvaddr);
		kp->kp_refcntc++;
		kp->kp_refcnts--;
		kp->kp_refcnt++;
		PP_CLRKPMS(pp);
		PP_SETKPMC(pp);
		break;

	case KPM_UNC_NODEMAP1:				/* -  - ks - */
		/* fallthru */

	case KPM_UNC_NODEMAP2:				/* kc - -  - */
	case KPM_UNC_NODEMAP3:				/* kc - ks - */
		kp->kp_refcntc++;
		PP_SETKPMC(pp);
		break;

	case KPM_UNC_NOP1:				/* kc c -  - */
	case KPM_UNC_NOP2:				/* kc c ks - */
		break;

	default:
		badstate++;
	}
exit:
	if (badstate) {
		panic("sfmmu_kpm_page_cache: inconsistent VAC state "
		    "kpmvaddr=%p kp=%p pp=%p", (void *)kpmvaddr,
		    (void *)kp, (void *)pp);
	}
	return;

smallpages_page_cache:
	PP2KPMSPG(pp, ksp);
	kpmsp = KPMP_SHASH(ksp);

	/*
	 * Mark it as nogo; we will fault in and resolve it
	 * through sfmmu_kpm_fault_small.
	 */
	oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag, &kpmsp->kshl_lock,
	    KPM_MAPPEDSC);

	if (!(oldval == KPM_MAPPEDS || oldval == KPM_MAPPEDSC))
		panic("smallpages_page_cache: inconsistent mapping");

	sfmmu_kpm_demap_small(kpmvaddr);

	if (flags == HAT_TMPNC) {
		PP_SETKPMC(pp);
		ASSERT(!PP_ISKPMS(pp));

	} else {
		ASSERT(PP_ISKPMC(pp));
		PP_CLRKPMC(pp);
	}

	/*
	 * Keep KPM_MAPPEDSC until the next kpm tsbmiss where it
	 * prevents TL tsbmiss handling and forces a hat_kpm_fault.
	 * There we can start over again.
	 */
}