/*
 * Copyright (C) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/** @addtogroup sparc64mm
 * @{
 */
#include <arch/mm/tlb.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/mmu.h>
#include <arch/mm/tsb.h>
#include <arch/interrupt.h>
#include <arch/types.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <interrupt.h>
#include <mm/as.h>
#include <mm/page.h>
#include <print.h>
#include <panic.h>
58 static void dtlb_pte_copy(pte_t
*t
, bool ro
);
59 static void itlb_pte_copy(pte_t
*t
);
60 static void do_fast_instruction_access_mmu_miss_fault(istate_t
*istate
, const char *str
);
61 static void do_fast_data_access_mmu_miss_fault(istate_t
*istate
, tlb_tag_access_reg_t tag
, const char *str
);
62 static void do_fast_data_access_protection_fault(istate_t
*istate
, tlb_tag_access_reg_t tag
, const char *str
);
/*
 * Human-readable names for the four MMU context selectors, indexed by the
 * two-bit context encoding.
 * NOTE(review): initializer contents were lost in a garbled copy and were
 * reconstructed; verify against the upstream revision.
 */
char *context_encoding[] = {
	"Primary",
	"Secondary",
	"Nucleus",
	"Reserved"
};
/** Architecture-specific TLB initialization.
 *
 * NOTE(review): body reconstructed from a garbled copy — only the comment
 * survived; confirm no further initialization steps were dropped.
 */
void tlb_arch_init(void)
{
	/*
	 * Invalidate all non-locked DTLB and ITLB entries.
	 */
	tlb_invalidate_all();
}
85 /** Insert privileged mapping into DMMU TLB.
87 * @param page Virtual page address.
88 * @param frame Physical frame address.
89 * @param pagesize Page size.
90 * @param locked True for permanent mappings, false otherwise.
91 * @param cacheable True if the mapping is cacheable, false otherwise.
93 void dtlb_insert_mapping(uintptr_t page
, uintptr_t frame
, int pagesize
, bool locked
, bool cacheable
)
95 tlb_tag_access_reg_t tag
;
103 tag
.value
= ASID_KERNEL
;
106 dtlb_tag_access_write(tag
.value
);
110 data
.size
= pagesize
;
114 #ifdef CONFIG_VIRT_IDX_DCACHE
116 #endif /* CONFIG_VIRT_IDX_DCACHE */
121 dtlb_data_in_write(data
.value
);
126 * @param t Page Table Entry to be copied.
127 * @param ro If true, the entry will be created read-only, regardless of its w field.
129 void dtlb_pte_copy(pte_t
*t
, bool ro
)
131 tlb_tag_access_reg_t tag
;
136 pg
.address
= t
->page
;
137 fr
.address
= t
->frame
;
140 tag
.context
= t
->as
->asid
;
143 dtlb_tag_access_write(tag
.value
);
147 data
.size
= PAGESIZE_8K
;
151 #ifdef CONFIG_VIRT_IDX_DCACHE
153 #endif /* CONFIG_VIRT_IDX_DCACHE */
154 data
.p
= t
->k
; /* p like privileged */
155 data
.w
= ro
? false : t
->w
;
158 dtlb_data_in_write(data
.value
);
161 /** Copy PTE to ITLB.
163 * @param t Page Table Entry to be copied.
165 void itlb_pte_copy(pte_t
*t
)
167 tlb_tag_access_reg_t tag
;
172 pg
.address
= t
->page
;
173 fr
.address
= t
->frame
;
176 tag
.context
= t
->as
->asid
;
179 itlb_tag_access_write(tag
.value
);
183 data
.size
= PAGESIZE_8K
;
187 data
.p
= t
->k
; /* p like privileged */
191 itlb_data_in_write(data
.value
);
194 /** ITLB miss handler. */
195 void fast_instruction_access_mmu_miss(int n
, istate_t
*istate
)
197 uintptr_t va
= ALIGN_DOWN(istate
->tpc
, PAGE_SIZE
);
200 page_table_lock(AS
, true);
201 t
= page_mapping_find(AS
, va
);
202 if (t
&& PTE_EXECUTABLE(t
)) {
204 * The mapping was found in the software page hash table.
205 * Insert it into ITLB.
212 page_table_unlock(AS
, true);
215 * Forward the page fault to the address space page fault handler.
217 page_table_unlock(AS
, true);
218 if (as_page_fault(va
, PF_ACCESS_EXEC
, istate
) == AS_PF_FAULT
) {
219 do_fast_instruction_access_mmu_miss_fault(istate
, __FUNCTION__
);
224 /** DTLB miss handler.
226 * Note that some faults (e.g. kernel faults) were already resolved
227 * by the low-level, assembly language part of the fast_data_access_mmu_miss
230 void fast_data_access_mmu_miss(int n
, istate_t
*istate
)
232 tlb_tag_access_reg_t tag
;
236 tag
.value
= dtlb_tag_access_read();
237 va
= tag
.vpn
<< PAGE_WIDTH
;
239 if (tag
.context
== ASID_KERNEL
) {
241 /* NULL access in kernel */
242 do_fast_data_access_mmu_miss_fault(istate
, tag
, __FUNCTION__
);
244 do_fast_data_access_mmu_miss_fault(istate
, tag
, "Unexpected kernel page fault.");
247 page_table_lock(AS
, true);
248 t
= page_mapping_find(AS
, va
);
251 * The mapping was found in the software page hash table.
252 * Insert it into DTLB.
255 dtlb_pte_copy(t
, true);
257 dtsb_pte_copy(t
, true);
259 page_table_unlock(AS
, true);
262 * Forward the page fault to the address space page fault handler.
264 page_table_unlock(AS
, true);
265 if (as_page_fault(va
, PF_ACCESS_READ
, istate
) == AS_PF_FAULT
) {
266 do_fast_data_access_mmu_miss_fault(istate
, tag
, __FUNCTION__
);
271 /** DTLB protection fault handler. */
272 void fast_data_access_protection(int n
, istate_t
*istate
)
274 tlb_tag_access_reg_t tag
;
278 tag
.value
= dtlb_tag_access_read();
279 va
= tag
.vpn
<< PAGE_WIDTH
;
281 page_table_lock(AS
, true);
282 t
= page_mapping_find(AS
, va
);
283 if (t
&& PTE_WRITABLE(t
)) {
285 * The mapping was found in the software page hash table and is writable.
286 * Demap the old mapping and insert an updated mapping into DTLB.
290 dtlb_demap(TLB_DEMAP_PAGE
, TLB_DEMAP_SECONDARY
, va
);
291 dtlb_pte_copy(t
, false);
293 dtsb_pte_copy(t
, false);
295 page_table_unlock(AS
, true);
298 * Forward the page fault to the address space page fault handler.
300 page_table_unlock(AS
, true);
301 if (as_page_fault(va
, PF_ACCESS_WRITE
, istate
) == AS_PF_FAULT
) {
302 do_fast_data_access_protection_fault(istate
, tag
, __FUNCTION__
);
307 /** Print contents of both TLBs. */
312 tlb_tag_read_reg_t t
;
314 printf("I-TLB contents:\n");
315 for (i
= 0; i
< ITLB_ENTRY_COUNT
; i
++) {
316 d
.value
= itlb_data_access_read(i
);
317 t
.value
= itlb_tag_read_read(i
);
319 printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n",
320 i
, t
.vpn
, t
.context
, d
.v
, d
.size
, d
.nfo
, d
.ie
, d
.soft2
, d
.diag
, d
.pfn
, d
.soft
, d
.l
, d
.cp
, d
.cv
, d
.e
, d
.p
, d
.w
, d
.g
);
323 printf("D-TLB contents:\n");
324 for (i
= 0; i
< DTLB_ENTRY_COUNT
; i
++) {
325 d
.value
= dtlb_data_access_read(i
);
326 t
.value
= dtlb_tag_read_read(i
);
328 printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n",
329 i
, t
.vpn
, t
.context
, d
.v
, d
.size
, d
.nfo
, d
.ie
, d
.soft2
, d
.diag
, d
.pfn
, d
.soft
, d
.l
, d
.cp
, d
.cv
, d
.e
, d
.p
, d
.w
, d
.g
);
334 void do_fast_instruction_access_mmu_miss_fault(istate_t
*istate
, const char *str
)
336 fault_if_from_uspace(istate
, "%s\n", str
);
341 void do_fast_data_access_mmu_miss_fault(istate_t
*istate
, tlb_tag_access_reg_t tag
, const char *str
)
345 va
= tag
.vpn
<< PAGE_WIDTH
;
347 fault_if_from_uspace(istate
, "%s, Page=%p (ASID=%d)\n", str
, va
, tag
.context
);
349 printf("Faulting page: %p, ASID=%d\n", va
, tag
.context
);
353 void do_fast_data_access_protection_fault(istate_t
*istate
, tlb_tag_access_reg_t tag
, const char *str
)
357 va
= tag
.vpn
<< PAGE_WIDTH
;
359 fault_if_from_uspace(istate
, "%s, Page=%p (ASID=%d)\n", str
, va
, tag
.context
);
360 printf("Faulting page: %p, ASID=%d\n", va
, tag
.context
);
365 void dump_sfsr_and_sfar(void)
370 sfsr
.value
= dtlb_sfsr_read();
371 sfar
= dtlb_sfar_read();
373 printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, fv=%d\n",
374 sfsr
.asi
, sfsr
.ft
, sfsr
.e
, sfsr
.ct
, sfsr
.pr
, sfsr
.w
, sfsr
.ow
, sfsr
.fv
);
375 printf("DTLB SFAR: address=%p\n", sfar
);
380 /** Invalidate all unlocked ITLB and DTLB entries. */
381 void tlb_invalidate_all(void)
385 tlb_tag_read_reg_t t
;
387 for (i
= 0; i
< ITLB_ENTRY_COUNT
; i
++) {
388 d
.value
= itlb_data_access_read(i
);
390 t
.value
= itlb_tag_read_read(i
);
392 itlb_tag_access_write(t
.value
);
393 itlb_data_access_write(i
, d
.value
);
397 for (i
= 0; i
< DTLB_ENTRY_COUNT
; i
++) {
398 d
.value
= dtlb_data_access_read(i
);
400 t
.value
= dtlb_tag_read_read(i
);
402 dtlb_tag_access_write(t
.value
);
403 dtlb_data_access_write(i
, d
.value
);
409 /** Invalidate all ITLB and DTLB entries that belong to specified ASID (Context).
411 * @param asid Address Space ID.
413 void tlb_invalidate_asid(asid_t asid
)
415 tlb_context_reg_t pc_save
, ctx
;
417 /* switch to nucleus because we are mapped by the primary context */
420 ctx
.v
= pc_save
.v
= mmu_primary_context_read();
422 mmu_primary_context_write(ctx
.v
);
424 itlb_demap(TLB_DEMAP_CONTEXT
, TLB_DEMAP_PRIMARY
, 0);
425 dtlb_demap(TLB_DEMAP_CONTEXT
, TLB_DEMAP_PRIMARY
, 0);
427 mmu_primary_context_write(pc_save
.v
);
432 /** Invalidate all ITLB and DTLB entries for specified page range in specified address space.
434 * @param asid Address Space ID.
435 * @param page First page which to sweep out from ITLB and DTLB.
436 * @param cnt Number of ITLB and DTLB entries to invalidate.
438 void tlb_invalidate_pages(asid_t asid
, uintptr_t page
, count_t cnt
)
441 tlb_context_reg_t pc_save
, ctx
;
443 /* switch to nucleus because we are mapped by the primary context */
446 ctx
.v
= pc_save
.v
= mmu_primary_context_read();
448 mmu_primary_context_write(ctx
.v
);
450 for (i
= 0; i
< cnt
; i
++) {
451 itlb_demap(TLB_DEMAP_PAGE
, TLB_DEMAP_PRIMARY
, page
+ i
* PAGE_SIZE
);
452 dtlb_demap(TLB_DEMAP_PAGE
, TLB_DEMAP_PRIMARY
, page
+ i
* PAGE_SIZE
);
455 mmu_primary_context_write(pc_save
.v
);