2 * Copyright (c) 2005 Jakub Jermar
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 /** @addtogroup sparc64mm
35 #include <arch/mm/tlb.h>
39 #include <arch/mm/frame.h>
40 #include <arch/mm/page.h>
41 #include <arch/mm/mmu.h>
42 #include <arch/interrupt.h>
43 #include <interrupt.h>
46 #include <arch/types.h>
49 #include <arch/trap/trap.h>
50 #include <arch/trap/exception.h>
55 #include <arch/mm/tsb.h>
/* Forward declarations for the static helpers defined later in this file. */
58 static void dtlb_pte_copy(pte_t
*t
, bool ro
);
59 static void itlb_pte_copy(pte_t
*t
);
/* NOTE(review): the next prototype is truncated in this listing — the
 * original continuation line (line 61) is missing. */
60 static void do_fast_instruction_access_mmu_miss_fault(istate_t
*istate
, const
62 static void do_fast_data_access_mmu_miss_fault(istate_t
*istate
,
63 tlb_tag_access_reg_t tag
, const char *str
);
64 static void do_fast_data_access_protection_fault(istate_t
*istate
,
65 tlb_tag_access_reg_t tag
, const char *str
);
/* Table of human-readable names, indexed by MMU context number — presumably
 * used when printing TLB state; the initializer list is elided in this
 * listing (TODO confirm against the full file). */
67 char *context_encoding
[] = {
/* Architecture-specific TLB initialization hook.
 * NOTE(review): the body is elided in this listing; only the comment
 * fragment below survives, which says it invalidates all non-locked
 * DTLB and ITLB entries. */
74 void tlb_arch_init(void)
77 * Invalidate all non-locked DTLB and ITLB entries.
88 /** Insert privileged mapping into DMMU TLB.
90 * @param page Virtual page address.
91 * @param frame Physical frame address.
92 * @param pagesize Page size.
93 * @param locked True for permanent mappings, false otherwise.
94 * @param cacheable True if the mapping is cacheable, false otherwise.
96 void dtlb_insert_mapping(uintptr_t page
, uintptr_t frame
, int pagesize
, bool
97 locked
, bool cacheable
)
99 tlb_tag_access_reg_t tag
;
/* NOTE(review): several original lines are elided here (the data register
 * declaration and most of its field assignments, e.g. for page/frame,
 * locked and cacheable). */
/* Tag the entry with the kernel address space identifier. */
107 tag
.value
= ASID_KERNEL
;
/* Program the tag access register before writing data-in. */
110 dtlb_tag_access_write(tag
.value
);
114 data
.size
= pagesize
;
118 #ifdef CONFIG_VIRT_IDX_DCACHE
120 #endif /* CONFIG_VIRT_IDX_DCACHE */
/* Writing the data-in register commits the new entry to the DTLB. */
125 dtlb_data_in_write(data
.value
);
/* Copy a software page-table entry into the DTLB. */
130 * @param t Page Table Entry to be copied.
131 * @param ro If true, the entry will be created read-only, regardless of its w
134 void dtlb_pte_copy(pte_t
*t
, bool ro
)
136 tlb_tag_access_reg_t tag
;
/* NOTE(review): declarations of pg, fr and data are elided in this listing. */
/* Split the PTE into page (virtual) and frame (physical) addresses. */
141 pg
.address
= t
->page
;
142 fr
.address
= t
->frame
;
/* Tag the entry with the owning address space's ASID. */
145 tag
.context
= t
->as
->asid
;
148 dtlb_tag_access_write(tag
.value
);
152 data
.size
= PAGESIZE_8K
;
156 #ifdef CONFIG_VIRT_IDX_DCACHE
158 #endif /* CONFIG_VIRT_IDX_DCACHE */
159 data
.p
= t
->k
; /* p like privileged */
/* Force read-only when the caller requests it (e.g. for copy-on-write /
 * dirty tracking — TODO confirm caller intent), otherwise honor the PTE. */
160 data
.w
= ro
? false : t
->w
;
/* Writing the data-in register commits the new entry to the DTLB. */
163 dtlb_data_in_write(data
.value
);
166 /** Copy PTE to ITLB.
168 * @param t Page Table Entry to be copied.
170 void itlb_pte_copy(pte_t
*t
)
172 tlb_tag_access_reg_t tag
;
/* NOTE(review): declarations of pg, fr and data are elided in this listing. */
/* Split the PTE into page (virtual) and frame (physical) addresses. */
177 pg
.address
= t
->page
;
178 fr
.address
= t
->frame
;
/* Tag the entry with the owning address space's ASID. */
181 tag
.context
= t
->as
->asid
;
184 itlb_tag_access_write(tag
.value
);
188 data
.size
= PAGESIZE_8K
;
192 data
.p
= t
->k
; /* p like privileged */
/* Writing the data-in register commits the new entry to the ITLB. */
196 itlb_data_in_write(data
.value
);
199 /** ITLB miss handler. */
200 void fast_instruction_access_mmu_miss(int n
, istate_t
*istate
)
/* The faulting virtual address is the trap PC rounded down to a page. */
202 uintptr_t va
= ALIGN_DOWN(istate
->tpc
, PAGE_SIZE
);
/* Look the page up in the software page hash table under the lock. */
205 page_table_lock(AS
, true);
206 t
= page_mapping_find(AS
, va
);
207 if (t
&& PTE_EXECUTABLE(t
)) {
209 * The mapping was found in the software page hash table.
210 * Insert it into ITLB.
/* NOTE(review): the actual ITLB insertion calls are elided in this
 * listing (presumably itlb_pte_copy() — confirm against full file). */
217 page_table_unlock(AS
, true);
/* No executable mapping found: let the generic page fault handler
 * try to resolve it; panic path is taken on AS_PF_FAULT. */
220 * Forward the page fault to the address space page fault
223 page_table_unlock(AS
, true);
224 if (as_page_fault(va
, PF_ACCESS_EXEC
, istate
) == AS_PF_FAULT
) {
225 do_fast_instruction_access_mmu_miss_fault(istate
,
231 /** DTLB miss handler.
233 * Note that some faults (e.g. kernel faults) were already resolved by the
234 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
236 void fast_data_access_mmu_miss(int n
, istate_t
*istate
)
238 tlb_tag_access_reg_t tag
;
/* Recover the faulting virtual address from the DTLB tag access register. */
242 tag
.value
= dtlb_tag_access_read();
243 va
= tag
.vpn
<< PAGE_WIDTH
;
/* A miss in the kernel context here is unexpected — the assembly fast
 * path should have handled it; distinguish NULL access for diagnostics. */
245 if (tag
.context
== ASID_KERNEL
) {
247 /* NULL access in kernel */
248 do_fast_data_access_mmu_miss_fault(istate
, tag
,
251 do_fast_data_access_mmu_miss_fault(istate
, tag
, "Unexpected "
252 "kernel page fault.");
/* Userspace miss: look the page up in the software page hash table. */
255 page_table_lock(AS
, true);
256 t
= page_mapping_find(AS
, va
);
259 * The mapping was found in the software page hash table.
260 * Insert it into DTLB.
/* Insert read-only first; a later write triggers the protection
 * handler which upgrades the entry (see fast_data_access_protection). */
263 dtlb_pte_copy(t
, true);
/* Mirror the entry into the software TSB (read-only as well). */
265 dtsb_pte_copy(t
, true);
267 page_table_unlock(AS
, true);
/* No mapping: forward to the generic page fault handler. */
270 * Forward the page fault to the address space page fault handler.
272 page_table_unlock(AS
, true);
273 if (as_page_fault(va
, PF_ACCESS_READ
, istate
) == AS_PF_FAULT
) {
274 do_fast_data_access_mmu_miss_fault(istate
, tag
,
280 /** DTLB protection fault handler. */
281 void fast_data_access_protection(int n
, istate_t
*istate
)
283 tlb_tag_access_reg_t tag
;
/* Recover the faulting virtual address from the DTLB tag access register. */
287 tag
.value
= dtlb_tag_access_read();
288 va
= tag
.vpn
<< PAGE_WIDTH
;
290 page_table_lock(AS
, true);
291 t
= page_mapping_find(AS
, va
);
292 if (t
&& PTE_WRITABLE(t
)) {
294 * The mapping was found in the software page hash table and is
295 * writable. Demap the old mapping and insert an updated mapping
/* Drop the stale (read-only) entry before inserting the writable one. */
300 dtlb_demap(TLB_DEMAP_PAGE
, TLB_DEMAP_SECONDARY
, va
);
/* ro == false: insert the entry with its real writable permission. */
301 dtlb_pte_copy(t
, false);
/* Keep the software TSB in sync with the upgraded entry. */
303 dtsb_pte_copy(t
, false);
305 page_table_unlock(AS
, true);
/* Not writable (or unmapped): forward to the generic fault handler. */
308 * Forward the page fault to the address space page fault
311 page_table_unlock(AS
, true);
312 if (as_page_fault(va
, PF_ACCESS_WRITE
, istate
) == AS_PF_FAULT
) {
313 do_fast_data_access_protection_fault(istate
, tag
,
319 /** Print contents of both TLBs. */
/* NOTE(review): the function header line and the declarations of i and d
 * are elided in this listing; only the tag register declaration survives. */
324 tlb_tag_read_reg_t t
;
/* Walk every ITLB entry via the diagnostic data/tag access registers. */
326 printf("I-TLB contents:\n");
327 for (i
= 0; i
< ITLB_ENTRY_COUNT
; i
++) {
328 d
.value
= itlb_data_access_read(i
);
329 t
.value
= itlb_tag_read_read(i
);
331 printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
332 "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
333 "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i
, t
.vpn
,
334 t
.context
, d
.v
, d
.size
, d
.nfo
, d
.ie
, d
.soft2
, d
.diag
,
335 d
.pfn
, d
.soft
, d
.l
, d
.cp
, d
.cv
, d
.e
, d
.p
, d
.w
, d
.g
);
/* Same walk for the DTLB. */
338 printf("D-TLB contents:\n");
339 for (i
= 0; i
< DTLB_ENTRY_COUNT
; i
++) {
340 d
.value
= dtlb_data_access_read(i
);
341 t
.value
= dtlb_tag_read_read(i
);
343 printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
344 "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
345 "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i
, t
.vpn
,
346 t
.context
, d
.v
, d
.size
, d
.nfo
, d
.ie
, d
.soft2
, d
.diag
,
347 d
.pfn
, d
.soft
, d
.l
, d
.cp
, d
.cv
, d
.e
, d
.p
, d
.w
, d
.g
);
/* Report a fatal ITLB miss: log if the fault came from userspace.
 * NOTE(review): the prototype continuation and the rest of the body
 * (presumably a panic/dump path) are elided in this listing. */
352 void do_fast_instruction_access_mmu_miss_fault(istate_t
*istate
, const char
355 fault_if_from_uspace(istate
, "%s\n", str
);
/* Report a fatal DTLB miss, including the faulting page and ASID decoded
 * from the tag access register snapshot. */
360 void do_fast_data_access_mmu_miss_fault(istate_t
*istate
, tlb_tag_access_reg_t
361 tag
, const char *str
)
/* Reconstruct the faulting virtual address from the tag's VPN. */
365 va
= tag
.vpn
<< PAGE_WIDTH
;
367 fault_if_from_uspace(istate
, "%s, Page=%p (ASID=%d)\n", str
, va
,
/* NOTE(review): lines between the two reports (likely dump/panic calls)
 * are elided in this listing. */
370 printf("Faulting page: %p, ASID=%d\n", va
, tag
.context
);
/* Report a fatal DTLB protection fault; mirrors the miss-fault reporter. */
374 void do_fast_data_access_protection_fault(istate_t
*istate
, tlb_tag_access_reg_t
375 tag
, const char *str
)
/* Reconstruct the faulting virtual address from the tag's VPN. */
379 va
= tag
.vpn
<< PAGE_WIDTH
;
381 fault_if_from_uspace(istate
, "%s, Page=%p (ASID=%d)\n", str
, va
,
/* NOTE(review): lines between the two reports (likely dump/panic calls)
 * are elided in this listing. */
383 printf("Faulting page: %p, ASID=%d\n", va
, tag
.context
);
/* Dump the DMMU Synchronous Fault Status and Fault Address registers —
 * diagnostic aid for unexpected data faults. */
388 void dump_sfsr_and_sfar(void)
/* NOTE(review): declarations of sfsr and sfar are elided in this listing. */
393 sfsr
.value
= dtlb_sfsr_read();
394 sfar
= dtlb_sfar_read();
/* Decode and print the individual SFSR fields. */
396 printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
397 "fv=%d\n", sfsr
.asi
, sfsr
.ft
, sfsr
.e
, sfsr
.ct
, sfsr
.pr
, sfsr
.w
,
399 printf("DTLB SFAR: address=%p\n", sfar
);
404 /** Invalidate all unlocked ITLB and DTLB entries. */
405 void tlb_invalidate_all(void)
/* NOTE(review): declarations of i and d are elided in this listing. */
409 tlb_tag_read_reg_t t
;
/* For each ITLB entry: read it back, then rewrite it — the elided lines
 * in between presumably clear the valid bit so the write-back invalidates
 * the entry (TODO confirm against full file). Locked entries are skipped
 * per the function contract above. */
411 for (i
= 0; i
< ITLB_ENTRY_COUNT
; i
++) {
412 d
.value
= itlb_data_access_read(i
);
414 t
.value
= itlb_tag_read_read(i
);
416 itlb_tag_access_write(t
.value
);
417 itlb_data_access_write(i
, d
.value
);
/* Same procedure for the DTLB. */
421 for (i
= 0; i
< DTLB_ENTRY_COUNT
; i
++) {
422 d
.value
= dtlb_data_access_read(i
);
424 t
.value
= dtlb_tag_read_read(i
);
426 dtlb_tag_access_write(t
.value
);
427 dtlb_data_access_write(i
, d
.value
);
433 /** Invalidate all ITLB and DTLB entries that belong to specified ASID
436 * @param asid Address Space ID.
438 void tlb_invalidate_asid(asid_t asid
)
440 tlb_context_reg_t pc_save
, ctx
;
442 /* switch to nucleus because we are mapped by the primary context */
/* Save the current primary context, then — in elided lines — presumably
 * install the target ASID as the primary context (TODO confirm). */
445 ctx
.v
= pc_save
.v
= mmu_primary_context_read();
447 mmu_primary_context_write(ctx
.v
);
/* Context-wide demap flushes every entry tagged with the primary context. */
449 itlb_demap(TLB_DEMAP_CONTEXT
, TLB_DEMAP_PRIMARY
, 0);
450 dtlb_demap(TLB_DEMAP_CONTEXT
, TLB_DEMAP_PRIMARY
, 0);
/* Restore the previously active primary context. */
452 mmu_primary_context_write(pc_save
.v
);
457 /** Invalidate all ITLB and DTLB entries for specified page range in specified
460 * @param asid Address Space ID.
461 * @param page First page which to sweep out from ITLB and DTLB.
462 * @param cnt Number of ITLB and DTLB entries to invalidate.
464 void tlb_invalidate_pages(asid_t asid
, uintptr_t page
, count_t cnt
)
467 tlb_context_reg_t pc_save
, ctx
;
469 /* switch to nucleus because we are mapped by the primary context */
/* Save the current primary context, then — in elided lines — presumably
 * install the target ASID as the primary context (TODO confirm). */
472 ctx
.v
= pc_save
.v
= mmu_primary_context_read();
474 mmu_primary_context_write(ctx
.v
);
/* Demap each page of the range individually from both TLBs; the stride
 * expression is cut off by elided lines (presumably i * PAGE_SIZE). */
476 for (i
= 0; i
< cnt
; i
++) {
477 itlb_demap(TLB_DEMAP_PAGE
, TLB_DEMAP_PRIMARY
, page
+ i
*
479 dtlb_demap(TLB_DEMAP_PAGE
, TLB_DEMAP_PRIMARY
, page
+ i
*
/* Restore the previously active primary context. */
483 mmu_primary_context_write(pc_save
.v
);