/*
2 * Copyright (C) 2005 Jakub Jermar
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 /** @addtogroup sparc64mm
 * @{
 */
35 #include <arch/mm/tlb.h>
39 #include <arch/mm/frame.h>
40 #include <arch/mm/page.h>
41 #include <arch/mm/mmu.h>
42 #include <arch/interrupt.h>
45 #include <arch/types.h>
48 #include <arch/trap/trap.h>
53 static void dtlb_pte_copy(pte_t
*t
, bool ro
);
54 static void itlb_pte_copy(pte_t
*t
);
55 static void do_fast_instruction_access_mmu_miss_fault(istate_t
*istate
, const char *str
);
56 static void do_fast_data_access_mmu_miss_fault(istate_t
*istate
, tlb_tag_access_reg_t tag
, const char *str
);
57 static void do_fast_data_access_protection_fault(istate_t
*istate
, tlb_tag_access_reg_t tag
, const char *str
);
59 char *context_encoding
[] = {
66 void tlb_arch_init(void)
69 * TLBs are actually initialized early
74 /** Insert privileged mapping into DMMU TLB.
76 * @param page Virtual page address.
77 * @param frame Physical frame address.
78 * @param pagesize Page size.
79 * @param locked True for permanent mappings, false otherwise.
80 * @param cacheable True if the mapping is cacheable, false otherwise.
82 void dtlb_insert_mapping(uintptr_t page
, uintptr_t frame
, int pagesize
, bool locked
, bool cacheable
)
84 tlb_tag_access_reg_t tag
;
92 tag
.value
= ASID_KERNEL
;
95 dtlb_tag_access_write(tag
.value
);
108 dtlb_data_in_write(data
.value
);
113 * @param t Page Table Entry to be copied.
114 * @param ro If true, the entry will be created read-only, regardless of its w field.
116 void dtlb_pte_copy(pte_t
*t
, bool ro
)
118 tlb_tag_access_reg_t tag
;
123 pg
.address
= t
->page
;
124 fr
.address
= t
->frame
;
127 tag
.context
= t
->as
->asid
;
130 dtlb_tag_access_write(tag
.value
);
134 data
.size
= PAGESIZE_8K
;
139 data
.p
= t
->k
; /* p like privileged */
140 data
.w
= ro
? false : t
->w
;
143 dtlb_data_in_write(data
.value
);
146 void itlb_pte_copy(pte_t
*t
)
148 tlb_tag_access_reg_t tag
;
153 pg
.address
= t
->page
;
154 fr
.address
= t
->frame
;
157 tag
.context
= t
->as
->asid
;
160 itlb_tag_access_write(tag
.value
);
164 data
.size
= PAGESIZE_8K
;
169 data
.p
= t
->k
; /* p like privileged */
173 itlb_data_in_write(data
.value
);
176 /** ITLB miss handler. */
177 void fast_instruction_access_mmu_miss(int n
, istate_t
*istate
)
179 uintptr_t va
= ALIGN_DOWN(istate
->tpc
, PAGE_SIZE
);
182 page_table_lock(AS
, true);
183 t
= page_mapping_find(AS
, va
);
184 if (t
&& PTE_EXECUTABLE(t
)) {
186 * The mapping was found in the software page hash table.
187 * Insert it into ITLB.
191 page_table_unlock(AS
, true);
194 * Forward the page fault to the address space page fault handler.
196 page_table_unlock(AS
, true);
197 if (as_page_fault(va
, PF_ACCESS_EXEC
, istate
) == AS_PF_FAULT
) {
198 do_fast_instruction_access_mmu_miss_fault(istate
, __FUNCTION__
);
203 /** DTLB miss handler.
205 * Note that some faults (e.g. kernel faults) were already resolved
206 * by the low-level, assembly language part of the fast_data_access_mmu_miss
209 void fast_data_access_mmu_miss(int n
, istate_t
*istate
)
211 tlb_tag_access_reg_t tag
;
215 tag
.value
= dtlb_tag_access_read();
216 va
= tag
.vpn
<< PAGE_WIDTH
;
218 if (tag
.context
== ASID_KERNEL
) {
220 /* NULL access in kernel */
221 do_fast_data_access_mmu_miss_fault(istate
, tag
, __FUNCTION__
);
223 do_fast_data_access_mmu_miss_fault(istate
, tag
, "Unexpected kernel page fault.");
226 page_table_lock(AS
, true);
227 t
= page_mapping_find(AS
, va
);
230 * The mapping was found in the software page hash table.
231 * Insert it into DTLB.
234 dtlb_pte_copy(t
, true);
235 page_table_unlock(AS
, true);
238 * Forward the page fault to the address space page fault handler.
240 page_table_unlock(AS
, true);
241 if (as_page_fault(va
, PF_ACCESS_READ
, istate
) == AS_PF_FAULT
) {
242 do_fast_data_access_mmu_miss_fault(istate
, tag
, __FUNCTION__
);
247 /** DTLB protection fault handler. */
248 void fast_data_access_protection(int n
, istate_t
*istate
)
250 tlb_tag_access_reg_t tag
;
254 tag
.value
= dtlb_tag_access_read();
255 va
= tag
.vpn
<< PAGE_WIDTH
;
257 page_table_lock(AS
, true);
258 t
= page_mapping_find(AS
, va
);
259 if (t
&& PTE_WRITABLE(t
)) {
261 * The mapping was found in the software page hash table and is writable.
262 * Demap the old mapping and insert an updated mapping into DTLB.
266 dtlb_demap(TLB_DEMAP_PAGE
, TLB_DEMAP_SECONDARY
, va
);
267 dtlb_pte_copy(t
, false);
268 page_table_unlock(AS
, true);
271 * Forward the page fault to the address space page fault handler.
273 page_table_unlock(AS
, true);
274 if (as_page_fault(va
, PF_ACCESS_WRITE
, istate
) == AS_PF_FAULT
) {
275 do_fast_data_access_protection_fault(istate
, tag
, __FUNCTION__
);
280 /** Print contents of both TLBs. */
285 tlb_tag_read_reg_t t
;
287 printf("I-TLB contents:\n");
288 for (i
= 0; i
< ITLB_ENTRY_COUNT
; i
++) {
289 d
.value
= itlb_data_access_read(i
);
290 t
.value
= itlb_tag_read_read(i
);
292 printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n",
293 i
, t
.vpn
, t
.context
, d
.v
, d
.size
, d
.nfo
, d
.ie
, d
.soft2
, d
.diag
, d
.pfn
, d
.soft
, d
.l
, d
.cp
, d
.cv
, d
.e
, d
.p
, d
.w
, d
.g
);
296 printf("D-TLB contents:\n");
297 for (i
= 0; i
< DTLB_ENTRY_COUNT
; i
++) {
298 d
.value
= dtlb_data_access_read(i
);
299 t
.value
= dtlb_tag_read_read(i
);
301 printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n",
302 i
, t
.vpn
, t
.context
, d
.v
, d
.size
, d
.nfo
, d
.ie
, d
.soft2
, d
.diag
, d
.pfn
, d
.soft
, d
.l
, d
.cp
, d
.cv
, d
.e
, d
.p
, d
.w
, d
.g
);
307 void do_fast_instruction_access_mmu_miss_fault(istate_t
*istate
, const char *str
)
309 char *tpc_str
= get_symtab_entry(istate
->tpc
);
311 printf("TPC=%p, (%s)\n", istate
->tpc
, tpc_str
);
315 void do_fast_data_access_mmu_miss_fault(istate_t
*istate
, tlb_tag_access_reg_t tag
, const char *str
)
318 char *tpc_str
= get_symtab_entry(istate
->tpc
);
320 va
= tag
.vpn
<< PAGE_WIDTH
;
322 printf("Faulting page: %p, ASID=%d\n", va
, tag
.context
);
323 printf("TPC=%p, (%s)\n", istate
->tpc
, tpc_str
);
327 void do_fast_data_access_protection_fault(istate_t
*istate
, tlb_tag_access_reg_t tag
, const char *str
)
330 char *tpc_str
= get_symtab_entry(istate
->tpc
);
332 va
= tag
.vpn
<< PAGE_WIDTH
;
334 printf("Faulting page: %p, ASID=%d\n", va
, tag
.context
);
335 printf("TPC=%p, (%s)\n", istate
->tpc
, tpc_str
);
339 /** Invalidate all unlocked ITLB and DTLB entries. */
340 void tlb_invalidate_all(void)
344 tlb_tag_read_reg_t t
;
346 for (i
= 0; i
< ITLB_ENTRY_COUNT
; i
++) {
347 d
.value
= itlb_data_access_read(i
);
349 t
.value
= itlb_tag_read_read(i
);
351 itlb_tag_access_write(t
.value
);
352 itlb_data_access_write(i
, d
.value
);
356 for (i
= 0; i
< DTLB_ENTRY_COUNT
; i
++) {
357 d
.value
= dtlb_data_access_read(i
);
359 t
.value
= dtlb_tag_read_read(i
);
361 dtlb_tag_access_write(t
.value
);
362 dtlb_data_access_write(i
, d
.value
);
368 /** Invalidate all ITLB and DTLB entries that belong to specified ASID (Context).
370 * @param asid Address Space ID.
372 void tlb_invalidate_asid(asid_t asid
)
374 tlb_context_reg_t pc_save
, ctx
;
376 /* switch to nucleus because we are mapped by the primary context */
379 ctx
.v
= pc_save
.v
= mmu_primary_context_read();
381 mmu_primary_context_write(ctx
.v
);
383 itlb_demap(TLB_DEMAP_CONTEXT
, TLB_DEMAP_PRIMARY
, 0);
384 dtlb_demap(TLB_DEMAP_CONTEXT
, TLB_DEMAP_PRIMARY
, 0);
386 mmu_primary_context_write(pc_save
.v
);
391 /** Invalidate all ITLB and DTLB entries for specified page range in specified address space.
393 * @param asid Address Space ID.
394 * @param page First page which to sweep out from ITLB and DTLB.
395 * @param cnt Number of ITLB and DTLB entries to invalidate.
397 void tlb_invalidate_pages(asid_t asid
, uintptr_t page
, count_t cnt
)
400 tlb_context_reg_t pc_save
, ctx
;
402 /* switch to nucleus because we are mapped by the primary context */
405 ctx
.v
= pc_save
.v
= mmu_primary_context_read();
407 mmu_primary_context_write(ctx
.v
);
409 for (i
= 0; i
< cnt
; i
++) {
410 itlb_demap(TLB_DEMAP_PAGE
, TLB_DEMAP_PRIMARY
, page
+ i
* PAGE_SIZE
);
411 dtlb_demap(TLB_DEMAP_PAGE
, TLB_DEMAP_PRIMARY
, page
+ i
* PAGE_SIZE
);
414 mmu_primary_context_write(pc_save
.v
);