kernel/arch/sparc64/src/mm/tlb.c

/*
 * Copyright (c) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */

/** @file
 */

#include <arch/mm/tlb.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/mmu.h>
#include <arch/interrupt.h>
#include <interrupt.h>
#include <arch.h>
#include <print.h>
#include <arch/types.h>
#include <typedefs.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

static void dtlb_pte_copy(pte_t *t, bool ro);
static void itlb_pte_copy(pte_t *t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str);
static void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str);
static void do_fast_data_access_protection_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str);

char *context_encoding[] = {
    "Primary",
    "Secondary",
    "Nucleus",
    "Reserved"
};

void tlb_arch_init(void)
{
    /*
     * Invalidate all non-locked DTLB and ITLB entries.
     */
    tlb_invalidate_all();

    /*
     * Clear both SFSRs.
     */
    dtlb_sfsr_write(0);
    itlb_sfsr_write(0);
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page      Virtual page address.
 * @param frame     Physical frame address.
 * @param pagesize  Page size.
 * @param locked    True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = page;
    fr.address = frame;

    tag.value = ASID_KERNEL;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = pagesize;
    data.pfn = fr.pfn;
    data.l = locked;
    data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_DCACHE */
    data.p = true;
    data.w = true;
    data.g = false;

    dtlb_data_in_write(data.value);
}

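/*
 * Illustrative use only (not taken from this file): a hypothetical call that
 * creates a locked, cacheable kernel mapping of physical address pa at
 * virtual address va. The names va and pa are placeholders, and PAGESIZE_4M
 * is assumed to be defined alongside the PAGESIZE_8K constant used elsewhere
 * in this file:
 *
 *     dtlb_insert_mapping(va, pa, PAGESIZE_4M, true, true);
 */
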
/** Copy PTE to DTLB.
 *
 * @param t  Page Table Entry to be copied.
 * @param ro If true, the entry will be created read-only, regardless of its
 *           w field.
 */
void dtlb_pte_copy(pte_t *t, bool ro)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page;
    fr.address = t->frame;

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_DCACHE */
    data.p = t->k;  /* p as in privileged */
    data.w = ro ? false : t->w;
    data.g = t->g;

    dtlb_data_in_write(data.value);
}

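/*
 * A note on the insertion protocol used by dtlb_pte_copy() and
 * itlb_pte_copy(): on UltraSPARC MMUs, a write to the TLB Data In register
 * is expected to allocate a new TLB entry at a slot chosen by the hardware
 * replacement policy, with the entry's tag taken from the previously written
 * Tag Access register. This description is based on the UltraSPARC manuals,
 * not on anything defined in this file.
 */
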
/** Copy PTE to ITLB.
 *
 * @param t  Page Table Entry to be copied.
 */
void itlb_pte_copy(pte_t *t)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page;
    fr.address = t->frame;

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    itlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
    data.p = t->k;  /* p as in privileged */
    data.w = false;
    data.g = t->g;

    itlb_data_in_write(data.value);
}

/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(int n, istate_t *istate)
{
    uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
    pte_t *t;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_EXECUTABLE(t)) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into ITLB.
         */
        t->a = true;
        itlb_pte_copy(t);
#ifdef CONFIG_TSB
        itsb_pte_copy(t);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            do_fast_instruction_access_mmu_miss_fault(istate,
                __FUNCTION__);
        }
    }
}

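/*
 * Note on fast_instruction_access_mmu_miss() above: the faulting virtual
 * address is derived from the trapped program counter (istate->tpc) rather
 * than from an MMU tag register, since for an instruction fetch the fault
 * address is the address of the instruction itself.
 */
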
/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 */
void fast_data_access_mmu_miss(int n, istate_t *istate)
{
    tlb_tag_access_reg_t tag;
    uintptr_t va;
    pte_t *t;

    tag.value = dtlb_tag_access_read();
    va = tag.vpn << PAGE_WIDTH;

    if (tag.context == ASID_KERNEL) {
        if (!tag.vpn) {
            /* NULL access in kernel */
            do_fast_data_access_mmu_miss_fault(istate, tag,
                __FUNCTION__);
        }
        do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
            "kernel page fault.");
    }

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into DTLB.
         */
        t->a = true;
        dtlb_pte_copy(t, true);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, true);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            do_fast_data_access_mmu_miss_fault(istate, tag,
                __FUNCTION__);
        }
    }
}

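/*
 * Note on fast_data_access_mmu_miss() above: the translation is inserted
 * with ro == true, i.e. read-only, even when the PTE itself allows writes.
 * The first write through such an entry then raises the protection fault
 * handled below, which is where the PTE's dirty bit gets recorded before
 * the write permission is granted.
 */
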
/** DTLB protection fault handler. */
void fast_data_access_protection(int n, istate_t *istate)
{
    tlb_tag_access_reg_t tag;
    uintptr_t va;
    pte_t *t;

    tag.value = dtlb_tag_access_read();
    va = tag.vpn << PAGE_WIDTH;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_WRITABLE(t)) {
        /*
         * The mapping was found in the software page hash table and is
         * writable. Demap the old mapping and insert an updated mapping
         * into DTLB.
         */
        t->a = true;
        t->d = true;
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, va);
        dtlb_pte_copy(t, false);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, false);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
            do_fast_data_access_protection_fault(istate, tag,
                __FUNCTION__);
        }
    }
}

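/*
 * Taken together, the two data fault handlers above track the referenced
 * and modified state of a page in software:
 *
 *     first data access -> DTLB miss        -> t->a = true, read-only entry
 *     first write       -> protection fault -> t->d = true, writable entry
 */
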
/** Print contents of both TLBs. */
void tlb_print(void)
{
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    printf("I-TLB contents:\n");
    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        t.value = itlb_tag_read_read(i);

        printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
            "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
            "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
            t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
            d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
    }

    printf("D-TLB contents:\n");
    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        t.value = dtlb_tag_read_read(i);

        printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
            "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
            "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
            t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
            d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
    }
}

void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str)
{
    fault_if_from_uspace(istate, "%s\n", str);
    dump_istate(istate);
    panic("%s\n", str);
}

void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << PAGE_WIDTH;

    fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
        tag.context);
    dump_istate(istate);
    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    panic("%s\n", str);
}

void do_fast_data_access_protection_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << PAGE_WIDTH;

    fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
        tag.context);
    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    dump_istate(istate);
    panic("%s\n", str);
}

void dump_sfsr_and_sfar(void)
{
    tlb_sfsr_reg_t sfsr;
    uintptr_t sfar;

    sfsr.value = dtlb_sfsr_read();
    sfar = dtlb_sfar_read();

    printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
        "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
        sfsr.ow, sfsr.fv);
    printf("DTLB SFAR: address=%p\n", sfar);

    dtlb_sfsr_write(0);
}

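/*
 * The SFSR is cleared at the end of dump_sfsr_and_sfar() presumably so that
 * the status of the next MMU fault can be latched again; see the fv (fault
 * valid) and ow (overwrite) bits printed above.
 */
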
/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        if (!d.l) {
            t.value = itlb_tag_read_read(i);
            d.v = false;
            itlb_tag_access_write(t.value);
            itlb_data_access_write(i, d.value);
        }
    }

    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        if (!d.l) {
            t.value = dtlb_tag_read_read(i);
            d.v = false;
            dtlb_tag_access_write(t.value);
            dtlb_data_access_write(i, d.value);
        }
    }
}

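/*
 * Note on tlb_invalidate_all(): the Tag Access register is rewritten with
 * the tag just read before each Data Access write because, as described in
 * the UltraSPARC manuals (not verified in this file), the tag portion of the
 * entry being written is taken from Tag Access; this leaves the entry
 * unchanged except for the cleared valid bit.
 */
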
/** Invalidate all ITLB and DTLB entries that belong to specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
    dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);

    mmu_primary_context_write(pc_save.v);
    nucleus_leave();
}

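/*
 * tlb_invalidate_asid() temporarily installs the target ASID into the
 * primary context register and then issues context demap operations against
 * the primary context. The itlb_demap()/dtlb_demap() helpers, defined
 * outside this file, are assumed to encode the demap type and the context
 * register selection into the address of a store to the respective demap
 * ASI, as is usual on UltraSPARC.
 */
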
/** Invalidate all ITLB and DTLB entries for specified page range in specified
 * address space.
 *
 * @param asid Address Space ID.
 * @param page First page to sweep out of the ITLB and DTLB.
 * @param cnt  Number of ITLB and DTLB entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
    int i;
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    for (i = 0; i < cnt; i++) {
        itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * PAGE_SIZE);
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * PAGE_SIZE);
    }

    mmu_primary_context_write(pc_save.v);
    nucleus_leave();
}

/** @}
 */