sparc64 kernel fixes
[helenos.git] / kernel / arch / sparc64 / src / mm / tlb.c
blob e50a7b9a1a2d883a01edafc79b1dbd404ed97a36
1 /*
2 * Copyright (C) 2005 Jakub Jermar
3 * All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
9 * - Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * - Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * - The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** @addtogroup sparc64mm
 * @{
 */

/** @file
 */
35 #include <arch/mm/tlb.h>
36 #include <mm/tlb.h>
37 #include <mm/as.h>
38 #include <mm/asid.h>
39 #include <arch/mm/frame.h>
40 #include <arch/mm/page.h>
41 #include <arch/mm/mmu.h>
42 #include <arch/interrupt.h>
43 #include <arch.h>
44 #include <print.h>
45 #include <arch/types.h>
46 #include <typedefs.h>
47 #include <config.h>
48 #include <arch/trap/trap.h>
49 #include <panic.h>
50 #include <arch/asm.h>
51 #include <symtab.h>
/* Forward declarations of helpers defined later in this file. */
static void dtlb_pte_copy(pte_t *t, bool ro);
static void itlb_pte_copy(pte_t *t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str);
static void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str);
static void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str);

/* Human-readable names for the MMU context register selections, indexed by context number. */
char *context_encoding[] = {
	"Primary",
	"Secondary",
	"Nucleus",
	"Reserved"
};
/** Architecture-specific TLB initialization hook.
 *
 * Intentionally empty: the TLBs are already set up by the early
 * boot code in start.S before C code runs.
 */
void tlb_arch_init(void)
{
	/*
	 * TLBs are actually initialized early
	 * in start.S.
	 */
}
74 /** Insert privileged mapping into DMMU TLB.
76 * @param page Virtual page address.
77 * @param frame Physical frame address.
78 * @param pagesize Page size.
79 * @param locked True for permanent mappings, false otherwise.
80 * @param cacheable True if the mapping is cacheable, false otherwise.
82 void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool locked, bool cacheable)
84 tlb_tag_access_reg_t tag;
85 tlb_data_t data;
86 page_address_t pg;
87 frame_address_t fr;
89 pg.address = page;
90 fr.address = frame;
92 tag.value = ASID_KERNEL;
93 tag.vpn = pg.vpn;
95 dtlb_tag_access_write(tag.value);
97 data.value = 0;
98 data.v = true;
99 data.size = pagesize;
100 data.pfn = fr.pfn;
101 data.l = locked;
102 data.cp = cacheable;
103 data.cv = cacheable;
104 data.p = true;
105 data.w = true;
106 data.g = true;
108 dtlb_data_in_write(data.value);
111 /** Copy PTE to TLB.
113 * @param t Page Table Entry to be copied.
114 * @param ro If true, the entry will be created read-only, regardless of its w field.
116 void dtlb_pte_copy(pte_t *t, bool ro)
118 tlb_tag_access_reg_t tag;
119 tlb_data_t data;
120 page_address_t pg;
121 frame_address_t fr;
123 pg.address = t->page;
124 fr.address = t->frame;
126 tag.value = 0;
127 tag.context = t->as->asid;
128 tag.vpn = pg.vpn;
130 dtlb_tag_access_write(tag.value);
132 data.value = 0;
133 data.v = true;
134 data.size = PAGESIZE_8K;
135 data.pfn = fr.pfn;
136 data.l = false;
137 data.cp = t->c;
138 data.cv = t->c;
139 data.p = t->k; /* p like privileged */
140 data.w = ro ? false : t->w;
141 data.g = t->g;
143 dtlb_data_in_write(data.value);
146 void itlb_pte_copy(pte_t *t)
148 tlb_tag_access_reg_t tag;
149 tlb_data_t data;
150 page_address_t pg;
151 frame_address_t fr;
153 pg.address = t->page;
154 fr.address = t->frame;
156 tag.value = 0;
157 tag.context = t->as->asid;
158 tag.vpn = pg.vpn;
160 itlb_tag_access_write(tag.value);
162 data.value = 0;
163 data.v = true;
164 data.size = PAGESIZE_8K;
165 data.pfn = fr.pfn;
166 data.l = false;
167 data.cp = t->c;
168 data.cv = t->c;
169 data.p = t->k; /* p like privileged */
170 data.w = false;
171 data.g = t->g;
173 itlb_data_in_write(data.value);
176 /** ITLB miss handler. */
177 void fast_instruction_access_mmu_miss(int n, istate_t *istate)
179 uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
180 pte_t *t;
182 page_table_lock(AS, true);
183 t = page_mapping_find(AS, va);
184 if (t && PTE_EXECUTABLE(t)) {
186 * The mapping was found in the software page hash table.
187 * Insert it into ITLB.
189 t->a = true;
190 itlb_pte_copy(t);
191 page_table_unlock(AS, true);
192 } else {
194 * Forward the page fault to the address space page fault handler.
196 page_table_unlock(AS, true);
197 if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
198 do_fast_instruction_access_mmu_miss_fault(istate, __FUNCTION__);
203 /** DTLB miss handler.
205 * Note that some faults (e.g. kernel faults) were already resolved
206 * by the low-level, assembly language part of the fast_data_access_mmu_miss
207 * handler.
209 void fast_data_access_mmu_miss(int n, istate_t *istate)
211 tlb_tag_access_reg_t tag;
212 uintptr_t va;
213 pte_t *t;
215 tag.value = dtlb_tag_access_read();
216 va = tag.vpn << PAGE_WIDTH;
218 if (tag.context == ASID_KERNEL) {
219 if (!tag.vpn) {
220 /* NULL access in kernel */
221 do_fast_data_access_mmu_miss_fault(istate, tag, __FUNCTION__);
223 do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected kernel page fault.");
226 page_table_lock(AS, true);
227 t = page_mapping_find(AS, va);
228 if (t) {
230 * The mapping was found in the software page hash table.
231 * Insert it into DTLB.
233 t->a = true;
234 dtlb_pte_copy(t, true);
235 page_table_unlock(AS, true);
236 } else {
238 * Forward the page fault to the address space page fault handler.
240 page_table_unlock(AS, true);
241 if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
242 do_fast_data_access_mmu_miss_fault(istate, tag, __FUNCTION__);
247 /** DTLB protection fault handler. */
248 void fast_data_access_protection(int n, istate_t *istate)
250 tlb_tag_access_reg_t tag;
251 uintptr_t va;
252 pte_t *t;
254 tag.value = dtlb_tag_access_read();
255 va = tag.vpn << PAGE_WIDTH;
257 page_table_lock(AS, true);
258 t = page_mapping_find(AS, va);
259 if (t && PTE_WRITABLE(t)) {
261 * The mapping was found in the software page hash table and is writable.
262 * Demap the old mapping and insert an updated mapping into DTLB.
264 t->a = true;
265 t->d = true;
266 dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, va);
267 dtlb_pte_copy(t, false);
268 page_table_unlock(AS, true);
269 } else {
271 * Forward the page fault to the address space page fault handler.
273 page_table_unlock(AS, true);
274 if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
275 do_fast_data_access_protection_fault(istate, tag, __FUNCTION__);
280 /** Print contents of both TLBs. */
281 void tlb_print(void)
283 int i;
284 tlb_data_t d;
285 tlb_tag_read_reg_t t;
287 printf("I-TLB contents:\n");
288 for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
289 d.value = itlb_data_access_read(i);
290 t.value = itlb_tag_read_read(i);
292 printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n",
293 i, t.vpn, t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
296 printf("D-TLB contents:\n");
297 for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
298 d.value = dtlb_data_access_read(i);
299 t.value = dtlb_tag_read_read(i);
301 printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n",
302 i, t.vpn, t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
307 void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str)
309 char *tpc_str = get_symtab_entry(istate->tpc);
311 printf("TPC=%p, (%s)\n", istate->tpc, tpc_str);
312 panic("%s\n", str);
315 void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str)
317 uintptr_t va;
318 char *tpc_str = get_symtab_entry(istate->tpc);
320 va = tag.vpn << PAGE_WIDTH;
322 printf("Faulting page: %p, ASID=%d\n", va, tag.context);
323 printf("TPC=%p, (%s)\n", istate->tpc, tpc_str);
324 panic("%s\n", str);
327 void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str)
329 uintptr_t va;
330 char *tpc_str = get_symtab_entry(istate->tpc);
332 va = tag.vpn << PAGE_WIDTH;
334 printf("Faulting page: %p, ASID=%d\n", va, tag.context);
335 printf("TPC=%p, (%s)\n", istate->tpc, tpc_str);
336 panic("%s\n", str);
339 /** Invalidate all unlocked ITLB and DTLB entries. */
340 void tlb_invalidate_all(void)
342 int i;
343 tlb_data_t d;
344 tlb_tag_read_reg_t t;
346 for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
347 d.value = itlb_data_access_read(i);
348 if (!d.l) {
349 t.value = itlb_tag_read_read(i);
350 d.v = false;
351 itlb_tag_access_write(t.value);
352 itlb_data_access_write(i, d.value);
356 for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
357 d.value = dtlb_data_access_read(i);
358 if (!d.l) {
359 t.value = dtlb_tag_read_read(i);
360 d.v = false;
361 dtlb_tag_access_write(t.value);
362 dtlb_data_access_write(i, d.value);
368 /** Invalidate all ITLB and DTLB entries that belong to specified ASID (Context).
370 * @param asid Address Space ID.
372 void tlb_invalidate_asid(asid_t asid)
374 tlb_context_reg_t pc_save, ctx;
376 /* switch to nucleus because we are mapped by the primary context */
377 nucleus_enter();
379 ctx.v = pc_save.v = mmu_primary_context_read();
380 ctx.context = asid;
381 mmu_primary_context_write(ctx.v);
383 itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
384 dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
386 mmu_primary_context_write(pc_save.v);
388 nucleus_leave();
391 /** Invalidate all ITLB and DTLB entries for specified page range in specified address space.
393 * @param asid Address Space ID.
394 * @param page First page which to sweep out from ITLB and DTLB.
395 * @param cnt Number of ITLB and DTLB entries to invalidate.
397 void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
399 int i;
400 tlb_context_reg_t pc_save, ctx;
402 /* switch to nucleus because we are mapped by the primary context */
403 nucleus_enter();
405 ctx.v = pc_save.v = mmu_primary_context_read();
406 ctx.context = asid;
407 mmu_primary_context_write(ctx.v);
409 for (i = 0; i < cnt; i++) {
410 itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i * PAGE_SIZE);
411 dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i * PAGE_SIZE);
414 mmu_primary_context_write(pc_save.v);
416 nucleus_leave();
/** @}
 */