/*
 * Copyright (c) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/** @addtogroup kernel_genarch_mm
 * @{
 */

/**
 * @file
 * @brief Virtual Address Translation for hierarchical 4-level page tables.
 */
#include <assert.h>
#include <genarch/mm/page_pt.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/km.h>
#include <mm/as.h>
#include <arch/mm/page.h>
#include <arch/mm/as.h>
#include <barrier.h>
#include <typedefs.h>
#include <arch/asm.h>
#include <memw.h>
#include <align.h>
#include <macros.h>
#include <bitops.h>
static void pt_mapping_insert(as_t *, uintptr_t, uintptr_t, unsigned int);
static void pt_mapping_remove(as_t *, uintptr_t);
static bool pt_mapping_find(as_t *, uintptr_t, bool, pte_t *pte);
static void pt_mapping_update(as_t *, uintptr_t, bool, pte_t *pte);
static void pt_mapping_make_global(uintptr_t, size_t);
const page_mapping_operations_t pt_mapping_operations = {
    .mapping_insert = pt_mapping_insert,
    .mapping_remove = pt_mapping_remove,
    .mapping_find = pt_mapping_find,
    .mapping_update = pt_mapping_update,
    .mapping_make_global = pt_mapping_make_global
};
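
/*
 * These operations are not called directly. The generic mapping layer in
 * mm/page.c dispatches through whichever page_mapping_operations_t the
 * architecture installed. A minimal sketch of that dispatch, for
 * illustration only (the authoritative version lives in mm/page.c):
 *
 *	void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
 *	    unsigned int flags)
 *	{
 *		assert(page_table_locked(as));
 *		page_mapping_operations->mapping_insert(as,
 *		    ALIGN_DOWN(page, PAGE_SIZE), ALIGN_DOWN(frame, FRAME_SIZE),
 *		    flags);
 *	}
 */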
/** Map page to frame using hierarchical page tables.
 *
 * Map virtual address page to physical address frame
 * using flags.
 *
 * @param as    Address space to which page belongs.
 * @param page  Virtual address of the page to be mapped.
 * @param frame Physical address of memory frame to which the mapping is done.
 * @param flags Flags to be used for mapping.
 */
void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
    unsigned int flags)
{
    pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);

    assert(page_table_locked(as));
    if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
        pte_t *newpt = (pte_t *)
            PA2KA(frame_alloc(PTL1_FRAMES, FRAME_LOWMEM, PTL1_SIZE - 1));
        memsetb(newpt, PTL1_SIZE, 0);
        SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
        SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page),
            PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
            PAGE_WRITE);

        /*
         * Make sure that a concurrent hardware page table walk or
         * pt_mapping_find() will see the new PTL1 only after it is
         * fully initialized.
         */
        write_barrier();
        SET_PTL1_PRESENT(ptl0, PTL0_INDEX(page));
    }
    pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));

    if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
        pte_t *newpt = (pte_t *)
            PA2KA(frame_alloc(PTL2_FRAMES, FRAME_LOWMEM, PTL2_SIZE - 1));
        memsetb(newpt, PTL2_SIZE, 0);
        SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
        SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page),
            PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
            PAGE_WRITE);

        /*
         * Make the new PTL2 visible only after it is fully initialized.
         */
        write_barrier();
        SET_PTL2_PRESENT(ptl1, PTL1_INDEX(page));
    }
    pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));

    if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
        pte_t *newpt = (pte_t *)
            PA2KA(frame_alloc(PTL3_FRAMES, FRAME_LOWMEM, PTL3_SIZE - 1));
        memsetb(newpt, PTL3_SIZE, 0);
        SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
        SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page),
            PAGE_NOT_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE |
            PAGE_WRITE);

        /*
         * Make the new PTL3 visible only after it is fully initialized.
         */
        write_barrier();
        SET_PTL3_PRESENT(ptl2, PTL2_INDEX(page));
    }
    pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

    SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame);
    SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags | PAGE_NOT_PRESENT);

    /*
     * Make the new mapping visible only after it is fully initialized.
     */
    write_barrier();
    SET_FRAME_PRESENT(ptl3, PTL3_INDEX(page));
}
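
/*
 * Typical usage, sketched under the assumption of the usual locking
 * discipline (callers go through the generic page_mapping_insert() with the
 * page table lock held; exact call sites may differ):
 *
 *	page_table_lock(as, true);
 *	page_mapping_insert(as, page, frame,
 *	    PAGE_READ | PAGE_WRITE | PAGE_CACHEABLE);
 *	page_table_unlock(as, true);
 */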
/** Remove mapping of page from hierarchical page tables.
 *
 * Remove any mapping of page within address space as.
 * TLB shootdown should follow in order to make effects of
 * this call visible.
 *
 * Empty page tables except PTL0 are freed.
 *
 * @param as   Address space to which page belongs.
 * @param page Virtual address of the page to be demapped.
 */
void pt_mapping_remove(as_t *as, uintptr_t page)
{
    assert(page_table_locked(as));

    /*
     * First, remove the mapping, if it exists.
     */

    pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
    if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
        return;

    pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
    if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
        return;

    pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
    if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
        return;

    pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));
    /*
     * Destroy the mapping.
     * Merely setting the flags to PAGE_NOT_PRESENT is not sufficient
     * (the whole entry is cleared below), but the SET_FRAME_FLAGS call
     * is kept for possible page table coherence maintenance, at least
     * on ARM.
     */
    // TODO: Fix this inconsistency
    SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), PAGE_NOT_PRESENT);
    memsetb(&ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);

    /*
     * Second, free all empty tables along the way from PTL3 down to PTL0,
     * except those needed for sharing the kernel non-identity mappings.
     */
    /* Check PTL3 */
    bool empty = true;

    unsigned int i;
    for (i = 0; i < PTL3_ENTRIES; i++) {
        if (PTE_VALID(&ptl3[i])) {
            empty = false;
            break;
        }
    }

    if (empty) {
        /*
         * PTL3 is empty.
         * Release the frame and remove PTL3 pointer from the parent
         * table.
         */
#if (PTL2_ENTRIES != 0)
        memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
#elif (PTL1_ENTRIES != 0)
        memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
        if (km_is_non_identity(page))
            return;

        memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
        frame_free(KA2PA((uintptr_t) ptl3), PTL3_FRAMES);
    } else {
        /*
         * PTL3 is not empty.
         * Therefore, there must be a path from PTL0 to PTL3 and
         * thus nothing to free in higher levels.
         */
        return;
    }
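
    /*
     * Note on the #if ladder above: architectures with fewer than four
     * real translation levels define the PTLn_ENTRIES of the folded
     * levels as 0, so the parent entry to clear is selected at compile
     * time rather than at run time.
     */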
    /* Check PTL2; empty is still true. */
#if (PTL2_ENTRIES != 0)
    for (i = 0; i < PTL2_ENTRIES; i++) {
        if (PTE_VALID(&ptl2[i])) {
            empty = false;
            break;
        }
    }

    if (empty) {
        /*
         * PTL2 is empty.
         * Release the frame and remove PTL2 pointer from the parent
         * table.
         */
#if (PTL1_ENTRIES != 0)
        memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
#else
        if (km_is_non_identity(page))
            return;

        memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
#endif
        frame_free(KA2PA((uintptr_t) ptl2), PTL2_FRAMES);
    } else {
        /*
         * PTL2 is not empty.
         * Therefore, there must be a path from PTL0 to PTL2 and
         * thus nothing to free in higher levels.
         */
        return;
    }
#endif /* PTL2_ENTRIES != 0 */
    /* Check PTL1; empty is still true. */
#if (PTL1_ENTRIES != 0)
    for (i = 0; i < PTL1_ENTRIES; i++) {
        if (PTE_VALID(&ptl1[i])) {
            empty = false;
            break;
        }
    }

    if (empty) {
        /*
         * PTL1 is empty.
         * Release the frame and remove PTL1 pointer from the parent
         * table.
         */
        if (km_is_non_identity(page))
            return;

        memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
        frame_free(KA2PA((uintptr_t) ptl1), PTL1_FRAMES);
    }
#endif /* PTL1_ENTRIES != 0 */
}
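
/*
 * Note on the km_is_non_identity() guards in pt_mapping_remove(): a table
 * hooked directly into PTL0 may implement the kernel non-identity window
 * shared by all address spaces (see pt_mapping_make_global() below), so it
 * must never be freed, even when it no longer contains any valid PTEs.
 */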
static pte_t *pt_mapping_find_internal(as_t *as, uintptr_t page, bool nolock)
{
    assert(nolock || page_table_locked(as));

    pte_t *ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
    if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
        return NULL;
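
    /*
     * Always read ptl1 only after we are sure it is present.
     */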
    read_barrier();

    pte_t *ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
    if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
        return NULL;

#if (PTL1_ENTRIES != 0)
    /*
     * Always read ptl2 only after we are sure it is present.
     */
    read_barrier();
#endif

    pte_t *ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
    if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
        return NULL;

#if (PTL2_ENTRIES != 0)
    /*
     * Always read ptl3 only after we are sure it is present.
     */
    read_barrier();
#endif

    pte_t *ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

    return &ptl3[PTL3_INDEX(page)];
}
/** Find mapping for virtual page in hierarchical page tables.
 *
 * @param as     Address space to which page belongs.
 * @param page   Virtual page.
 * @param nolock True if the page tables need not be locked.
 * @param[out] pte Structure that will receive a copy of the found PTE.
 *
 * @return True if the mapping was found, false otherwise.
 */
bool pt_mapping_find(as_t *as, uintptr_t page, bool nolock, pte_t *pte)
{
    pte_t *t = pt_mapping_find_internal(as, page, nolock);
    if (t)
        *pte = *t;

    return t != NULL;
}
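
/*
 * Design note: the PTE is handed out by value, so the caller works on a
 * stable snapshot of the entry; any modification must go back through
 * pt_mapping_update(), which re-locates the live entry in the tables.
 */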
/** Update mapping for virtual page in hierarchical page tables.
 *
 * @param as     Address space to which page belongs.
 * @param page   Virtual page.
 * @param nolock True if the page tables need not be locked.
 * @param[in] pte New PTE.
 */
void pt_mapping_update(as_t *as, uintptr_t page, bool nolock, pte_t *pte)
{
    pte_t *t = pt_mapping_find_internal(as, page, nolock);
    if (!t)
        panic("Updating non-existent PTE");

    assert(PTE_VALID(t) == PTE_VALID(pte));
    assert(PTE_PRESENT(t) == PTE_PRESENT(pte));
    assert(PTE_GET_FRAME(t) == PTE_GET_FRAME(pte));
    assert(PTE_WRITABLE(t) == PTE_WRITABLE(pte));
    assert(PTE_EXECUTABLE(t) == PTE_EXECUTABLE(pte));

    *t = *pte;
}
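
/*
 * The asserts above restrict pt_mapping_update() to changes that keep the
 * validity, presence, frame address and access rights intact; in effect,
 * only bookkeeping bits (for example accessed/dirty flags) may differ
 * between the old and the new PTE.
 */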
/** Return the size of the region mapped by a single PTL0 entry.
 *
 * @return Size of the region mapped by a single PTL0 entry.
 */
static uintptr_t ptl0_step_get(void)
{
    size_t va_bits;

    va_bits = fnzb(PTL0_ENTRIES) + fnzb(PTL1_ENTRIES) + fnzb(PTL2_ENTRIES) +
        fnzb(PTL3_ENTRIES) + PAGE_WIDTH;

    return 1UL << (va_bits - fnzb(PTL0_ENTRIES));
}
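
/*
 * Worked example (hypothetical configuration with 512 entries at each of
 * the four levels and 4 KiB pages, i.e. PAGE_WIDTH == 12):
 * va_bits = 9 + 9 + 9 + 9 + 12 = 48, so a single PTL0 entry maps
 * 1UL << (48 - 9) = 2^39 bytes = 512 GiB of virtual address space.
 */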
/** Make the mappings in the given range global across all address spaces.
 *
 * All PTL0 entries in the given range will be mapped to a next level page
 * table. The next level page table will be allocated and cleared.
 *
 * pt_mapping_remove() will never deallocate these page tables even when there
 * are no PTEs in them.
 *
 * @param base Base address corresponding to the first PTL0 entry that will be
 *             altered by this function.
 * @param size Size in bytes defining the range of PTL0 entries that will be
 *             altered by this function.
 */
void pt_mapping_make_global(uintptr_t base, size_t size)
{
    assert(size > 0);

    uintptr_t ptl0 = PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
    uintptr_t ptl0_step = ptl0_step_get();
    size_t frames;

#if (PTL1_ENTRIES != 0)
    frames = PTL1_FRAMES;
#elif (PTL2_ENTRIES != 0)
    frames = PTL2_FRAMES;
#else
    frames = PTL3_FRAMES;
#endif

    for (uintptr_t addr = ALIGN_DOWN(base, ptl0_step);
        addr - 1 < base + size - 1;
        addr += ptl0_step) {
        if (GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(addr)) != 0) {
            assert(overlaps(addr, ptl0_step,
                config.identity_base, config.identity_size));

            /*
             * This PTL0 entry also maps the kernel identity region,
             * so it is already global and initialized.
             */
            continue;
        }

        uintptr_t l1 = PA2KA(frame_alloc(frames, FRAME_LOWMEM, 0));
        memsetb((void *) l1, FRAMES2SIZE(frames), 0);
        SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(addr), KA2PA(l1));
        SET_PTL1_FLAGS(ptl0, PTL0_INDEX(addr),
            PAGE_PRESENT | PAGE_USER | PAGE_CACHEABLE |
            PAGE_EXEC | PAGE_WRITE | PAGE_READ);
    }
}
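
/*
 * Note on the loop above: the condition (addr - 1 < base + size - 1) is an
 * overflow-tolerant way of writing (addr < base + size); it keeps the
 * comparison meaningful even when base + size wraps around to zero at the
 * very top of the address space.
 */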
/** @}
 */