Make defunct ppc64 kernel compile again.
[helenos.git] / arch / ppc64 / src / mm / page.c
blob 44fde4e34a051a1c123fff4af2534da5e70b022f
/*
 * Copyright (C) 2005 Martin Decky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ppc64mm
 * @{
 */
/** @file
 */
#include <arch/mm/page.h>
#include <genarch/mm/page_pt.h>
#include <arch/mm/frame.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch.h>
#include <arch/types.h>
#include <arch/exception.h>
#include <align.h>
#include <config.h>
#include <print.h>
#include <symtab.h>
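
/*
 * Kernel-virtual pointer to the hardware page hash table (PHT).
 * It is allocated and registered with the MMU in page_arch_init()
 * and filled lazily by pht_insert() on PHT refill exceptions.
 */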
static phte_t *phte;

/** Try to find PTE for faulting address
 *
 * Try to find PTE for faulting address.
 * The as->lock must be held on entry to this function
 * if lock is true.
 *
 * @param as       Address space.
 * @param lock     Lock/unlock the address space.
 * @param badvaddr Faulting virtual address.
 * @param access   Access mode that caused the fault.
 * @param istate   Pointer to interrupted state.
 * @param pfrc     Pointer to variable where as_page_fault() return code
 *                 will be stored.
 *
 * @return PTE on success, NULL otherwise.
 *
 */
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr,
				     int access, istate_t *istate, int *pfrc)
{
	/*
	 * Check if the mapping exists in page tables.
	 */
	pte_t *pte = page_mapping_find(as, badvaddr);
	if ((pte) && (pte->p)) {
		/*
		 * Mapping found in page tables.
		 * Immediately succeed.
		 */
		return pte;
	} else {
		int rc;
		
		/*
		 * Mapping not found in page tables.
		 * Resort to higher-level page fault handler.
		 */
		page_table_unlock(as, lock);
		switch (rc = as_page_fault(badvaddr, access, istate)) {
		case AS_PF_OK:
			/*
			 * The higher-level page fault handler succeeded;
			 * the mapping ought to be in place.
			 */
			page_table_lock(as, lock);
			pte = page_mapping_find(as, badvaddr);
			ASSERT((pte) && (pte->p));
			*pfrc = 0;
			return pte;
		case AS_PF_DEFER:
			page_table_lock(as, lock);
			*pfrc = rc;
			return NULL;
		case AS_PF_FAULT:
			page_table_lock(as, lock);
			printf("Page fault.\n");
			*pfrc = rc;
			return NULL;
		default:
			panic("unexpected rc (%d)\n", rc);
		}
	}
}

static void pht_refill_fail(__address badvaddr, istate_t *istate)
{
	char *symbol = "";
	char *sym2 = "";
	
	char *s = get_symtab_entry(istate->pc);
	if (s)
		symbol = s;
	s = get_symtab_entry(istate->lr);
	if (s)
		sym2 = s;
	panic("%p: PHT Refill Exception at %p (%s<-%s)\n", badvaddr,
	      istate->pc, symbol, sym2);
}
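
/*
 * Illustrative example of the hashed lookup performed by pht_insert()
 * (figures invented for this note, not taken from the original source):
 * for vaddr 0x12345000 the 16-bit page index is 0x2345 and the API
 * (abbreviated page index, its top six bits) is 0x08. With a VSID of,
 * say, 0x1234, the primary hash is 0x1234 ^ 0x2345 = 0x3171, so the
 * primary PTEG starts at PTE index (0x3171 & 0x3ff) << 3 = 0xb88;
 * the secondary PTEG uses the one's complement of the hash,
 * (~0x3171 & 0x3ff) << 3 = 0x1470. Each PTEG holds eight PTEs.
 */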
static void pht_insert(const __address vaddr, const pfn_t pfn)
{
	__u32 page = (vaddr >> 12) & 0xffff;
	__u32 api = (vaddr >> 22) & 0x3f;
	__u32 vsid;
	
	asm volatile (
		"mfsrin %0, %1\n"
		: "=r" (vsid)
		: "r" (vaddr)
	);
	
	/* Primary hash (xor) */
	__u32 h = 0;
	__u32 hash = vsid ^ page;
	__u32 base = (hash & 0x3ff) << 3;
	__u32 i;
	bool found = false;
	
	/* Find unused or colliding PTE in PTEG */
	for (i = 0; i < 8; i++) {
		if ((!phte[base + i].v) ||
		    ((phte[base + i].vsid == vsid) && (phte[base + i].api == api))) {
			found = true;
			break;
		}
	}
	
	if (!found) {
		/* Secondary hash (not) */
		__u32 base2 = (~hash & 0x3ff) << 3;
		
		/* Find unused or colliding PTE in PTEG */
		for (i = 0; i < 8; i++) {
			if ((!phte[base2 + i].v) ||
			    ((phte[base2 + i].vsid == vsid) && (phte[base2 + i].api == api))) {
				found = true;
				base = base2;
				h = 1;
				break;
			}
		}
		
		if (!found) {
			// TODO: A/C precedence groups
			i = page % 8;
		}
	}
	
	phte[base + i].v = 1;
	phte[base + i].vsid = vsid;
	phte[base + i].h = h;
	phte[base + i].api = api;
	phte[base + i].rpn = pfn;
	phte[base + i].r = 0;
	phte[base + i].c = 0;
	phte[base + i].pp = 2; // FIXME
}

/** Process Instruction/Data Storage Interrupt
 *
 * @param data   True if Data Storage Interrupt.
 * @param istate Interrupted register context.
 *
 */
void pht_refill(bool data, istate_t *istate)
{
	__address badvaddr;
	pte_t *pte;
	int pfrc;
	as_t *as;
	bool lock;
	
	if (AS == NULL) {
		as = AS_KERNEL;
		lock = false;
	} else {
		as = AS;
		lock = true;
	}
	
	if (data) {
		asm volatile (
			"mfdar %0\n"
			: "=r" (badvaddr)
		);
	} else
		badvaddr = istate->pc;
	
	page_table_lock(as, lock);
	
	pte = find_mapping_and_check(as, lock, badvaddr,
	    PF_ACCESS_READ /* FIXME */, istate, &pfrc);
	if (!pte) {
		switch (pfrc) {
		case AS_PF_FAULT:
			goto fail;
			break;
		case AS_PF_DEFER:
			/*
			 * The page fault came during copy_from_uspace()
			 * or copy_to_uspace().
			 */
			page_table_unlock(as, lock);
			return;
		default:
			panic("Unexpected pfrc (%d)\n", pfrc);
		}
	}
	
	pte->a = 1; /* Record access to PTE */
	pht_insert(badvaddr, pte->pfn);
	
	page_table_unlock(as, lock);
	return;
	
fail:
	page_table_unlock(as, lock);
	pht_refill_fail(badvaddr, istate);
}
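
/** Clear the page hash table
 *
 * Zeroing the table drops the valid bit of every PTE,
 * invalidating all previously inserted translations.
 *
 */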
void pht_init(void)
{
	memsetb((__address) phte, 1 << PHT_BITS, 0);
}

void page_arch_init(void)
{
	if (config.cpu_active == 1) {
		page_mapping_operations = &pt_mapping_operations;
		
		__address cur;
		int flags;
		
		/* Frames below 128 MB are mapped using BAT;
		   map the rest of the physical memory here. */
		for (cur = 128 << 20; cur < last_frame; cur += FRAME_SIZE) {
			flags = PAGE_CACHEABLE;
			if ((PA2KA(cur) >= config.base) &&
			    (PA2KA(cur) < config.base + config.kernel_size))
				flags |= PAGE_GLOBAL;
			page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
		}
		
		/* Allocate page hash table */
		phte_t *physical_phte = (phte_t *)
		    PFN2ADDR(frame_alloc(PHT_ORDER, FRAME_KA | FRAME_PANIC));
		phte = (phte_t *) PA2KA((__address) physical_phte);
		
		ASSERT((__address) physical_phte % (1 << PHT_BITS) == 0);
		pht_init();
		
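		/*
		 * Tell the MMU where the hash table lives: SDR1 holds the
		 * physical base of the PHT (and, on 32-bit PowerPC, a size
		 * mask in its low bits). The table is naturally aligned,
		 * as checked by the ASSERT above, so the mask bits written
		 * here are zero.
		 */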
		asm volatile (
			"mtsdr1 %0\n"
			:
			: "r" ((__address) physical_phte)
		);
	}
}
__address hw_map(__address physaddr, size_t size)
{
	if (last_frame + ALIGN_UP(size, PAGE_SIZE) >
	    KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
		panic("Unable to map physical memory %p (%d bytes)",
		      physaddr, size);
	
	__address virtaddr = PA2KA(last_frame);
	pfn_t i;
	for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
		page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i),
		    physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);
	
	last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
	
	return virtaddr;
}

/** @}
 */