/*
 * Copyright (C) 2005 Martin Decky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ppc64mm
 * @{
 */
#include <arch/mm/page.h>
#include <arch/mm/frame.h>
#include <arch/types.h>
#include <arch/exception.h>
#include <genarch/mm/page_pt.h>
#include <mm/frame.h>
#include <mm/page.h>
#include <mm/as.h>
#include <align.h>
#include <config.h>
#include <print.h>
#include <symtab.h>
53 /** Try to find PTE for faulting address
55 * Try to find PTE for faulting address.
56 * The as->lock must be held on entry to this function
59 * @param as Address space.
60 * @param lock Lock/unlock the address space.
61 * @param badvaddr Faulting virtual address.
62 * @param access Access mode that caused the fault.
63 * @param istate Pointer to interrupted state.
64 * @param pfrc Pointer to variable where as_page_fault() return code will be stored.
65 * @return PTE on success, NULL otherwise.
68 static pte_t
*find_mapping_and_check(as_t
*as
, bool lock
, __address badvaddr
, int access
,
69 istate_t
*istate
, int *pfrc
)
72 * Check if the mapping exists in page tables.
74 pte_t
*pte
= page_mapping_find(as
, badvaddr
);
75 if ((pte
) && (pte
->p
)) {
77 * Mapping found in page tables.
78 * Immediately succeed.
85 * Mapping not found in page tables.
86 * Resort to higher-level page fault handler.
88 page_table_unlock(as
, lock
);
89 switch (rc
= as_page_fault(badvaddr
, access
, istate
)) {
92 * The higher-level page fault handler succeeded,
93 * The mapping ought to be in place.
95 page_table_lock(as
, lock
);
96 pte
= page_mapping_find(as
, badvaddr
);
97 ASSERT((pte
) && (pte
->p
));
101 page_table_lock(as
, lock
);
105 page_table_lock(as
, lock
);
106 printf("Page fault.\n");
110 panic("unexpected rc (%d)\n", rc
);
116 static void pht_refill_fail(__address badvaddr
, istate_t
*istate
)
121 char *s
= get_symtab_entry(istate
->pc
);
124 s
= get_symtab_entry(istate
->lr
);
127 panic("%p: PHT Refill Exception at %p (%s<-%s)\n", badvaddr
, istate
->pc
, symbol
, sym2
);
131 static void pht_insert(const __address vaddr
, const pfn_t pfn
)
133 __u32 page
= (vaddr
>> 12) & 0xffff;
134 __u32 api
= (vaddr
>> 22) & 0x3f;
143 /* Primary hash (xor) */
145 __u32 hash
= vsid
^ page
;
146 __u32 base
= (hash
& 0x3ff) << 3;
150 /* Find unused or colliding
152 for (i
= 0; i
< 8; i
++) {
153 if ((!phte
[base
+ i
].v
) || ((phte
[base
+ i
].vsid
== vsid
) && (phte
[base
+ i
].api
== api
))) {
160 /* Secondary hash (not) */
161 __u32 base2
= (~hash
& 0x3ff) << 3;
163 /* Find unused or colliding
165 for (i
= 0; i
< 8; i
++) {
166 if ((!phte
[base2
+ i
].v
) || ((phte
[base2
+ i
].vsid
== vsid
) && (phte
[base2
+ i
].api
== api
))) {
175 // TODO: A/C precedence groups
180 phte
[base
+ i
].v
= 1;
181 phte
[base
+ i
].vsid
= vsid
;
182 phte
[base
+ i
].h
= h
;
183 phte
[base
+ i
].api
= api
;
184 phte
[base
+ i
].rpn
= pfn
;
185 phte
[base
+ i
].r
= 0;
186 phte
[base
+ i
].c
= 0;
187 phte
[base
+ i
].pp
= 2; // FIXME
191 /** Process Instruction/Data Storage Interrupt
193 * @param data True if Data Storage Interrupt.
194 * @param istate Interrupted register context.
197 void pht_refill(bool data
, istate_t
*istate
)
219 badvaddr
= istate
->pc
;
221 page_table_lock(as
, lock
);
223 pte
= find_mapping_and_check(as
, lock
, badvaddr
, PF_ACCESS_READ
/* FIXME */, istate
, &pfrc
);
231 * The page fault came during copy_from_uspace()
232 * or copy_to_uspace().
234 page_table_unlock(as
, lock
);
237 panic("Unexpected pfrc (%d)\n", pfrc
);
241 pte
->a
= 1; /* Record access to PTE */
242 pht_insert(badvaddr
, pte
->pfn
);
244 page_table_unlock(as
, lock
);
248 page_table_unlock(as
, lock
);
249 pht_refill_fail(badvaddr
, istate
);
255 memsetb((__address
) phte
, 1 << PHT_BITS
, 0);
259 void page_arch_init(void)
261 if (config
.cpu_active
== 1) {
262 page_mapping_operations
= &pt_mapping_operations
;
267 /* Frames below 128 MB are mapped using BAT,
268 map rest of the physical memory */
269 for (cur
= 128 << 20; cur
< last_frame
; cur
+= FRAME_SIZE
) {
270 flags
= PAGE_CACHEABLE
;
271 if ((PA2KA(cur
) >= config
.base
) && (PA2KA(cur
) < config
.base
+ config
.kernel_size
))
272 flags
|= PAGE_GLOBAL
;
273 page_mapping_insert(AS_KERNEL
, PA2KA(cur
), cur
, flags
);
276 /* Allocate page hash table */
277 phte_t
*physical_phte
= (phte_t
*) PFN2ADDR(frame_alloc(PHT_ORDER
, FRAME_KA
| FRAME_PANIC
));
278 phte
= (phte_t
*) PA2KA((__address
) physical_phte
);
280 ASSERT((__address
) physical_phte
% (1 << PHT_BITS
) == 0);
286 : "r" ((__address
) physical_phte
)
292 __address
hw_map(__address physaddr
, size_t size
)
294 if (last_frame
+ ALIGN_UP(size
, PAGE_SIZE
) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH
))
295 panic("Unable to map physical memory %p (%d bytes)", physaddr
, size
)
297 __address virtaddr
= PA2KA(last_frame
);
299 for (i
= 0; i
< ADDR2PFN(ALIGN_UP(size
, PAGE_SIZE
)); i
++)
300 page_mapping_insert(AS_KERNEL
, virtaddr
+ PFN2ADDR(i
), physaddr
+ PFN2ADDR(i
), PAGE_NOT_CACHEABLE
);
302 last_frame
= ALIGN_UP(last_frame
+ size
, FRAME_SIZE
);