target/hppa: Implement tlb_fill
[qemu/ar7.git] / target / hppa / mem_helper.c
blob: 334ef98a3263ea92ebd2e7faf03fff4fd5139ddb
1 /*
2 * HPPA memory access helper routines
4 * Copyright (c) 2017 Helge Deller
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "exec/exec-all.h"
23 #include "exec/helper-proto.h"
24 #include "qom/cpu.h"
26 #ifdef CONFIG_USER_ONLY
27 int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
28 int size, int rw, int mmu_idx)
30 HPPACPU *cpu = HPPA_CPU(cs);
32 /* ??? Test between data page fault and data memory protection trap,
33 which would affect si_code. */
34 cs->exception_index = EXCP_DMP;
35 cpu->env.cr[CR_IOR] = address;
36 return 1;
38 #else
39 static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
41 int i;
43 for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
44 hppa_tlb_entry *ent = &env->tlb[i];
45 if (ent->va_b <= addr && addr <= ent->va_e && ent->entry_valid) {
46 return ent;
49 return NULL;
52 int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
53 int type, hwaddr *pphys, int *pprot)
55 hwaddr phys;
56 int prot, r_prot, w_prot, x_prot;
57 hppa_tlb_entry *ent;
58 int ret = -1;
60 /* Virtual translation disabled. Direct map virtual to physical. */
61 if (mmu_idx == MMU_PHYS_IDX) {
62 phys = addr;
63 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
64 goto egress;
67 /* Find a valid tlb entry that matches the virtual address. */
68 ent = hppa_find_tlb(env, addr);
69 if (ent == NULL) {
70 phys = 0;
71 prot = 0;
72 ret = (type & PAGE_EXEC ? EXCP_ITLB_MISS : EXCP_DTLB_MISS);
73 goto egress;
76 /* We now know the physical address. */
77 phys = ent->pa + (addr & ~TARGET_PAGE_MASK);
79 /* Map TLB access_rights field to QEMU protection. */
80 r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
81 w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
82 x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
83 switch (ent->ar_type) {
84 case 0: /* read-only: data page */
85 prot = r_prot;
86 break;
87 case 1: /* read/write: dynamic data page */
88 prot = r_prot | w_prot;
89 break;
90 case 2: /* read/execute: normal code page */
91 prot = r_prot | x_prot;
92 break;
93 case 3: /* read/write/execute: dynamic code page */
94 prot = r_prot | w_prot | x_prot;
95 break;
96 default: /* execute: promote to privilege level type & 3 */
97 prot = x_prot;
100 /* ??? Check PSW_P and ent->access_prot. This can remove PAGE_WRITE. */
102 /* No guest access type indicates a non-architectural access from
103 within QEMU. Bypass checks for access, D, B and T bits. */
104 if (type == 0) {
105 goto egress;
108 if (unlikely(!(prot & type))) {
109 /* The access isn't allowed -- Inst/Data Memory Protection Fault. */
110 ret = (type & PAGE_EXEC ? EXCP_IMP : EXCP_DMP);
111 goto egress;
114 /* In reverse priority order, check for conditions which raise faults.
115 As we go, remove PROT bits that cover the condition we want to check.
116 In this way, the resulting PROT will force a re-check of the
117 architectural TLB entry for the next access. */
118 if (unlikely(!ent->d)) {
119 if (type & PAGE_WRITE) {
120 /* The D bit is not set -- TLB Dirty Bit Fault. */
121 ret = EXCP_TLB_DIRTY;
123 prot &= PAGE_READ | PAGE_EXEC;
125 if (unlikely(ent->b)) {
126 if (type & PAGE_WRITE) {
127 /* The B bit is set -- Data Memory Break Fault. */
128 ret = EXCP_DMB;
130 prot &= PAGE_READ | PAGE_EXEC;
132 if (unlikely(ent->t)) {
133 if (!(type & PAGE_EXEC)) {
134 /* The T bit is set -- Page Reference Fault. */
135 ret = EXCP_PAGE_REF;
137 prot &= PAGE_EXEC;
140 egress:
141 *pphys = phys;
142 *pprot = prot;
143 return ret;
146 hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
148 HPPACPU *cpu = HPPA_CPU(cs);
149 hwaddr phys;
150 int prot, excp;
152 /* If the (data) mmu is disabled, bypass translation. */
153 /* ??? We really ought to know if the code mmu is disabled too,
154 in order to get the correct debugging dumps. */
155 if (!(cpu->env.psw & PSW_D)) {
156 return addr;
159 excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
160 &phys, &prot);
162 /* Since we're translating for debugging, the only error that is a
163 hard error is no translation at all. Otherwise, while a real cpu
164 access might not have permission, the debugger does. */
165 return excp == EXCP_DTLB_MISS ? -1 : phys;
168 void tlb_fill(CPUState *cs, target_ulong addr, int size,
169 MMUAccessType type, int mmu_idx, uintptr_t retaddr)
171 HPPACPU *cpu = HPPA_CPU(cs);
172 int prot, excp, a_prot;
173 hwaddr phys;
175 switch (type) {
176 case MMU_INST_FETCH:
177 a_prot = PAGE_EXEC;
178 break;
179 case MMU_DATA_STORE:
180 a_prot = PAGE_WRITE;
181 break;
182 default:
183 a_prot = PAGE_READ;
184 break;
187 excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx,
188 a_prot, &phys, &prot);
189 if (unlikely(excp >= 0)) {
190 /* Failure. Raise the indicated exception. */
191 cs->exception_index = excp;
192 if (cpu->env.psw & PSW_Q) {
193 /* ??? Needs tweaking for hppa64. */
194 cpu->env.cr[CR_IOR] = addr;
195 cpu->env.cr[CR_ISR] = addr >> 32;
197 cpu_loop_exit_restore(cs, retaddr);
200 /* Success! Store the translation into the QEMU TLB. */
201 tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
202 prot, mmu_idx, TARGET_PAGE_SIZE);
204 #endif /* CONFIG_USER_ONLY */