/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"
27 #ifdef CONFIG_USER_ONLY
/* User-only emulation: there is no MMU, so any memory fault is reported
   as a data memory protection trap with the faulting address in CR_IOR.
   This function does not return; it longjmps back to the cpu loop.  */
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);

    /* ??? Test between data page fault and data memory protection trap,
       which would affect si_code.  */
    cs->exception_index = EXCP_DMP;
    cpu->env.cr[CR_IOR] = address;
    cpu_loop_exit_restore(cs, retaddr);
}
#else
41 static hppa_tlb_entry
*hppa_find_tlb(CPUHPPAState
*env
, vaddr addr
)
45 for (i
= 0; i
< ARRAY_SIZE(env
->tlb
); ++i
) {
46 hppa_tlb_entry
*ent
= &env
->tlb
[i
];
47 if (ent
->va_b
<= addr
&& addr
<= ent
->va_e
) {
48 trace_hppa_tlb_find_entry(env
, ent
+ i
, ent
->entry_valid
,
49 ent
->va_b
, ent
->va_e
, ent
->pa
);
53 trace_hppa_tlb_find_entry_not_found(env
, addr
);
/* Invalidate ENT: flush every QEMU TLB page the entry covers from the
   virtual mmu indexes, then clear the entry itself.  */
static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
{
    CPUState *cs = env_cpu(env);
    /* Number of TARGET_PAGE_SIZE pages spanned by this entry; page_size
       encodes the size such that each step quadruples the page count.  */
    unsigned i, n = 1 << (2 * ent->page_size);
    uint64_t addr = ent->va_b;

    trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);

    for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
        /* Do not flush MMU_PHYS_IDX.  */
        tlb_flush_page_by_mmuidx(cs, addr, 0xf);
    }

    /* Zeroing also clears entry_valid, marking the slot free.  */
    memset(ent, 0, sizeof(*ent));
}
/* Evict and return the next TLB slot in round-robin order, so that
   insertion always succeeds even when the table is full.  */
static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    hppa_tlb_entry *ent;
    uint32_t i = env->tlb_last;

    /* Advance the round-robin cursor, wrapping at the end of the table.  */
    env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
    ent = &env->tlb[i];

    /* Flush whatever was previously in the slot before reuse.  */
    hppa_flush_tlb_ent(env, ent);
    return ent;
}
/* Translate virtual address ADDR at privilege MMU_IDX for an access of
   TYPE (a single PAGE_READ/PAGE_WRITE/PAGE_EXEC bit, or 0 for a
   non-architectural access from within QEMU).  On return, *PPHYS holds
   the physical address and *PPROT the protection bits to install in the
   QEMU TLB.  Returns -1 on success, else the EXCP_* fault to raise.  */
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot;
    hppa_tlb_entry *ent;
    int ret = -1;

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr & ~TARGET_PAGE_MASK);

    /* Map TLB access_rights field to QEMU protection.  */
    r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
    w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if ((env->psw & PSW_P) && ent->access_id) {
        /* If bits [31:1] match, and bit 0 is set, suppress write.  */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}
/* Debugger physical-address lookup (gdbstub/monitor).  Returns the
   physical address for ADDR, or -1 if there is no translation at all.  */
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    if (!(cpu->env.psw & PSW_D)) {
        return addr;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}
/* System-mode TLB fill hook.  Translate ADDR; on success install the
   page into the QEMU TLB and return true.  On failure: return false if
   PROBE, otherwise raise the architectural exception (does not return).  */
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    hwaddr phys;
    int prot, excp, a_prot;

    /* Convert the QEMU access type to the PAGE_* bit checked above.  */
    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
        /* Failure.  Raise the indicated exception.  */
        cs->exception_index = excp;
        if (cpu->env.psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);
    /* Success!  Store the translation into the QEMU TLB.  */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}
/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
/* First half of a TLB insertion: record the virtual/physical mapping
   in a free slot.  The entry becomes valid only after ITLBP supplies
   the protection fields.  */
void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *empty = NULL;
    int i;

    /* Zap any old entries covering ADDR; notice empty entries on the way.  */
    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            if (ent->entry_valid) {
                hppa_flush_tlb_ent(env, ent);
            }
            if (!empty) {
                empty = ent;
            }
        }
    }

    /* If we didn't see an empty entry, evict one.  */
    if (empty == NULL) {
        empty = hppa_alloc_tlb_ent(env);
    }

    /* Note that empty->entry_valid == 0 already.  */
    empty->va_b = addr & TARGET_PAGE_MASK;
    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
    /* The physical page number lives in bits [24:5] of REG.  */
    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa);
}
/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
/* Second half of a TLB insertion: unpack the access-rights word REG
   into the entry created by ITLBA and mark the entry valid.  */
void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (unlikely(ent == NULL)) {
        /* Guest error: ITLBP without a preceding ITLBA for this page.  */
        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
        return;
    }

    /* Unpack the protection-word fields (bit positions per extract32).  */
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}
/* Purge (Insn/Data) TLB.  This is explicitly page-based, and is
   synchronous across all processors.  */
/* Per-cpu worker for HELPER(ptlb): flush the entry covering the address
   carried in DATA, if any valid entry exists on this cpu.  */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (ent && ent->entry_valid) {
        hppa_flush_tlb_ent(env, ent);
    }
}
/* Broadcast a page purge to every cpu.  Remote cpus are flushed
   asynchronously; the requesting cpu uses the "safe" variant, which
   waits for all cpus to reach a quiescent point before running, giving
   the architecturally synchronous behavior.  */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
        }
    }
    async_safe_run_on_cpu(src, ptlb_work, data);
}
345 /* Purge (Insn/Data) TLB entry. This affects an implementation-defined
346 number of pages/entries (we choose all), and is local to the cpu. */
347 void HELPER(ptlbe
)(CPUHPPAState
*env
)
349 trace_hppa_tlb_ptlbe(env
);
350 memset(env
->tlb
, 0, sizeof(env
->tlb
));
351 tlb_flush_by_mmuidx(env_cpu(env
), 0xf);
354 void cpu_hppa_change_prot_id(CPUHPPAState
*env
)
356 if (env
->psw
& PSW_P
) {
357 tlb_flush_by_mmuidx(env_cpu(env
), 0xf);
/* TCG helper stub: forward to cpu_hppa_change_prot_id.  */
void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}
/* Implement the LPA (Load Physical Address) instruction: translate ADDR
   as a kernel data access and return the physical address.  On failure,
   record the faulting address (if PSW_Q) and raise the non-access
   variant of the TLB miss via hppa_dynamic_excp (does not return).  */
target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            /* LPA is a non-access instruction: report the NA variant.  */
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        hppa_dynamic_excp(env, excp, GETPC());
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}
389 /* Return the ar_type of the TLB at VADDR, or -1. */
390 int hppa_artype_for_page(CPUHPPAState
*env
, target_ulong vaddr
)
392 hppa_tlb_entry
*ent
= hppa_find_tlb(env
, vaddr
);
393 return ent
? ent
->ar_type
: -1;
395 #endif /* CONFIG_USER_ONLY */