/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"
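/*
 * Layout note (inferred from the code below): the first
 * HPPA_BTLB_ENTRIES slots of env->tlb hold Block TLB (BTLB) entries;
 * the remaining slots are ordinary TLB entries replaced round-robin.
 * hppa_find_tlb() scans the array in order, so BTLB entries take
 * precedence over any overlapping regular entry.
 */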
static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                      ent->va_b, ent->va_e, ent->pa);
            return ent;
        }
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}
static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->va_b,
                              ent->va_e - ent->va_b + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* never clear BTLBs, unless forced to do so. */
    if (ent < &env->tlb[HPPA_BTLB_ENTRIES] && !force_flush_btlb) {
        return;
    }

    memset(ent, 0, sizeof(*ent));
}
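/*
 * Pick a victim slot for a new TLB entry. The cursor env->tlb_last
 * walks the non-BTLB portion of the array round-robin, wrapping back
 * to HPPA_BTLB_ENTRIES; the previous occupant is flushed first.
 */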
static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    hppa_tlb_entry *ent;
    uint32_t i;

    if (env->tlb_last < HPPA_BTLB_ENTRIES ||
        env->tlb_last >= ARRAY_SIZE(env->tlb)) {
        i = HPPA_BTLB_ENTRIES;
        env->tlb_last = HPPA_BTLB_ENTRIES + 1;
    } else {
        i = env->tlb_last;
        env->tlb_last++;
    }

    ent = &env->tlb[i];
    hppa_flush_tlb_ent(env, ent, false);
    return ent;
}
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              hppa_tlb_entry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    hppa_tlb_entry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }
    /* Virtual translation disabled. Direct map virtual to physical. */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }
    /* Find a valid tlb entry that matches the virtual address. */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address. */
    phys = ent->pa + (addr - ent->va_b);
    /* Map TLB access_rights field to QEMU protection. */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
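    /*
     * Worked example of the checks above: with ar_pl1 = 3 and
     * ar_pl2 = 0, every privilege level 0-3 may read (priv <= 3),
     * only the most privileged level may write (priv <= 0), and all
     * levels may execute on a code page (0 <= priv <= 3).
     */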
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }
    /* access_id == 0 means public page and no check is performed */
    if ((env->psw & PSW_P) && ent->access_id) {
        /* If bits [31:1] match, and bit 0 is set, suppress write. */
        int match = ent->access_id * 2 + 1;
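        /*
         * Example: access_id 0x42 gives match 0x85, the value a PID
         * register holds when it carries ID 0x42 with the
         * write-disable bit (bit 0) set.
         */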
        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }
    /* No guest access type indicates a non-architectural access from
       within QEMU. Bypass checks for access, D, B and T bits. */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault. */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }
    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access. */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault. */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault. */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault. */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }
 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation. */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps. */
    if (!(cpu->env.psw & PSW_D)) {
        return addr;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all. Otherwise, while a real cpu
       access might not have permission, the debugger does. */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    hppa_tlb_entry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }
    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
        /* Failure. Raise the indicated exception. */
        cs->exception_index = excp;
        if (cpu->env.psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64. */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }
    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /* Success! Store the translation into the QEMU TLB. */
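    /*
     * PA-RISC page sizes scale in powers of 4, so each page_size step
     * widens the page by two bits; with no TLB entry (physical mode,
     * ent == NULL), fall back to the base TARGET_PAGE_SIZE.
     */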
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx,
                 TARGET_PAGE_SIZE << (ent ? 2 * ent->page_size : 0));
    return true;
}
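/*
 * Guest-visible flow (as implemented below): a PA 1.1 guest installs a
 * translation with an ITLBA/ITLBP pair. ITLBA stakes out the virtual
 * page and physical frame; ITLBP then supplies the protection word and
 * marks the entry valid.
 */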
/* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *empty = NULL;
    int i;

    /* Zap any old entries covering ADDR; notice empty entries on the way. */
    for (i = HPPA_BTLB_ENTRIES; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            if (ent->entry_valid) {
                hppa_flush_tlb_ent(env, ent, false);
            }
            if (!empty) {
                empty = ent;
            }
        }
    }

    /* If we didn't see an empty entry, evict one. */
    if (empty == NULL) {
        empty = hppa_alloc_tlb_ent(env);
    }

    /* Note that empty->entry_valid == 0 already. */
    empty->va_b = addr & TARGET_PAGE_MASK;
    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa);
}
static void set_access_bits(CPUHPPAState *env, hppa_tlb_entry *ent,
                            target_ureg reg)
{
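    /*
     * Field layout of the protection word as decoded below, with bit 0
     * the LSB as seen by extract32(): access_id [18:1], u [19],
     * ar_pl2 [21:20], ar_pl1 [23:22], ar_type [26:24], b [27], d [28],
     * t [29].
     */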
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}
/* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (unlikely(ent == NULL)) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
        return;
    }

    set_access_bits(env, ent, reg);
}
/* Purge (Insn/Data) TLB. This is explicitly page-based, and is
   synchronous across all processors. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (ent && ent->entry_valid) {
        hppa_flush_tlb_ent(env, ent, false);
    }
}
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
        }
    }
    async_safe_run_on_cpu(src, ptlb_work, data);
}
/* Purge (Insn/Data) TLB entry. This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu. */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    memset(&env->tlb[HPPA_BTLB_ENTRIES], 0,
           sizeof(env->tlb) - HPPA_BTLB_ENTRIES * sizeof(env->tlb[0]));
    env->tlb_last = HPPA_BTLB_ENTRIES;
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}
void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    if (env->psw & PSW_P) {
        tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
    }
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}
target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64. */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        hppa_dynamic_excp(env, excp, GETPC());
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}
/* Return the ar_type of the TLB at VADDR, or -1. */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}
/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
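/*
 * Register convention, as read off the code below: gr[25] selects the
 * PDC_BLOCK_TLB sub-option, gr[24]..gr[19] carry its arguments, and
 * gr[28] returns the PDC status (0 = PDC_OK, negative = error).
 */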
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env, 0);
    uintptr_t ra = GETPC();
    hppa_tlb_entry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;

#ifdef TARGET_HPPA64
    /* BTLBs are not supported on 64-bit CPUs */
    env->gr[28] = -1; /* nonexistent procedure */
    return;
#endif
    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(target_ulong),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(HPPA_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(HPPA_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                    "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                    "into slot %d\n",
                    (long long) virt_page << TARGET_PAGE_BITS,
                    (long long) (virt_page + len) << TARGET_PAGE_BITS,
                    (long long) virt_page, phys_page, len, slot);
        if (slot < HPPA_BTLB_ENTRIES) {
            btlb = &env->tlb[slot];
            /* force flush of possibly existing BTLB entry */
            hppa_flush_tlb_ent(env, btlb, true);
            /* create new BTLB entry */
            btlb->va_b = virt_page << TARGET_PAGE_BITS;
            btlb->va_e = btlb->va_b + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits(env, btlb, env->gr[20]);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                      slot);
        if (slot < HPPA_BTLB_ENTRIES) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < HPPA_BTLB_ENTRIES; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}