target/hppa/mem_helper.c

/*
 *  HPPA memory access helper routines
 *
 *  Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    /*
     * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
     * an algorithm in which a 62-bit absolute address is transformed to
     * a 64-bit physical address.  This must then be combined with that
     * pictured in Figure H-11 "Physical Address Space Mapping", in which
     * the full physical address is truncated to the N-bit physical address
     * supported by the implementation.
     *
     * Since the supported physical address space is below 54 bits, the
     * H-8 algorithm is moot and all that is left is to truncate.
     */
    QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
    return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
}

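/*
 * Rough sketch of the 32-bit (W=0) absolute address layout handled below,
 * as implied by the checks on bits [31:28] and [27:24]:
 *   0x00000000-0xefffffff  memory, zero-extended into the physical map
 *   0xf1000000-0xffffffff  I/O, sign-extended to the top of the physical map
 *   0xf0000000-0xf0ffffff  PDC space, mapped at a high offset chosen to
 *                          match what is observed on real machines
 */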
hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    /*
     * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
     * combined with Figure H-11, as above.
     */
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        addr = (uint32_t)addr;
    } else if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        addr = (int32_t)addr;
    } else {
        /*
         * PDC address space:
         * Figures H-10 and H-11 of the parisc2.0 spec do not specify
         * where to map into the 64-bit PDC address space.
         * We map with an offset which equals the 32-bit address, which
         * is what can be seen on physical machines too.
         */
        addr = (uint32_t)addr;
        addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
    }
    return addr;
}

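/*
 * The emulated TLB is kept in an interval tree keyed by virtual address,
 * so lookups and range flushes stay logarithmic even with the large
 * (BTLB + dynamic) entry array.  hppa_find_tlb returns the first entry
 * whose range covers ADDR, or NULL if there is none.
 */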
static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

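/*
 * Flush all dynamic TLB entries overlapping [va_b, va_e].  BTLB entries in
 * the range are flushed from the QEMU softmmu but remain architecturally
 * installed, since hppa_flush_tlb_ent() above only removes them when forced.
 */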
static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}

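/*
 * PA-RISC protection IDs: each TLB entry carries an access_id, and the PID
 * control registers hold the IDs the current context is allowed to use.
 * As the code below shows, bit 0 of a PID register acts as a write-disable
 * bit and the ID proper sits in the bits above it.
 */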
#define ACCESS_ID_MASK 0xffff

/* Return the set of protections allowed by a PID match. */
static int match_prot_id_1(uint32_t access_id, uint32_t prot_id)
{
    if (((access_id ^ (prot_id >> 1)) & ACCESS_ID_MASK) == 0) {
        return (prot_id & 1
                ? PAGE_EXEC | PAGE_READ
                : PAGE_EXEC | PAGE_READ | PAGE_WRITE);
    }
    return 0;
}

static int match_prot_id32(CPUHPPAState *env, uint32_t access_id)
{
    int r, i;

    for (i = CR_PID1; i <= CR_PID4; ++i) {
        r = match_prot_id_1(access_id, env->cr[i]);
        if (r) {
            return r;
        }
    }
    return 0;
}

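/*
 * The pa2.0 variant: each 64-bit PID register packs two protection IDs,
 * so both 32-bit halves of every CR_PIDx are matched.
 */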
static int match_prot_id64(CPUHPPAState *env, uint32_t access_id)
{
    int r, i;

    for (i = CR_PID1; i <= CR_PID4; ++i) {
        r = match_prot_id_1(access_id, env->cr[i]);
        if (r) {
            return r;
        }
        r = match_prot_id_1(access_id, env->cr[i] >> 32);
        if (r) {
            return r;
        }
    }
    return 0;
}

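/*
 * Translate a virtual address into a physical address and the protections
 * with which it may be accessed.  Returns -1 on success (filling in *pphys
 * and *pprot), or an EXCP_* code describing the fault to raise.  A TYPE of
 * 0 marks a non-architectural probe from within QEMU, which skips the
 * access-id, D, B, P and T checks.
 */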
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              HPPATLBEntry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled.  Map absolute to physical.  */
    if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
        switch (mmu_idx) {
        case MMU_ABS_W_IDX:
            phys = hppa_abs_to_phys_pa2_w1(addr);
            break;
        case MMU_ABS_IDX:
            if (hppa_is_pa20(env)) {
                phys = hppa_abs_to_phys_pa2_w0(addr);
            } else {
                phys = (uint32_t)addr;
            }
            break;
        default:
            g_assert_not_reached();
        }
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection.  */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /*
     * No guest access type indicates a non-architectural access from
     * within QEMU.  Bypass checks for access, D, B, P and T bits.
     */
    if (type == 0) {
        goto egress;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        int access_prot = (hppa_is_pa20(env)
                           ? match_prot_id64(env, ent->access_id)
                           : match_prot_id32(env, ent->access_id));
        if (unlikely(!(type & access_prot))) {
            /* Not allowed -- Inst/Data Memory Protection Id Fault. */
            ret = type & PAGE_EXEC ? EXCP_IMP : EXCP_DMPI;
            goto egress;
        }
        /* Otherwise exclude permissions not allowed (i.e. WD). */
        prot &= access_prot;
    }

    if (unlikely(!(prot & type))) {
        /* Not allowed -- Inst/Data Memory Access Rights Fault. */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp, mmu_idx;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
               cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0,
                                     &phys, &prot, NULL);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

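/*
 * On an interruption, the ISR/IOR control registers report the space and
 * offset of the faulting virtual (or absolute) address to the guest.  They
 * are only updated while PSW_Q is set, i.e. while collection of
 * interruption state is enabled.
 */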
void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled)
{
    if (env->psw & PSW_Q) {
        /*
         * For pa1.x, the offset and space never overlap, and so we
         * simply extract the high and low part of the virtual address.
         *
         * For pa2.0, the formation of these are described in section
         * "Interruption Parameter Registers", page 2-15.
         */
        env->cr[CR_IOR] = (uint32_t)addr;
        env->cr[CR_ISR] = addr >> 32;

        if (hppa_is_pa20(env)) {
            if (mmu_disabled) {
                /*
                 * If data translation was disabled, the ISR contains
                 * the upper portion of the abs address, zero-extended.
                 */
                env->cr[CR_ISR] &= 0x3fffffff;
            } else {
                /*
                 * If data translation was enabled, the upper two bits
                 * of the IOR (the b field) are equal to the two space
                 * bits from the base register used to form the gva.
                 */
                uint64_t b;

                b = env->unwind_breg ? env->gr[env->unwind_breg] : 0;
                b >>= (env->psw & PSW_W ? 62 : 30);
                env->cr[CR_IOR] |= b << 62;
            }
        }
    }
}

G_NORETURN static void
raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
                         vaddr addr, bool mmu_disabled)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_restore_state(cs, retaddr);
    hppa_set_ior_and_isr(env, addr, mmu_disabled);

    cpu_loop_exit(cs);
}

void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                    vaddr addr, unsigned size,
                                    MMUAccessType access_type,
                                    int mmu_idx, MemTxAttrs attrs,
                                    MemTxResult response, uintptr_t retaddr)
{
    CPUHPPAState *env = cpu_env(cs);

    qemu_log_mask(LOG_GUEST_ERROR, "HPMC at " TARGET_FMT_lx ":" TARGET_FMT_lx
                  " while accessing I/O at %#08" HWADDR_PRIx "\n",
                  env->iasq_f, env->iaoq_f, physaddr);

    /* FIXME: Enable HPMC exceptions when firmware has clean device probing */
    if (0) {
        raise_exception_with_ior(env, EXCP_HPMC, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }
}

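/*
 * Softmmu TLB fill hook.  Returns true once a translation has been
 * installed, returns false for a failed non-faulting probe, and for a
 * faulting access raises the guest exception and does not return.
 */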
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    HPPATLBEntry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

        /* Failure.  Raise the indicated exception.  */
        raise_exception_with_ior(env, excp, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success!  Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask.  We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

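/*
 * PA 1.1 inserts a TLB entry in two steps: ITLBA supplies the virtual and
 * physical page numbers, ITLBP then supplies the access rights.
 * env->tlb_partial holds the half-built entry in between; it only becomes
 * visible in the interval tree once ITLBP marks it valid.
 */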
/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already.  */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}

static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ulong reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

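/*
 * PA 2.0 inserts an entry with a single instruction.  R1 carries the
 * physical page number plus the page-size mask field in its low 4 bits,
 * and R2 the attribute bits as laid out in the pa2.0 page table entry;
 * the virtual address comes from the interruption registers (see the two
 * helpers that follow).
 */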
static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
                       target_ulong r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

    mask_shift = 2 * (r1 & 0xf);
    va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;

    /* Extract all 52 bits present in the page table entry. */
    ent->pa = r1 << (TARGET_PAGE_BITS - 5);
    /* Align per the page size. */
    ent->pa &= TARGET_PAGE_MASK << mask_shift;
    /* Ignore the bits beyond physical address space. */
    ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);

    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}

void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    vaddr start = data.target_ptr;
    vaddr end;

    /*
     * PA2.0 allows a range of pages encoded into GR[b], which we have
     * copied into the bottom bits of the otherwise page-aligned address.
     * PA1.x will always provide zero here, for a single page flush.
     */
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(cpu_env(cpu), start, end);
}

/* This is local to the current cpu. */
void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
{
    trace_hppa_tlb_ptlb_local(env);
    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}

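/*
 * The global purge is broadcast as asynchronous work to every other vCPU.
 * If any were queued, the issuing vCPU then schedules its own flush as
 * "safe" work, which runs while all vCPUs are held outside of code
 * execution, so the whole purge completes with the other vCPUs quiescent.
 */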
/* This is synchronous across all processors.  */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    bool wait = false;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
            wait = true;
        }
    }
    if (wait) {
        async_safe_run_on_cpu(src, ptlb_work, data);
    } else {
        ptlb_work(src, data);
    }
}

void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);
    if (excp >= 0) {
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        raise_exception_with_ior(env, excp, GETPC(), addr, false);
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    HPPATLBEntry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

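    /*
     * PDC calling convention as used by this call (a sketch, matching the
     * register usage below): gr[25] selects the PDC_BLOCK_TLB sub-option,
     * gr[19]-gr[24] carry its arguments, and the status is returned in
     * gr[28].
     */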
    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(uint32_t),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                      "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                      "into slot %d\n",
                      (long long) virt_page << TARGET_PAGE_BITS,
                      (long long) (virt_page + len) << TARGET_PAGE_BITS,
                      (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                      slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}