target/hppa: Export function hppa_set_ior_and_isr()
[qemu/armbru.git] target/hppa/mem_helper.c
/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    /*
     * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
     * an algorithm in which a 62-bit absolute address is transformed to
     * a 64-bit physical address. This must then be combined with that
     * pictured in Figure H-11 "Physical Address Space Mapping", in which
     * the full physical address is truncated to the N-bit physical address
     * supported by the implementation.
     *
     * Since the supported physical address space is below 54 bits, the
     * H-8 algorithm is moot and all that is left is to truncate.
     */
    QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
    return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
}
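
/*
 * Illustrative note (not part of the original source): because the result
 * is produced with sextract64(), an absolute address whose top implemented
 * bit is set -- e.g. an I/O address near the top of a hypothetical 40-bit
 * physical space -- comes back sign-extended, matching the sign-extended
 * mapping used for the I/O range in hppa_abs_to_phys_pa2_w0() below.
 */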

hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    /*
     * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
     * combined with Figure H-11, as above.
     */
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        addr = (uint32_t)addr;
    } else if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        addr = (int32_t)addr;
    } else {
        /*
         * PDC address space:
         * Figures H-10 and H-11 of the parisc2.0 spec do not specify
         * where to map into the 64-bit PDC address space.
         * We map with an offset which equals the 32-bit address, which
         * is what can be seen on physical machines too.
         */
        addr = (uint32_t)addr;
        addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
    }
    return addr;
}
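
/*
 * Illustrative summary (derived from the checks above, not from the
 * original source): with the PSW W-bit clear a 32-bit absolute address
 * falls into one of three ranges --
 *   0x00000000..0xefffffff  memory, zero-extended
 *   0xf1000000..0xffffffff  I/O, sign-extended so it lands at the top
 *                           of the implemented physical address space
 *   0xf0000000..0xf0ffffff  PDC, offset by -1ull << (PHYS_BITS - 4)
 */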

static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}

int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              HPPATLBEntry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled. Map absolute to physical. */
    if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
        switch (mmu_idx) {
        case MMU_ABS_W_IDX:
            phys = hppa_abs_to_phys_pa2_w1(addr);
            break;
        case MMU_ABS_IDX:
            if (hppa_is_pa20(env)) {
                phys = hppa_abs_to_phys_pa2_w0(addr);
            } else {
                phys = (uint32_t)addr;
            }
            break;
        default:
            g_assert_not_reached();
        }
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address. */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address. */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection. */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }
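
    /*
     * Worked example (illustrative, not in the original source): a normal
     * code page (ar_type == 2) with ar_pl1 == 3 and ar_pl2 == 0 yields,
     * for a user-mode access (priv == 3):
     *   r_prot = (3 <= 3) * PAGE_READ            = PAGE_READ
     *   x_prot = (0 <= 3 && 3 <= 3) * PAGE_EXEC  = PAGE_EXEC
     * so prot = PAGE_READ | PAGE_EXEC, while w_prot is never included
     * for this ar_type.
     */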

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        /* If bits [31:1] match, and bit 0 is set, suppress write. */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU. Bypass checks for access, D, B and T bits. */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault. */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access. */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault. */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault. */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault. */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp, mmu_idx;

    /* If the (data) mmu is disabled, bypass translation. */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps. */
    mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
               cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0,
                                     &phys, &prot, NULL);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all. Otherwise, while a real cpu
       access might not have permission, the debugger does. */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled)
{
    if (env->psw & PSW_Q) {
        /*
         * For pa1.x, the offset and space never overlap, and so we
         * simply extract the high and low part of the virtual address.
         *
         * For pa2.0, the formation of these are described in section
         * "Interruption Parameter Registers", page 2-15.
         */
        env->cr[CR_IOR] = (uint32_t)addr;
        env->cr[CR_ISR] = addr >> 32;

        if (hppa_is_pa20(env)) {
            if (mmu_disabled) {
                /*
                 * If data translation was disabled, the ISR contains
                 * the upper portion of the abs address, zero-extended.
                 */
                env->cr[CR_ISR] &= 0x3fffffff;
            } else {
                /*
                 * If data translation was enabled, the upper two bits
                 * of the IOR (the b field) are equal to the two space
                 * bits from the base register used to form the gva.
                 */
                uint64_t b;

                b = env->unwind_breg ? env->gr[env->unwind_breg] : 0;
                b >>= (env->psw & PSW_W ? 62 : 30);
                env->cr[CR_IOR] |= b << 62;
            }
        }
    }
}
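
/*
 * Illustrative note (not part of the original source): with the PSW W-bit
 * set, the "b" field above is taken from bits 63:62 of the unwound base
 * register; with the W-bit clear it is taken from bits 31:30.  Either way
 * only two bits survive the final shift and land in IOR bits 63:62.
 */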

G_NORETURN static void
raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
                         vaddr addr, bool mmu_disabled)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    hppa_set_ior_and_isr(env, addr, mmu_disabled);

    cpu_loop_exit_restore(cs, retaddr);
}

bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    HPPATLBEntry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

        /* Failure. Raise the indicated exception. */
        raise_exception_with_ior(env, excp, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success! Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask. We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

/* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already. */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}
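
/*
 * Illustrative note (not part of the original source): the physical page
 * number above is taken from bits 24:5 of REG and shifted into place; the
 * access-rights half of the entry is only filled in by the subsequent
 * ITLBP insn, via set_access_bits_pa11() below.
 */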

static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ulong reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}
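
/*
 * Layout of the PA 1.1 protection word decoded above (illustrative,
 * derived from the extract32() calls, not part of the original source):
 *   bit  29      T
 *   bit  28      D
 *   bit  27      B
 *   bits 26:24   ar_type
 *   bits 23:22   ar_pl1
 *   bits 21:20   ar_pl2
 *   bit  19      U
 *   bits 18:1    access_id
 */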

/* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
                       target_ulong r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

    mask_shift = 2 * (r1 & 0xf);
    va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;

    /* Extract all 52 bits present in the page table entry. */
    ent->pa = r1 << (TARGET_PAGE_BITS - 5);
    /* Align per the page size. */
    ent->pa &= TARGET_PAGE_MASK << mask_shift;
    /* Ignore the bits beyond physical address space. */
    ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);

    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}
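
/*
 * Layout of R2 as decoded above (illustrative, derived from the
 * extract64() calls, not part of the original source):
 *   bit  61      T
 *   bit  60      D
 *   bit  59      B
 *   bits 58:56   ar_type
 *   bits 55:54   ar_pl1
 *   bits 53:52   ar_pl2
 *   bit  51      U
 *   bit  50      O (ignored here)
 *   bit  49      P (ignored here)
 *   bits 31:1    access_id
 */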

void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu_env(cpu);
    vaddr start = data.target_ptr;
    vaddr end;

    /*
     * PA2.0 allows a range of pages encoded into GR[b], which we have
     * copied into the bottom bits of the otherwise page-aligned address.
     * PA1.x will always provide zero here, for a single page flush.
     */
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(env, start, end);
}
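
/*
 * Illustrative example (not part of the original source): a value of 2 in
 * the low nibble selects a range of TARGET_PAGE_SIZE << 4, i.e. 16 pages
 * starting at the page-aligned address; a value of 0 flushes exactly one
 * page, which is what PA 1.x always requests.
 */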

/* This is local to the current cpu. */
void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
{
    trace_hppa_tlb_ptlb_local(env);
    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}

/* This is synchronous across all processors. */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    bool wait = false;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
            wait = true;
        }
    }
    if (wait) {
        async_safe_run_on_cpu(src, ptlb_work, data);
    } else {
        ptlb_work(src, data);
    }
}

void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/* Purge (Insn/Data) TLB entry. This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu. */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);
    if (excp >= 0) {
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        raise_exception_with_ior(env, excp, GETPC(), addr, false);
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1. */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    HPPATLBEntry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
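/*
 * Register calling convention used by the handler below (illustrative
 * summary derived from the code, not part of the original source):
 *   gr[25] = option (0 info, 1 insert, 2 purge, 3 purge all)
 *   gr[24] = info buffer address, or upper 32 bits of the virtual page
 *   gr[23] = lower 32 bits of the virtual page
 *   gr[22] = physical page number (insert) or slot number (purge)
 *   gr[21] = length in pages
 *   gr[20] = access rights word
 *   gr[19] = slot number (insert)
 *   gr[28] = return status
 */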
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env, 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(target_ulong),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                    "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                    "into slot %d\n",
                    (long long) virt_page << TARGET_PAGE_BITS,
                    (long long) (virt_page + len) << TARGET_PAGE_BITS,
                    (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                                    slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}