/*
 * HPPA memory access helper routines
 *
 *  Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"
28 hwaddr
hppa_abs_to_phys_pa2_w1(vaddr addr
)
31 * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
32 * an algorithm in which a 62-bit absolute address is transformed to
33 * a 64-bit physical address. This must then be combined with that
34 * pictured in Figure H-11 "Physical Address Space Mapping", in which
35 * the full physical address is truncated to the N-bit physical address
36 * supported by the implementation.
38 * Since the supported physical address space is below 54 bits, the
39 * H-8 algorithm is moot and all that is left is to truncate.
41 QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS
> 54);
42 return sextract64(addr
, 0, TARGET_PHYS_ADDR_SPACE_BITS
);
45 hwaddr
hppa_abs_to_phys_pa2_w0(vaddr addr
)
48 * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
49 * combined with Figure H-11, as above.
51 if (likely(extract32(addr
, 28, 4) != 0xf)) {
52 /* Memory address space */
53 addr
= (uint32_t)addr
;
54 } else if (extract32(addr
, 24, 4) != 0) {
55 /* I/O address space */
60 * Figures H-10 and H-11 of the parisc2.0 spec do not specify
61 * where to map into the 64-bit PDC address space.
62 * We map with an offset which equals the 32-bit address, which
63 * is what can be seen on physical machines too.
65 addr
= (uint32_t)addr
;
66 addr
|= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS
- 4);
71 static HPPATLBEntry
*hppa_find_tlb(CPUHPPAState
*env
, vaddr addr
)
73 IntervalTreeNode
*i
= interval_tree_iter_first(&env
->tlb_root
, addr
, addr
);
76 HPPATLBEntry
*ent
= container_of(i
, HPPATLBEntry
, itree
);
77 trace_hppa_tlb_find_entry(env
, ent
, ent
->entry_valid
,
78 ent
->itree
.start
, ent
->itree
.last
, ent
->pa
);
81 trace_hppa_tlb_find_entry_not_found(env
, addr
);
85 static void hppa_flush_tlb_ent(CPUHPPAState
*env
, HPPATLBEntry
*ent
,
86 bool force_flush_btlb
)
88 CPUState
*cs
= env_cpu(env
);
91 if (!ent
->entry_valid
) {
95 trace_hppa_tlb_flush_ent(env
, ent
, ent
->itree
.start
,
96 ent
->itree
.last
, ent
->pa
);
98 tlb_flush_range_by_mmuidx(cs
, ent
->itree
.start
,
99 ent
->itree
.last
- ent
->itree
.start
+ 1,
100 HPPA_MMU_FLUSH_MASK
, TARGET_LONG_BITS
);
102 /* Never clear BTLBs, unless forced to do so. */
103 is_btlb
= ent
< &env
->tlb
[HPPA_BTLB_ENTRIES(env
)];
104 if (is_btlb
&& !force_flush_btlb
) {
108 interval_tree_remove(&ent
->itree
, &env
->tlb_root
);
109 memset(ent
, 0, sizeof(*ent
));
112 ent
->unused_next
= env
->tlb_unused
;
113 env
->tlb_unused
= ent
;
117 static void hppa_flush_tlb_range(CPUHPPAState
*env
, vaddr va_b
, vaddr va_e
)
119 IntervalTreeNode
*i
, *n
;
121 i
= interval_tree_iter_first(&env
->tlb_root
, va_b
, va_e
);
123 HPPATLBEntry
*ent
= container_of(i
, HPPATLBEntry
, itree
);
126 * Find the next entry now: In the normal case the current entry
127 * will be removed, but in the BTLB case it will remain.
129 n
= interval_tree_iter_next(i
, va_b
, va_e
);
130 hppa_flush_tlb_ent(env
, ent
, false);
134 static HPPATLBEntry
*hppa_alloc_tlb_ent(CPUHPPAState
*env
)
136 HPPATLBEntry
*ent
= env
->tlb_unused
;
139 uint32_t btlb_entries
= HPPA_BTLB_ENTRIES(env
);
140 uint32_t i
= env
->tlb_last
;
142 if (i
< btlb_entries
|| i
>= ARRAY_SIZE(env
->tlb
)) {
145 env
->tlb_last
= i
+ 1;
148 hppa_flush_tlb_ent(env
, ent
, false);
151 env
->tlb_unused
= ent
->unused_next
;
155 int hppa_get_physical_address(CPUHPPAState
*env
, vaddr addr
, int mmu_idx
,
156 int type
, hwaddr
*pphys
, int *pprot
,
157 HPPATLBEntry
**tlb_entry
)
160 int prot
, r_prot
, w_prot
, x_prot
, priv
;
168 /* Virtual translation disabled. Map absolute to physical. */
169 if (MMU_IDX_MMU_DISABLED(mmu_idx
)) {
172 phys
= hppa_abs_to_phys_pa2_w1(addr
);
175 if (hppa_is_pa20(env
)) {
176 phys
= hppa_abs_to_phys_pa2_w0(addr
);
178 phys
= (uint32_t)addr
;
182 g_assert_not_reached();
184 prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
188 /* Find a valid tlb entry that matches the virtual address. */
189 ent
= hppa_find_tlb(env
, addr
);
193 ret
= (type
== PAGE_EXEC
) ? EXCP_ITLB_MISS
: EXCP_DTLB_MISS
;
201 /* We now know the physical address. */
202 phys
= ent
->pa
+ (addr
- ent
->itree
.start
);
204 /* Map TLB access_rights field to QEMU protection. */
205 priv
= MMU_IDX_TO_PRIV(mmu_idx
);
206 r_prot
= (priv
<= ent
->ar_pl1
) * PAGE_READ
;
207 w_prot
= (priv
<= ent
->ar_pl2
) * PAGE_WRITE
;
208 x_prot
= (ent
->ar_pl2
<= priv
&& priv
<= ent
->ar_pl1
) * PAGE_EXEC
;
209 switch (ent
->ar_type
) {
210 case 0: /* read-only: data page */
213 case 1: /* read/write: dynamic data page */
214 prot
= r_prot
| w_prot
;
216 case 2: /* read/execute: normal code page */
217 prot
= r_prot
| x_prot
;
219 case 3: /* read/write/execute: dynamic code page */
220 prot
= r_prot
| w_prot
| x_prot
;
222 default: /* execute: promote to privilege level type & 3 */
227 /* access_id == 0 means public page and no check is performed */
228 if (ent
->access_id
&& MMU_IDX_TO_P(mmu_idx
)) {
229 /* If bits [31:1] match, and bit 0 is set, suppress write. */
230 int match
= ent
->access_id
* 2 + 1;
232 if (match
== env
->cr
[CR_PID1
] || match
== env
->cr
[CR_PID2
] ||
233 match
== env
->cr
[CR_PID3
] || match
== env
->cr
[CR_PID4
]) {
234 prot
&= PAGE_READ
| PAGE_EXEC
;
235 if (type
== PAGE_WRITE
) {
242 /* No guest access type indicates a non-architectural access from
243 within QEMU. Bypass checks for access, D, B and T bits. */
248 if (unlikely(!(prot
& type
))) {
249 /* The access isn't allowed -- Inst/Data Memory Protection Fault. */
250 ret
= (type
& PAGE_EXEC
) ? EXCP_IMP
: EXCP_DMAR
;
254 /* In reverse priority order, check for conditions which raise faults.
255 As we go, remove PROT bits that cover the condition we want to check.
256 In this way, the resulting PROT will force a re-check of the
257 architectural TLB entry for the next access. */
258 if (unlikely(!ent
->d
)) {
259 if (type
& PAGE_WRITE
) {
260 /* The D bit is not set -- TLB Dirty Bit Fault. */
261 ret
= EXCP_TLB_DIRTY
;
263 prot
&= PAGE_READ
| PAGE_EXEC
;
265 if (unlikely(ent
->b
)) {
266 if (type
& PAGE_WRITE
) {
267 /* The B bit is set -- Data Memory Break Fault. */
270 prot
&= PAGE_READ
| PAGE_EXEC
;
272 if (unlikely(ent
->t
)) {
273 if (!(type
& PAGE_EXEC
)) {
274 /* The T bit is set -- Page Reference Fault. */
283 trace_hppa_tlb_get_physical_address(env
, ret
, prot
, addr
, phys
);
287 hwaddr
hppa_cpu_get_phys_page_debug(CPUState
*cs
, vaddr addr
)
289 HPPACPU
*cpu
= HPPA_CPU(cs
);
291 int prot
, excp
, mmu_idx
;
293 /* If the (data) mmu is disabled, bypass translation. */
294 /* ??? We really ought to know if the code mmu is disabled too,
295 in order to get the correct debugging dumps. */
296 mmu_idx
= (cpu
->env
.psw
& PSW_D
? MMU_KERNEL_IDX
:
297 cpu
->env
.psw
& PSW_W
? MMU_ABS_W_IDX
: MMU_ABS_IDX
);
299 excp
= hppa_get_physical_address(&cpu
->env
, addr
, mmu_idx
, 0,
302 /* Since we're translating for debugging, the only error that is a
303 hard error is no translation at all. Otherwise, while a real cpu
304 access might not have permission, the debugger does. */
305 return excp
== EXCP_DTLB_MISS
? -1 : phys
;
308 void hppa_set_ior_and_isr(CPUHPPAState
*env
, vaddr addr
, bool mmu_disabled
)
310 if (env
->psw
& PSW_Q
) {
312 * For pa1.x, the offset and space never overlap, and so we
313 * simply extract the high and low part of the virtual address.
315 * For pa2.0, the formation of these are described in section
316 * "Interruption Parameter Registers", page 2-15.
318 env
->cr
[CR_IOR
] = (uint32_t)addr
;
319 env
->cr
[CR_ISR
] = addr
>> 32;
321 if (hppa_is_pa20(env
)) {
324 * If data translation was disabled, the ISR contains
325 * the upper portion of the abs address, zero-extended.
327 env
->cr
[CR_ISR
] &= 0x3fffffff;
330 * If data translation was enabled, the upper two bits
331 * of the IOR (the b field) are equal to the two space
332 * bits from the base register used to form the gva.
336 b
= env
->unwind_breg
? env
->gr
[env
->unwind_breg
] : 0;
337 b
>>= (env
->psw
& PSW_W
? 62 : 30);
338 env
->cr
[CR_IOR
] |= b
<< 62;
344 G_NORETURN
static void
345 raise_exception_with_ior(CPUHPPAState
*env
, int excp
, uintptr_t retaddr
,
346 vaddr addr
, bool mmu_disabled
)
348 CPUState
*cs
= env_cpu(env
);
350 cs
->exception_index
= excp
;
351 cpu_restore_state(cs
, retaddr
);
352 hppa_set_ior_and_isr(env
, addr
, mmu_disabled
);
357 void hppa_cpu_do_transaction_failed(CPUState
*cs
, hwaddr physaddr
,
358 vaddr addr
, unsigned size
,
359 MMUAccessType access_type
,
360 int mmu_idx
, MemTxAttrs attrs
,
361 MemTxResult response
, uintptr_t retaddr
)
363 CPUHPPAState
*env
= cpu_env(cs
);
365 qemu_log_mask(LOG_GUEST_ERROR
, "HPMC at " TARGET_FMT_lx
":" TARGET_FMT_lx
366 " while accessing I/O at %#08" HWADDR_PRIx
"\n",
367 env
->iasq_f
, env
->iaoq_f
, physaddr
);
369 /* FIXME: Enable HPMC exceptions when firmware has clean device probing */
371 raise_exception_with_ior(env
, EXCP_HPMC
, retaddr
, addr
,
372 MMU_IDX_MMU_DISABLED(mmu_idx
));
376 bool hppa_cpu_tlb_fill(CPUState
*cs
, vaddr addr
, int size
,
377 MMUAccessType type
, int mmu_idx
,
378 bool probe
, uintptr_t retaddr
)
380 HPPACPU
*cpu
= HPPA_CPU(cs
);
381 CPUHPPAState
*env
= &cpu
->env
;
383 int prot
, excp
, a_prot
;
398 excp
= hppa_get_physical_address(env
, addr
, mmu_idx
,
399 a_prot
, &phys
, &prot
, &ent
);
400 if (unlikely(excp
>= 0)) {
404 trace_hppa_tlb_fill_excp(env
, addr
, size
, type
, mmu_idx
);
406 /* Failure. Raise the indicated exception. */
407 raise_exception_with_ior(env
, excp
, retaddr
, addr
,
408 MMU_IDX_MMU_DISABLED(mmu_idx
));
411 trace_hppa_tlb_fill_success(env
, addr
& TARGET_PAGE_MASK
,
412 phys
& TARGET_PAGE_MASK
, size
, type
, mmu_idx
);
415 * Success! Store the translation into the QEMU TLB.
416 * Note that we always install a single-page entry, because that
417 * is what works best with softmmu -- anything else will trigger
418 * the large page protection mask. We do not require this,
419 * because we record the large page here in the hppa tlb.
421 tlb_set_page(cs
, addr
& TARGET_PAGE_MASK
, phys
& TARGET_PAGE_MASK
,
422 prot
, mmu_idx
, TARGET_PAGE_SIZE
);
426 /* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
427 void HELPER(itlba_pa11
)(CPUHPPAState
*env
, target_ulong addr
, target_ulong reg
)
431 /* Zap any old entries covering ADDR. */
432 addr
&= TARGET_PAGE_MASK
;
433 hppa_flush_tlb_range(env
, addr
, addr
+ TARGET_PAGE_SIZE
- 1);
435 ent
= env
->tlb_partial
;
437 ent
= hppa_alloc_tlb_ent(env
);
438 env
->tlb_partial
= ent
;
441 /* Note that ent->entry_valid == 0 already. */
442 ent
->itree
.start
= addr
;
443 ent
->itree
.last
= addr
+ TARGET_PAGE_SIZE
- 1;
444 ent
->pa
= extract32(reg
, 5, 20) << TARGET_PAGE_BITS
;
445 trace_hppa_tlb_itlba(env
, ent
, ent
->itree
.start
, ent
->itree
.last
, ent
->pa
);
448 static void set_access_bits_pa11(CPUHPPAState
*env
, HPPATLBEntry
*ent
,
451 ent
->access_id
= extract32(reg
, 1, 18);
452 ent
->u
= extract32(reg
, 19, 1);
453 ent
->ar_pl2
= extract32(reg
, 20, 2);
454 ent
->ar_pl1
= extract32(reg
, 22, 2);
455 ent
->ar_type
= extract32(reg
, 24, 3);
456 ent
->b
= extract32(reg
, 27, 1);
457 ent
->d
= extract32(reg
, 28, 1);
458 ent
->t
= extract32(reg
, 29, 1);
459 ent
->entry_valid
= 1;
461 interval_tree_insert(&ent
->itree
, &env
->tlb_root
);
462 trace_hppa_tlb_itlbp(env
, ent
, ent
->access_id
, ent
->u
, ent
->ar_pl2
,
463 ent
->ar_pl1
, ent
->ar_type
, ent
->b
, ent
->d
, ent
->t
);
466 /* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
467 void HELPER(itlbp_pa11
)(CPUHPPAState
*env
, target_ulong addr
, target_ulong reg
)
469 HPPATLBEntry
*ent
= env
->tlb_partial
;
472 env
->tlb_partial
= NULL
;
473 if (ent
->itree
.start
<= addr
&& addr
<= ent
->itree
.last
) {
474 set_access_bits_pa11(env
, ent
, reg
);
478 qemu_log_mask(LOG_GUEST_ERROR
, "ITLBP not following ITLBA\n");
481 static void itlbt_pa20(CPUHPPAState
*env
, target_ulong r1
,
482 target_ulong r2
, vaddr va_b
)
489 mask_shift
= 2 * (r1
& 0xf);
490 va_size
= (uint64_t)TARGET_PAGE_SIZE
<< mask_shift
;
492 va_e
= va_b
+ va_size
- 1;
494 hppa_flush_tlb_range(env
, va_b
, va_e
);
495 ent
= hppa_alloc_tlb_ent(env
);
497 ent
->itree
.start
= va_b
;
498 ent
->itree
.last
= va_e
;
500 /* Extract all 52 bits present in the page table entry. */
501 ent
->pa
= r1
<< (TARGET_PAGE_BITS
- 5);
502 /* Align per the page size. */
503 ent
->pa
&= TARGET_PAGE_MASK
<< mask_shift
;
504 /* Ignore the bits beyond physical address space. */
505 ent
->pa
= sextract64(ent
->pa
, 0, TARGET_PHYS_ADDR_SPACE_BITS
);
507 ent
->t
= extract64(r2
, 61, 1);
508 ent
->d
= extract64(r2
, 60, 1);
509 ent
->b
= extract64(r2
, 59, 1);
510 ent
->ar_type
= extract64(r2
, 56, 3);
511 ent
->ar_pl1
= extract64(r2
, 54, 2);
512 ent
->ar_pl2
= extract64(r2
, 52, 2);
513 ent
->u
= extract64(r2
, 51, 1);
516 ent
->access_id
= extract64(r2
, 1, 31);
517 ent
->entry_valid
= 1;
519 interval_tree_insert(&ent
->itree
, &env
->tlb_root
);
520 trace_hppa_tlb_itlba(env
, ent
, ent
->itree
.start
, ent
->itree
.last
, ent
->pa
);
521 trace_hppa_tlb_itlbp(env
, ent
, ent
->access_id
, ent
->u
,
522 ent
->ar_pl2
, ent
->ar_pl1
, ent
->ar_type
,
523 ent
->b
, ent
->d
, ent
->t
);
526 void HELPER(idtlbt_pa20
)(CPUHPPAState
*env
, target_ulong r1
, target_ulong r2
)
528 vaddr va_b
= deposit64(env
->cr
[CR_IOR
], 32, 32, env
->cr
[CR_ISR
]);
529 itlbt_pa20(env
, r1
, r2
, va_b
);
532 void HELPER(iitlbt_pa20
)(CPUHPPAState
*env
, target_ulong r1
, target_ulong r2
)
534 vaddr va_b
= deposit64(env
->cr
[CR_IIAOQ
], 32, 32, env
->cr
[CR_IIASQ
]);
535 itlbt_pa20(env
, r1
, r2
, va_b
);
538 /* Purge (Insn/Data) TLB. */
539 static void ptlb_work(CPUState
*cpu
, run_on_cpu_data data
)
541 vaddr start
= data
.target_ptr
;
545 * PA2.0 allows a range of pages encoded into GR[b], which we have
546 * copied into the bottom bits of the otherwise page-aligned address.
547 * PA1.x will always provide zero here, for a single page flush.
550 start
&= TARGET_PAGE_MASK
;
551 end
= (vaddr
)TARGET_PAGE_SIZE
<< (2 * end
);
552 end
= start
+ end
- 1;
554 hppa_flush_tlb_range(cpu_env(cpu
), start
, end
);
557 /* This is local to the current cpu. */
558 void HELPER(ptlb_l
)(CPUHPPAState
*env
, target_ulong addr
)
560 trace_hppa_tlb_ptlb_local(env
);
561 ptlb_work(env_cpu(env
), RUN_ON_CPU_TARGET_PTR(addr
));
564 /* This is synchronous across all processors. */
565 void HELPER(ptlb
)(CPUHPPAState
*env
, target_ulong addr
)
567 CPUState
*src
= env_cpu(env
);
571 trace_hppa_tlb_ptlb(env
);
572 run_on_cpu_data data
= RUN_ON_CPU_TARGET_PTR(addr
);
576 async_run_on_cpu(cpu
, ptlb_work
, data
);
581 async_safe_run_on_cpu(src
, ptlb_work
, data
);
583 ptlb_work(src
, data
);
587 void hppa_ptlbe(CPUHPPAState
*env
)
589 uint32_t btlb_entries
= HPPA_BTLB_ENTRIES(env
);
592 /* Zap the (non-btlb) tlb entries themselves. */
593 memset(&env
->tlb
[btlb_entries
], 0,
594 sizeof(env
->tlb
) - btlb_entries
* sizeof(env
->tlb
[0]));
595 env
->tlb_last
= btlb_entries
;
596 env
->tlb_partial
= NULL
;
598 /* Put them all onto the unused list. */
599 env
->tlb_unused
= &env
->tlb
[btlb_entries
];
600 for (i
= btlb_entries
; i
< ARRAY_SIZE(env
->tlb
) - 1; ++i
) {
601 env
->tlb
[i
].unused_next
= &env
->tlb
[i
+ 1];
604 /* Re-initialize the interval tree with only the btlb entries. */
605 memset(&env
->tlb_root
, 0, sizeof(env
->tlb_root
));
606 for (i
= 0; i
< btlb_entries
; ++i
) {
607 if (env
->tlb
[i
].entry_valid
) {
608 interval_tree_insert(&env
->tlb
[i
].itree
, &env
->tlb_root
);
612 tlb_flush_by_mmuidx(env_cpu(env
), HPPA_MMU_FLUSH_MASK
);
615 /* Purge (Insn/Data) TLB entry. This affects an implementation-defined
616 number of pages/entries (we choose all), and is local to the cpu. */
617 void HELPER(ptlbe
)(CPUHPPAState
*env
)
619 trace_hppa_tlb_ptlbe(env
);
620 qemu_log_mask(CPU_LOG_MMU
, "FLUSH ALL TLB ENTRIES\n");
624 void cpu_hppa_change_prot_id(CPUHPPAState
*env
)
626 tlb_flush_by_mmuidx(env_cpu(env
), HPPA_MMU_FLUSH_P_MASK
);
629 void HELPER(change_prot_id
)(CPUHPPAState
*env
)
631 cpu_hppa_change_prot_id(env
);
634 target_ulong
HELPER(lpa
)(CPUHPPAState
*env
, target_ulong addr
)
639 excp
= hppa_get_physical_address(env
, addr
, MMU_KERNEL_IDX
, 0,
642 if (excp
== EXCP_DTLB_MISS
) {
643 excp
= EXCP_NA_DTLB_MISS
;
645 trace_hppa_tlb_lpa_failed(env
, addr
);
646 raise_exception_with_ior(env
, excp
, GETPC(), addr
, false);
648 trace_hppa_tlb_lpa_success(env
, addr
, phys
);
652 /* Return the ar_type of the TLB at VADDR, or -1. */
653 int hppa_artype_for_page(CPUHPPAState
*env
, target_ulong vaddr
)
655 HPPATLBEntry
*ent
= hppa_find_tlb(env
, vaddr
);
656 return ent
? ent
->ar_type
: -1;
660 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
661 * allow operating systems to modify the Block TLB (BTLB) entries.
662 * For implementation details see page 1-13 in
663 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
665 void HELPER(diag_btlb
)(CPUHPPAState
*env
)
667 unsigned int phys_page
, len
, slot
;
668 int mmu_idx
= cpu_mmu_index(env_cpu(env
), 0);
669 uintptr_t ra
= GETPC();
673 uint32_t btlb_entries
= HPPA_BTLB_ENTRIES(env
);
675 /* BTLBs are not supported on 64-bit CPUs */
676 if (btlb_entries
== 0) {
677 env
->gr
[28] = -1; /* nonexistent procedure */
681 env
->gr
[28] = 0; /* PDC_OK */
683 switch (env
->gr
[25]) {
685 /* return BTLB parameters */
686 qemu_log_mask(CPU_LOG_MMU
, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
687 vaddr
= probe_access(env
, env
->gr
[24], 4 * sizeof(uint32_t),
688 MMU_DATA_STORE
, mmu_idx
, ra
);
690 env
->gr
[28] = -10; /* invalid argument */
692 vaddr
[0] = cpu_to_be32(1);
693 vaddr
[1] = cpu_to_be32(16 * 1024);
694 vaddr
[2] = cpu_to_be32(PA10_BTLB_FIXED
);
695 vaddr
[3] = cpu_to_be32(PA10_BTLB_VARIABLE
);
699 /* insert BTLB entry */
700 virt_page
= env
->gr
[24]; /* upper 32 bits */
702 virt_page
|= env
->gr
[23]; /* lower 32 bits */
703 phys_page
= env
->gr
[22];
706 qemu_log_mask(CPU_LOG_MMU
, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
707 "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
709 (long long) virt_page
<< TARGET_PAGE_BITS
,
710 (long long) (virt_page
+ len
) << TARGET_PAGE_BITS
,
711 (long long) virt_page
, phys_page
, len
, slot
);
712 if (slot
< btlb_entries
) {
713 btlb
= &env
->tlb
[slot
];
715 /* Force flush of possibly existing BTLB entry. */
716 hppa_flush_tlb_ent(env
, btlb
, true);
718 /* Create new BTLB entry */
719 btlb
->itree
.start
= virt_page
<< TARGET_PAGE_BITS
;
720 btlb
->itree
.last
= btlb
->itree
.start
+ len
* TARGET_PAGE_SIZE
- 1;
721 btlb
->pa
= phys_page
<< TARGET_PAGE_BITS
;
722 set_access_bits_pa11(env
, btlb
, env
->gr
[20]);
726 env
->gr
[28] = -10; /* invalid argument */
730 /* Purge BTLB entry */
732 qemu_log_mask(CPU_LOG_MMU
, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
734 if (slot
< btlb_entries
) {
735 btlb
= &env
->tlb
[slot
];
736 hppa_flush_tlb_ent(env
, btlb
, true);
738 env
->gr
[28] = -10; /* invalid argument */
742 /* Purge all BTLB entries */
743 qemu_log_mask(CPU_LOG_MMU
, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
744 for (slot
= 0; slot
< btlb_entries
; slot
++) {
745 btlb
= &env
->tlb
[slot
];
746 hppa_flush_tlb_ent(env
, btlb
, true);
750 env
->gr
[28] = -2; /* nonexistent option */