/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "trace.h"
#include "semihosting/common-semi.h"
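/*
 * In system emulation the MMU index is simply the current privilege
 * level; user-only emulation always translates with index 0.
 */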
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}
static RISCVMXL cpu_get_xl(CPURISCVState *env)
{
#if defined(TARGET_RISCV32)
    return MXL_RV32;
#elif defined(CONFIG_USER_ONLY)
    return MXL_RV64;
#else
    RISCVMXL xl = riscv_cpu_mxl(env);

    /*
     * When emulating a 32-bit-only cpu, use RV32.
     * When emulating a 64-bit cpu, and MXL has been reduced to RV32,
     * MSTATUSH doesn't have UXL/SXL, therefore XLEN cannot be widened
     * back to RV64 for lower privs.
     */
    if (xl != MXL_RV32) {
        switch (env->priv) {
        case PRV_M:
            break;
        case PRV_U:
            xl = get_field(env->mstatus, MSTATUS64_UXL);
            break;
        default: /* PRV_S | PRV_H */
            xl = get_field(env->mstatus, MSTATUS64_SXL);
            break;
        }
    }

    return xl;
#endif
}
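/*
 * Gather the CPU state bits that must be encoded in the translation
 * block flags: vector configuration, FP/vector status, hypervisor
 * load/store state, pointer-masking state and the effective XLEN.
 */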
void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
                          target_ulong *cs_base, uint32_t *pflags)
{
    uint32_t flags = 0;

    *pc = env->pc;
    *cs_base = 0;

    if (riscv_has_ext(env, RVV)) {
        /*
         * If env->vl equals VLMAX, we can use generic vector operation
         * expanders (GVEC) to accelerate the vector operations.
         * However, as LMUL could be a fractional number, the maximum
         * vector size that can be operated on might be less than 8 bytes,
         * which is not supported by GVEC. So we set the vl_eq_vlmax flag
         * to true only when maxsz >= 8 bytes.
         */
        uint32_t vlmax = vext_get_vlmax(env_archcpu(env), env->vtype);
        uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
        uint32_t maxsz = vlmax << sew;
        bool vl_eq_vlmax = (env->vstart == 0) && (vlmax == env->vl) &&
                           (maxsz >= 8);
        flags = FIELD_DP32(flags, TB_FLAGS, VILL,
                           FIELD_EX64(env->vtype, VTYPE, VILL));
        flags = FIELD_DP32(flags, TB_FLAGS, SEW, sew);
        flags = FIELD_DP32(flags, TB_FLAGS, LMUL,
                           FIELD_EX64(env->vtype, VTYPE, VLMUL));
        flags = FIELD_DP32(flags, TB_FLAGS, VL_EQ_VLMAX, vl_eq_vlmax);
    } else {
        flags = FIELD_DP32(flags, TB_FLAGS, VILL, 1);
    }

#ifdef CONFIG_USER_ONLY
    flags |= TB_FLAGS_MSTATUS_FS;
    flags |= TB_FLAGS_MSTATUS_VS;
#else
    flags |= cpu_mmu_index(env, 0);
    if (riscv_cpu_fp_enabled(env)) {
        flags |= env->mstatus & MSTATUS_FS;
    }

    if (riscv_cpu_vector_enabled(env)) {
        flags |= env->mstatus & MSTATUS_VS;
    }

    if (riscv_has_ext(env, RVH)) {
        if (env->priv == PRV_M ||
            (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
            (env->priv == PRV_U && !riscv_cpu_virt_enabled(env) &&
                get_field(env->hstatus, HSTATUS_HU))) {
            flags = FIELD_DP32(flags, TB_FLAGS, HLSX, 1);
        }

        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_FS,
                           get_field(env->mstatus_hs, MSTATUS_FS));

        flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS,
                           get_field(env->mstatus_hs, MSTATUS_VS));
    }
    if (riscv_has_ext(env, RVJ)) {
        int priv = flags & TB_FLAGS_PRIV_MMU_MASK;
        bool pm_enabled = false;
        switch (priv) {
        case PRV_U:
            pm_enabled = env->mmte & U_PM_ENABLE;
            break;
        case PRV_S:
            pm_enabled = env->mmte & S_PM_ENABLE;
            break;
        case PRV_M:
            pm_enabled = env->mmte & M_PM_ENABLE;
            break;
        default:
            g_assert_not_reached();
        }
        flags = FIELD_DP32(flags, TB_FLAGS, PM_ENABLED, pm_enabled);
    }
#endif

    flags = FIELD_DP32(flags, TB_FLAGS, XL, cpu_get_xl(env));

    *pflags = flags;
}
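/*
 * System-emulation-only helpers follow, up to the matching #endif:
 * interrupt delivery, hypervisor register swapping and address
 * translation.
 */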
#ifndef CONFIG_USER_ONLY
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    target_ulong virt_enabled = riscv_cpu_virt_enabled(env);

    target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
    target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);

    target_ulong pending = env->mip & env->mie;

    target_ulong mie    = env->priv < PRV_M ||
                          (env->priv == PRV_M && mstatus_mie);
    target_ulong sie    = env->priv < PRV_S ||
                          (env->priv == PRV_S && mstatus_sie);
    target_ulong hsie   = virt_enabled || sie;
    target_ulong vsie   = virt_enabled && sie;

    target_ulong irqs = (pending & ~env->mideleg & -mie) |
                        (pending &  env->mideleg & ~env->hideleg & -hsie) |
                        (pending &  env->mideleg &  env->hideleg & -vsie);

    if (irqs) {
        return ctz64(irqs); /* since non-zero */
    } else {
        return RISCV_EXCP_NONE; /* indicates no pending interrupt */
    }
}
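/*
 * cpu_exec_interrupt hook: deliver the pending local interrupt, if
 * any, by raising the corresponding exception.
 */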
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
            return false;
        }
        return true;
    }

    return false;
}
/* Return true if vector support is currently enabled */
bool riscv_cpu_vector_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_VS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_VS)) {
            return false;
        }
        return true;
    }

    return false;
}
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
    uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS |
                            MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
                            MSTATUS64_UXL | MSTATUS_VS;
    bool current_virt = riscv_cpu_virt_enabled(env);

    g_assert(riscv_has_ext(env, RVH));

    if (current_virt) {
        /* Current V=1 and we are about to change to V=0 */
        env->vsstatus = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->mstatus_hs;

        env->vstvec = env->stvec;
        env->stvec = env->stvec_hs;

        env->vsscratch = env->sscratch;
        env->sscratch = env->sscratch_hs;

        env->vsepc = env->sepc;
        env->sepc = env->sepc_hs;

        env->vscause = env->scause;
        env->scause = env->scause_hs;

        env->vstval = env->stval;
        env->stval = env->stval_hs;

        env->vsatp = env->satp;
        env->satp = env->satp_hs;
    } else {
        /* Current V=0 and we are about to change to V=1 */
        env->mstatus_hs = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->vsstatus;

        env->stvec_hs = env->stvec;
        env->stvec = env->vstvec;

        env->sscratch_hs = env->sscratch;
        env->sscratch = env->vsscratch;

        env->sepc_hs = env->sepc;
        env->sepc = env->vsepc;

        env->scause_hs = env->scause;
        env->scause = env->vscause;

        env->stval_hs = env->stval;
        env->stval = env->vstval;

        env->satp_hs = env->satp;
        env->satp = env->vsatp;
    }
}
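/* Query whether the hart is currently executing with V=1. */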
bool riscv_cpu_virt_enabled(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return false;
    }

    return get_field(env->virt, VIRT_ONOFF);
}
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    /* Flush the TLB on all virt mode changes. */
    if (get_field(env->virt, VIRT_ONOFF) != enable) {
        tlb_flush(env_cpu(env));
    }

    env->virt = set_field(env->virt, VIRT_ONOFF, enable);
}
bool riscv_cpu_two_stage_lookup(int mmu_idx)
{
    return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK;
}
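/*
 * Reserve interrupt lines for an interrupt controller model. Returns
 * -1 if any requested line is already claimed, 0 on success.
 */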
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}
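/*
 * Update the bits of mip selected by mask and re-evaluate the hard
 * interrupt line, taking the iothread lock if the caller does not
 * already hold it. Returns the previous value of mip.
 */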
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
{
    CPURISCVState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t old = env->mip;
    bool locked = false;

    if (!qemu_mutex_iothread_locked()) {
        locked = true;
        qemu_mutex_lock_iothread();
    }

    env->mip = (env->mip & ~mask) | (value & mask);

    if (env->mip) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }

    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return old;
}
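/* Install the board's rdtime callback (e.g. one backed by the CLINT). */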
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t),
                             uint32_t arg)
{
    env->rdtime_fn = fn;
    env->rdtime_fn_arg = arg;
}
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
    env->load_res = -1;
}
/*
 * get_physical_address_pmp - check PMP permission for this physical address
 *
 * Match the PMP region and check permission for this physical address and its
 * TLB page. Returns 0 if the permission checking was successful.
 *
 * @env: CPURISCVState
 * @prot: The returned protection attributes
 * @tlb_size: TLB page size containing addr. It could be modified after PMP
 *            permission checking. NULL if no TLB page should be set for addr.
 * @addr: The physical address to be checked for permission
 * @access_type: The type of MMU access
 * @mode: Indicates current privilege level.
 */
static int get_physical_address_pmp(CPURISCVState *env, int *prot,
                                    target_ulong *tlb_size, hwaddr addr,
                                    int size, MMUAccessType access_type,
                                    int mode)
{
    pmp_priv_t pmp_priv;
    target_ulong tlb_size_pmp = 0;

    if (!riscv_feature(env, RISCV_FEATURE_PMP)) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    if (!pmp_hart_has_privs(env, addr, size, 1 << access_type, &pmp_priv,
                            mode)) {
        *prot = 0;
        return TRANSLATE_PMP_FAIL;
    }

    *prot = pmp_priv_to_page_prot(pmp_priv);
    if (tlb_size != NULL) {
        if (pmp_is_range_in_tlb(env, addr & ~(*tlb_size - 1), &tlb_size_pmp)) {
            *tlb_size = tlb_size_pmp;
        }
    }

    return TRANSLATE_SUCCESS;
}
/* get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful.
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 * @env: CPURISCVState
 * @physical: This will be set to the calculated physical address
 * @prot: The returned protection attributes
 * @addr: The virtual address to be translated
 * @fault_pte_addr: If not NULL, this will be set to the fault pte address
 *                  when an error occurs on pte address translation.
 *                  This will already be shifted to match htval.
 * @access_type: The type of MMU access
 * @mmu_idx: Indicates current privilege level
 * @first_stage: Are we in first stage translation?
 *               Second stage is used for hypervisor guest translation
 * @two_stage: Are we going to perform two stage translation
 * @is_debug: Is this access from a debugger or the monitor?
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                target_ulong *fault_pte_addr,
                                int access_type, int mmu_idx,
                                bool first_stage, bool two_stage,
                                bool is_debug)
{
    /* NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct */
    MemTxResult res;
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
    int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
    bool use_background = false;

    /*
     * Check if we should use the background registers for the two
     * stage translation. We don't need to check if we actually need
     * two stage translation as that happened before this function
     * was called. Background registers will be used if the guest has
     * forced a two stage translation to be on (in HS or M mode).
     */
    if (!riscv_cpu_virt_enabled(env) && two_stage) {
        use_background = true;
    }

    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    if (first_stage == false) {
        /* We are in stage 2 translation, this is similar to stage 1. */
        /* Stage 2 is always taken as U-mode */
        mode = PRV_U;
    }

    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    hwaddr base;
    int levels, ptidxbits, ptesize, vm, sum, mxr, widened;

    if (first_stage == true) {
        mxr = get_field(env->mstatus, MSTATUS_MXR);
    } else {
        mxr = get_field(env->vsstatus, MSTATUS_MXR);
    }

    if (first_stage == true) {
        if (use_background) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->vsatp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->vsatp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP64_MODE);
            }
        } else {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                base = (hwaddr)get_field(env->satp, SATP32_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP32_MODE);
            } else {
                base = (hwaddr)get_field(env->satp, SATP64_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP64_MODE);
            }
        }
        widened = 0;
    } else {
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            base = (hwaddr)get_field(env->hgatp, SATP32_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP32_MODE);
        } else {
            base = (hwaddr)get_field(env->hgatp, SATP64_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, SATP64_MODE);
        }
        widened = 2;
    }
    /* status.SUM will be ignored if execute on background */
    sum = get_field(env->mstatus, MSTATUS_SUM) || use_background || is_debug;
    switch (vm) {
    case VM_1_10_SV32:
        levels = 2; ptidxbits = 10; ptesize = 4; break;
    case VM_1_10_SV39:
        levels = 3; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV48:
        levels = 4; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV57:
        levels = 5; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_MBARE:
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    default:
        g_assert_not_reached();
    }

    CPUState *cs = env_cpu(env);
    int va_bits = PGSHIFT + levels * ptidxbits + widened;
    target_ulong mask, masked_msbs;

    if (TARGET_LONG_BITS > (va_bits - 1)) {
        mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    } else {
        mask = 0;
    }
    masked_msbs = (addr >> (va_bits - 1)) & mask;

    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }

    int ptshift = (levels - 1) * ptidxbits;
    int i;

#if !TCG_OVERSIZED_GUEST
restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx;
        if (i == 0) {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << (ptidxbits + widened)) - 1);
        } else {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << ptidxbits) - 1);
        }

        /* check that physical address of PTE is legal */
        hwaddr pte_addr;

        if (two_stage && first_stage) {
            int vbase_prot;
            hwaddr vbase;

            /* Do the second stage translation on the base PTE address. */
            int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
                                                 base, NULL, MMU_DATA_LOAD,
                                                 mmu_idx, false, true,
                                                 is_debug);

            if (vbase_ret != TRANSLATE_SUCCESS) {
                if (fault_pte_addr) {
                    *fault_pte_addr = (base + idx * ptesize) >> 2;
                }
                return TRANSLATE_G_STAGE_FAIL;
            }

            pte_addr = vbase + idx * ptesize;
        } else {
            pte_addr = base + idx * ptesize;
        }

        int pmp_prot;
        int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr,
                                               sizeof(target_ulong),
                                               MMU_DATA_LOAD, PRV_S);
        if (pmp_ret != TRANSLATE_SUCCESS) {
            return TRANSLATE_PMP_FAIL;
        }

        target_ulong pte;
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
        } else {
            pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
        }

        if (res != MEMTX_OK) {
            return TRANSLATE_FAIL;
        }

        hwaddr ppn = pte >> PTE_PPN_SHIFT;

        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /* User PTE flags when not U mode and mstatus.SUM is not set,
               or the access type is an instruction fetch */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be updated
                 *   and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr,
                         &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        qatomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /* misconfigured PTE in ROM (AD bits are not preset) or
                     * PTE is in IO space and can't be updated atomically */
                    return TRANSLATE_FAIL;
                }
            }

            /* for superpage mappings, make a fake leaf PTE for the TLB's
               benefit. */
            target_ulong vpn = addr >> PGSHIFT;
            *physical = ((ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT) |
                        (addr & ~TARGET_PAGE_MASK);

            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /* add write permission on stores or if the page is already dirty,
               so that we TLB miss on later writes to update the dirty bit */
            if ((pte & PTE_W) &&
                (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}
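/*
 * Map a translation failure onto the architectural exception to be
 * delivered: a page fault, an access fault, or a guest-page fault,
 * depending on the failing stage and whether PMP was violated.
 */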
static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type, bool pmp_violation,
                                bool first_stage, bool two_stage)
{
    CPUState *cs = env_cpu(env);
    int page_fault_exceptions, vm;
    uint64_t satp_mode;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        satp_mode = SATP32_MODE;
    } else {
        satp_mode = SATP64_MODE;
    }

    if (first_stage) {
        vm = get_field(env->satp, satp_mode);
    } else {
        vm = get_field(env->hgatp, satp_mode);
    }

    page_fault_exceptions = vm != VM_1_10_MBARE && !pmp_violation;

    switch (access_type) {
    case MMU_INST_FETCH:
        if (riscv_cpu_virt_enabled(env) && !first_stage) {
            cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_LOAD:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_STORE:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        }
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
    env->two_stage_lookup = two_stage;
}
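/*
 * gdbstub/monitor physical address lookup; walks both translation
 * stages when virtualization is enabled and returns -1 on failure.
 */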
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
                             true, riscv_cpu_virt_enabled(env), true)) {
        return -1;
    }

    if (riscv_cpu_virt_enabled(env)) {
        if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
                                 0, mmu_idx, false, true, true)) {
            return -1;
        }
    }

    return phys_addr & TARGET_PAGE_MASK;
}
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (access_type == MMU_DATA_STORE) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else if (access_type == MMU_DATA_LOAD) {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_INST_ACCESS_FAULT;
    }

    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    riscv_raise_exception(&cpu->env, cs->exception_index, retaddr);
}
void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    riscv_raise_exception(env, cs->exception_index, retaddr);
}
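/*
 * TCG TLB-fill hook: translate the address (one or two stages), apply
 * the PMP check, and install the resulting TLB entry; on failure raise
 * the MMU exception unless this is only a probe.
 */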
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    vaddr im_address;
    hwaddr pa = 0;
    int prot, prot2, prot_pmp;
    bool pmp_violation = false;
    bool first_stage_error = true;
    bool two_stage_lookup = false;
    int ret = TRANSLATE_FAIL;
    int mode = mmu_idx;
    /* default TLB page size */
    target_ulong tlb_size = TARGET_PAGE_SIZE;

    env->guest_phys_fault_addr = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
               get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
        if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) {
            two_stage_lookup = true;
        }
    }

    if (riscv_cpu_virt_enabled(env) ||
        ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
         access_type != MMU_INST_FETCH)) {
        /* Two stage lookup */
        ret = get_physical_address(env, &pa, &prot, address,
                                   &env->guest_phys_fault_addr, access_type,
                                   mmu_idx, true, true, false);

        /*
         * A G-stage exception may be triggered during two stage lookup.
         * And the env->guest_phys_fault_addr has already been set in
         * get_physical_address().
         */
        if (ret == TRANSLATE_G_STAGE_FAIL) {
            first_stage_error = false;
            access_type = MMU_DATA_LOAD;
        }

        qemu_log_mask(CPU_LOG_MMU,
                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            /* Second stage lookup */
            im_address = pa;

            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
                                       access_type, mmu_idx, false, true,
                                       false);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s 2nd-stage address=%" VADDR_PRIx " ret %d "
                          "physical " TARGET_FMT_plx " prot %d\n",
                          __func__, im_address, ret, pa, prot2);

            prot &= prot2;

            if (ret == TRANSLATE_SUCCESS) {
                ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                               size, access_type, mode);

                qemu_log_mask(CPU_LOG_MMU,
                              "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                              " %d tlb_size " TARGET_FMT_lu "\n",
                              __func__, pa, ret, prot_pmp, tlb_size);

                prot &= prot_pmp;
            }

            if (ret != TRANSLATE_SUCCESS) {
                /*
                 * Guest physical address translation failed, this is a HS
                 * level exception
                 */
                first_stage_error = false;
                env->guest_phys_fault_addr = (im_address |
                                              (address &
                                               (TARGET_PAGE_SIZE - 1))) >> 2;
            }
        }
    } else {
        /* Single stage lookup */
        ret = get_physical_address(env, &pa, &prot, address, NULL,
                                   access_type, mmu_idx, true, false, false);

        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                           size, access_type, mode);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                          " %d tlb_size " TARGET_FMT_lu "\n",
                          __func__, pa, ret, prot_pmp, tlb_size);

            prot &= prot_pmp;
        }
    }

    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }

    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
                     prot, mmu_idx, tlb_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type, pmp_violation,
                            first_stage_error,
                            riscv_cpu_virt_enabled(env) ||
                                riscv_cpu_two_stage_lookup(mmu_idx));
        riscv_raise_exception(env, cs->exception_index, retaddr);
    }

    return true;
}
#endif /* !CONFIG_USER_ONLY */

/*
 * Handle Traps
 *
 * Adapted from Spike's processor_t::take_trap.
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    uint64_t s;

    /* cs->exception_index is 32-bits wide unlike mcause which is XLEN-bits
     * wide, so we mask off the MSB and separate into trap type and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    target_ulong deleg = async ? env->mideleg : env->medeleg;
    bool write_tval = false;
    target_ulong tval = 0;
    target_ulong htval = 0;
    target_ulong mtval2 = 0;

    if (cause == RISCV_EXCP_SEMIHOST) {
        if (env->priv >= PRV_S) {
            env->gpr[xA0] = do_common_semihosting(cs);
            env->pc += 4;
            return;
        }
        cause = RISCV_EXCP_BREAKPOINT;
    }

    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            write_tval = true;
            tval = env->badaddr;
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);

            if (env->priv == PRV_M) {
                cause = RISCV_EXCP_M_ECALL;
            } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_VS_ECALL;
            } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_S_ECALL;
            } else if (env->priv == PRV_U) {
                cause = RISCV_EXCP_U_ECALL;
            }
        }
    }

    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
                     riscv_cpu_get_trap_name(cause, async));

    qemu_log_mask(CPU_LOG_INT,
                  "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
                  "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
                  __func__, env->mhartid, async, cause, env->pc, tval,
                  riscv_cpu_get_trap_name(cause, async));

    if (env->priv <= PRV_S &&
            cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        if (riscv_has_ext(env, RVH)) {
            target_ulong hdeleg = async ? env->hideleg : env->hedeleg;

            if (env->two_stage_lookup && write_tval) {
                /*
                 * If we are writing a guest virtual address to stval, set
                 * this to 1. If we are trapping to VS we will set this to 0
                 * later.
                 */
                env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 1);
            } else {
                /* For other HS-mode traps, we set this to 0. */
                env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 0);
            }

            if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) {
                /* Trap to VS mode */
                /*
                 * See if we need to adjust cause. Yes, if it is a VS-mode
                 * interrupt; no, if the hypervisor has delegated one of HS
                 * mode's interrupts.
                 */
                if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
                    cause == IRQ_VS_EXT) {
                    cause = cause - 1;
                }
                env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 0);
            } else if (riscv_cpu_virt_enabled(env)) {
                /* Trap into HS mode, from virt */
                riscv_cpu_swap_hypervisor_regs(env);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
                                         env->priv);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
                                         riscv_cpu_virt_enabled(env));

                htval = env->guest_phys_fault_addr;

                riscv_cpu_set_virt_enabled(env, 0);
            } else {
                /* Trap into HS mode */
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
                htval = env->guest_phys_fault_addr;
            }
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
        env->sepc = env->pc;
        env->stval = tval;
        env->htval = htval;
        env->pc = (env->stvec >> 2 << 2) +
            ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        if (riscv_has_ext(env, RVH)) {
            if (riscv_cpu_virt_enabled(env)) {
                riscv_cpu_swap_hypervisor_regs(env);
            }
            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
                                     riscv_cpu_virt_enabled(env));
            if (riscv_cpu_virt_enabled(env) && tval) {
                env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
            }

            mtval2 = env->guest_phys_fault_addr;

            /* Trapping to M mode, virt is disabled */
            riscv_cpu_set_virt_enabled(env, 0);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mtval = tval;
        env->mtval2 = mtval2;
        env->pc = (env->mtvec >> 2 << 2) +
            ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }

    /* NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

    env->two_stage_lookup = false;
#endif
    cs->exception_index = RISCV_EXCP_NONE; /* mark handled to qemu */
}