/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
20 #include "qemu/osdep.h"
22 #include "qemu/main-loop.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}
#ifndef CONFIG_USER_ONLY
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
    target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
    target_ulong hs_mstatus_sie = get_field(env->mstatus_hs, MSTATUS_SIE);

    target_ulong pending = env->mip & env->mie &
                               ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
    target_ulong vspending = (env->mip & env->mie &
                              (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP));
    target_ulong mie    = env->priv < PRV_M ||
                          (env->priv == PRV_M && mstatus_mie);
    target_ulong sie    = env->priv < PRV_S ||
                          (env->priv == PRV_S && mstatus_sie);
    target_ulong hs_sie = env->priv < PRV_S ||
                          (env->priv == PRV_S && hs_mstatus_sie);
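    /*
     * Note the bit trick used below: mie/sie/hs_sie are 0 or 1, so negating
     * them yields either an all-zeroes or an all-ones mask. "pending & -mie"
     * therefore keeps the pending bits only when that mode's interrupts are
     * globally enabled.
     */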
    if (riscv_cpu_virt_enabled(env)) {
        target_ulong pending_hs_irq = pending & -hs_sie;

        if (pending_hs_irq) {
            riscv_cpu_set_force_hs_excep(env, FORCE_HS_EXCEP);
            return ctz64(pending_hs_irq);
        }

        pending = vspending;
    }
    target_ulong irqs = (pending & ~env->mideleg & -mie) |
                        (pending &  env->mideleg & -sie);

    if (irqs) {
        return ctz64(irqs); /* since non-zero */
    } else {
        return EXCP_NONE; /* indicates no pending interrupt */
    }
}
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
#endif
    return false;
}
#if !defined(CONFIG_USER_ONLY)

/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
            return false;
        }
        return true;
    }

    return false;
}
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
    target_ulong mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS |
                                MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE;
    bool current_virt = riscv_cpu_virt_enabled(env);

    g_assert(riscv_has_ext(env, RVH));

#if defined(TARGET_RISCV64)
    mstatus_mask |= MSTATUS64_UXL;
#endif
    if (current_virt) {
        /* Current V=1 and we are about to change to V=0 */
        env->vsstatus = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->mstatus_hs;

#if defined(TARGET_RISCV32)
        env->vsstatush = env->mstatush;
        env->mstatush |= env->mstatush_hs;
#endif

        env->vstvec = env->stvec;
        env->stvec = env->stvec_hs;

        env->vsscratch = env->sscratch;
        env->sscratch = env->sscratch_hs;

        env->vsepc = env->sepc;
        env->sepc = env->sepc_hs;

        env->vscause = env->scause;
        env->scause = env->scause_hs;

        env->vstval = env->sbadaddr;
        env->sbadaddr = env->stval_hs;

        env->vsatp = env->satp;
        env->satp = env->satp_hs;
    } else {
        /* Current V=0 and we are about to change to V=1 */
        env->mstatus_hs = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->vsstatus;

#if defined(TARGET_RISCV32)
        env->mstatush_hs = env->mstatush;
        env->mstatush |= env->vsstatush;
#endif

        env->stvec_hs = env->stvec;
        env->stvec = env->vstvec;

        env->sscratch_hs = env->sscratch;
        env->sscratch = env->vsscratch;

        env->sepc_hs = env->sepc;
        env->sepc = env->vsepc;

        env->scause_hs = env->scause;
        env->scause = env->vscause;

        env->stval_hs = env->sbadaddr;
        env->sbadaddr = env->vstval;

        env->satp_hs = env->satp;
        env->satp = env->vsatp;
    }
}
bool riscv_cpu_virt_enabled(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return false;
    }

    return get_field(env->virt, VIRT_ONOFF);
}
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    /* Flush the TLB on all virt mode changes. */
    if (get_field(env->virt, VIRT_ONOFF) != enable) {
        tlb_flush(env_cpu(env));
    }

    env->virt = set_field(env->virt, VIRT_ONOFF, enable);
}
bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return false;
    }

    return get_field(env->virt, FORCE_HS_EXCEP);
}
void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    env->virt = set_field(env->virt, FORCE_HS_EXCEP, enable);
}
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
{
    CPURISCVState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t old = env->mip;
    bool locked = false;
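    /*
     * env->mip can be updated from device and timer callbacks running
     * outside the CPU thread, so take the iothread lock (BQL) here if the
     * caller does not already hold it, and drop it again before returning.
     */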
    if (!qemu_mutex_iothread_locked()) {
        locked = true;
        qemu_mutex_lock_iothread();
    }

    env->mip = (env->mip & ~mask) | (value & mask);

    if (env->mip) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }

    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return old;
}
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void))
{
    env->rdtime_fn = fn;
}
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
    env->load_res = -1;
}
/* get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 * @env: CPURISCVState
 * @physical: This will be set to the calculated physical address
 * @prot: The returned protection attributes
 * @addr: The virtual address to be translated
 * @access_type: The type of MMU access
 * @mmu_idx: Indicates current privilege level
 * @first_stage: Are we in first stage translation?
 *               Second stage is used for hypervisor guest translation
 * @two_stage: Are we going to perform two stage translation
 */
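/*
 * For illustration: under Sv39 a 39-bit virtual address decomposes as
 * VPN[2] (9 bits) | VPN[1] (9 bits) | VPN[0] (9 bits) | page offset
 * (12 bits), which corresponds to levels = 3, ptidxbits = 9 and
 * va_bits = 12 + 3 * 9 = 39 in the walk below.
 */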
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                int access_type, int mmu_idx,
                                bool first_stage, bool two_stage)
{
    /* NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct */
    MemTxResult res;
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
    int mode = mmu_idx;
    bool use_background = false;
    /*
     * Check if we should use the background registers for the two
     * stage translation. We don't need to check if we actually need
     * two stage translation as that happened before this function
     * was called. Background registers will be used if the guest has
     * forced a two stage translation to be on (in HS or M mode).
     */
    if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);

            if (riscv_has_ext(env, RVH) &&
                MSTATUS_MPV_ISSET(env)) {
                use_background = true;
            }
        }
    }
    if (mode == PRV_S && access_type != MMU_INST_FETCH &&
        riscv_has_ext(env, RVH) && !riscv_cpu_virt_enabled(env)) {
        if (get_field(env->hstatus, HSTATUS_SPRV)) {
            mode = get_field(env->mstatus, SSTATUS_SPP);
            use_background = true;
        }
    }

    if (first_stage == false) {
        /* We are in stage 2 translation, this is similar to stage 1. */
        /* Stage 2 is always taken as U-mode */
        mode = PRV_U;
    }
    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    *prot = 0;

    hwaddr base;
    int levels, ptidxbits, ptesize, vm, sum, mxr, widened;
    if (first_stage == true) {
        mxr = get_field(env->mstatus, MSTATUS_MXR);
    } else {
        mxr = get_field(env->vsstatus, MSTATUS_MXR);
    }
    if (env->priv_ver >= PRIV_VERSION_1_10_0) {
        if (first_stage == true) {
            if (use_background) {
                base = (hwaddr)get_field(env->vsatp, SATP_PPN) << PGSHIFT;
                vm = get_field(env->vsatp, SATP_MODE);
            } else {
                base = (hwaddr)get_field(env->satp, SATP_PPN) << PGSHIFT;
                vm = get_field(env->satp, SATP_MODE);
            }
            widened = 0;
        } else {
            base = (hwaddr)get_field(env->hgatp, HGATP_PPN) << PGSHIFT;
            vm = get_field(env->hgatp, HGATP_MODE);
            widened = 2;
        }
        sum = get_field(env->mstatus, MSTATUS_SUM);
        switch (vm) {
        case VM_1_10_SV32:
          levels = 2; ptidxbits = 10; ptesize = 4; break;
        case VM_1_10_SV39:
          levels = 3; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_SV48:
          levels = 4; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_SV57:
          levels = 5; ptidxbits = 9; ptesize = 8; break;
        case VM_1_10_MBARE:
            *physical = addr;
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TRANSLATE_SUCCESS;
        default:
          g_assert_not_reached();
        }
    } else {
        base = (hwaddr)(env->sptbr) << PGSHIFT;
        sum = !get_field(env->mstatus, MSTATUS_PUM);
        vm = get_field(env->mstatus, MSTATUS_VM);
        widened = 0;
        switch (vm) {
        case VM_1_09_SV32:
          levels = 2; ptidxbits = 10; ptesize = 4; break;
        case VM_1_09_SV39:
          levels = 3; ptidxbits = 9; ptesize = 8; break;
        case VM_1_09_SV48:
          levels = 4; ptidxbits = 9; ptesize = 8; break;
        case VM_1_09_MBARE:
            *physical = addr;
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TRANSLATE_SUCCESS;
        default:
          g_assert_not_reached();
        }
    }
    CPUState *cs = env_cpu(env);
    int va_bits = PGSHIFT + levels * ptidxbits + widened;
    target_ulong mask, masked_msbs;
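    /*
     * Virtual addresses must be "canonical": every bit above the translated
     * range must equal the top translated bit. E.g. for Sv39 (va_bits = 39),
     * bits 63..38 must all replicate bit 38; anything else fails translation.
     */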
    if (TARGET_LONG_BITS > (va_bits - 1)) {
        mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    } else {
        mask = 0;
    }
    masked_msbs = (addr >> (va_bits - 1)) & mask;

    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }

    int ptshift = (levels - 1) * ptidxbits;
    int i;
#if !TCG_OVERSIZED_GUEST
 restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx;
        if (i == 0) {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << (ptidxbits + widened)) - 1);
        } else {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << ptidxbits) - 1);
        }
        /* check that physical address of PTE is legal */
        hwaddr pte_addr;

        if (two_stage && first_stage) {
            hwaddr vbase;
            int vbase_prot;

            /* Do the second stage translation on the base PTE address. */
            get_physical_address(env, &vbase, &vbase_prot, base, access_type,
                                 mmu_idx, false, true);

            pte_addr = vbase + idx * ptesize;
        } else {
            pte_addr = base + idx * ptesize;
        }
        if (riscv_feature(env, RISCV_FEATURE_PMP) &&
            !pmp_hart_has_privs(env, pte_addr, sizeof(target_ulong),
            1 << MMU_DATA_LOAD, PRV_S)) {
            return TRANSLATE_PMP_FAIL;
        }

#if defined(TARGET_RISCV32)
        target_ulong pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
#elif defined(TARGET_RISCV64)
        target_ulong pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
#endif
        if (res != MEMTX_OK) {
            return TRANSLATE_FAIL;
        }

        hwaddr ppn = pte >> PTE_PPN_SHIFT;
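        /*
         * In the RISC-V PTE format the physical page number sits above the
         * ten flag/RSW bits, hence the PTE_PPN_SHIFT extraction above; the
         * checks below look only at the low flag bits.
         */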
        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /* User PTE flags when not U mode and mstatus.SUM is not set,
               or the access type is an instruction fetch */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be updated
                 *   and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr,
                    &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        atomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    } else {
                        pte = updated_pte;
                    }
#endif
                } else {
                    /* misconfigured PTE in ROM (AD bits are not preset) or
                     * PTE is in IO space and can't be updated atomically */
                    return TRANSLATE_FAIL;
                }
            }
            /* for superpage mappings, make a fake leaf PTE for the TLB's
               benefit. */
            target_ulong vpn = addr >> PGSHIFT;
            *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT;
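            /*
             * For a leaf found above the last level, ptshift is still
             * non-zero, so the low ptshift bits of the physical page number
             * come from the virtual address. E.g. an Sv39 level-1 leaf
             * (ptshift = 9) maps a 2 MiB region, which the TLB then sees as
             * individual 4 KiB pages.
             */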
            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /* add write permission on stores or if the page is already dirty,
               so that we TLB miss on later writes to update the dirty bit */
            if ((pte & PTE_W) &&
                    (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}
static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type, bool pmp_violation,
                                bool first_stage)
{
    CPUState *cs = env_cpu(env);
    int page_fault_exceptions;
    if (first_stage) {
        page_fault_exceptions =
            (env->priv_ver >= PRIV_VERSION_1_10_0) &&
            get_field(env->satp, SATP_MODE) != VM_1_10_MBARE &&
            !pmp_violation;
    } else {
        page_fault_exceptions =
            get_field(env->hgatp, HGATP_MODE) != VM_1_10_MBARE &&
            !pmp_violation;
    }
    switch (access_type) {
    case MMU_INST_FETCH:
        if (riscv_cpu_virt_enabled(env) && !first_stage) {
            cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_LOAD:
        if (riscv_cpu_virt_enabled(env) && !first_stage) {
            cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_STORE:
        if (riscv_cpu_virt_enabled(env) && !first_stage) {
            cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        }
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
}
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(env, &phys_addr, &prot, addr, 0, mmu_idx,
                             true, riscv_cpu_virt_enabled(env))) {
        return -1;
    }

    if (riscv_cpu_virt_enabled(env)) {
        if (get_physical_address(env, &phys_addr, &prot, phys_addr,
                                 0, mmu_idx, false, true)) {
            return -1;
        }
    }

    return phys_addr & TARGET_PAGE_MASK;
}
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (access_type == MMU_DATA_STORE) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    }

    env->badaddr = addr;
    riscv_raise_exception(&cpu->env, cs->exception_index, retaddr);
}
void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    riscv_raise_exception(env, cs->exception_index, retaddr);
}
#endif
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
#ifndef CONFIG_USER_ONLY
    vaddr im_address;
    hwaddr pa = 0;
    int prot, prot2;
    bool pmp_violation = false;
    bool m_mode_two_stage = false;
    bool hs_mode_two_stage = false;
    bool first_stage_error = true;
    int ret = TRANSLATE_FAIL;
    int mode = mmu_idx;
    env->guest_phys_fault_addr = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    /*
     * Determine if we are in M mode and MPRV is set or in HS mode and SPRV is
     * set and we want to access a virtualisation address.
     */
    if (riscv_has_ext(env, RVH)) {
        m_mode_two_stage = env->priv == PRV_M &&
                           access_type != MMU_INST_FETCH &&
                           get_field(env->mstatus, MSTATUS_MPRV) &&
                           MSTATUS_MPV_ISSET(env);

        hs_mode_two_stage = env->priv == PRV_S &&
                            !riscv_cpu_virt_enabled(env) &&
                            access_type != MMU_INST_FETCH &&
                            get_field(env->hstatus, HSTATUS_SPRV) &&
                            get_field(env->hstatus, HSTATUS_SPV);
    }

    if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }
    if (riscv_cpu_virt_enabled(env) || m_mode_two_stage || hs_mode_two_stage) {
        /* Two stage lookup */
        ret = get_physical_address(env, &pa, &prot, address, access_type,
                                   mmu_idx, true, true);

        qemu_log_mask(CPU_LOG_MMU,
                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);
        if (ret != TRANSLATE_FAIL) {
            /* Second stage lookup */
            im_address = pa;

            ret = get_physical_address(env, &pa, &prot2, im_address,
                                       access_type, mmu_idx, false, true);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
                          TARGET_FMT_plx " prot %d\n",
                          __func__, im_address, ret, pa, prot2);

            prot &= prot2;
            if (riscv_feature(env, RISCV_FEATURE_PMP) &&
                (ret == TRANSLATE_SUCCESS) &&
                !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) {
                ret = TRANSLATE_PMP_FAIL;
            }
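            /*
             * On a second-stage fault the hypervisor extension reports the
             * faulting guest-physical address in htval/mtval2 shifted right
             * by 2, so guest_phys_fault_addr below is stored pre-shifted.
             */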
            if (ret != TRANSLATE_SUCCESS) {
                /*
                 * Guest physical address translation failed, this is a HS
                 * level exception
                 */
                first_stage_error = false;
                env->guest_phys_fault_addr = (im_address |
                                              (address &
                                               (TARGET_PAGE_SIZE - 1))) >> 2;
            }
        }
    } else {
        /* Single stage lookup */
        ret = get_physical_address(env, &pa, &prot, address, access_type,
                                   mmu_idx, true, false);

        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);
    }
    if (riscv_feature(env, RISCV_FEATURE_PMP) &&
        (ret == TRANSLATE_SUCCESS) &&
        !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) {
        ret = TRANSLATE_PMP_FAIL;
    }
    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }
    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type, pmp_violation,
                            first_stage_error);
        riscv_raise_exception(env, cs->exception_index, retaddr);
    }
#else
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
    cpu_loop_exit_restore(cs, retaddr);
#endif
}
/*
 * Adapted from Spike's processor_t::take_trap.
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    bool force_hs_execp = riscv_cpu_force_hs_excep_enabled(env);

    /* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide
     * so we mask off the MSB and separate into trap type and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    target_ulong deleg = async ? env->mideleg : env->medeleg;
    target_ulong tval = 0;
    target_ulong htval = 0;
    target_ulong mtval2 = 0;
    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
            force_hs_execp = true;
            /* fallthrough */
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            tval = env->badaddr;
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);

            if (env->priv == PRV_M) {
                cause = RISCV_EXCP_M_ECALL;
            } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_VS_ECALL;
            } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_S_ECALL;
            } else if (env->priv == PRV_U) {
                cause = RISCV_EXCP_U_ECALL;
            }
        }
    }
    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval, cause < 23 ?
        (async ? riscv_intr_names : riscv_excp_names)[cause] : "(unknown)");
    if (env->priv <= PRV_S &&
            cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        if (riscv_has_ext(env, RVH)) {
            target_ulong hdeleg = async ? env->hideleg : env->hedeleg;

            if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1) &&
                !force_hs_execp) {
                /*
                 * See if we need to adjust cause. Yes if it's a VS mode
                 * interrupt, no if the hypervisor has delegated one of HS
                 * mode's interrupts.
                 */
                if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
                    cause == IRQ_VS_EXT) {
                    cause = cause - 1;
                }
                /* Trap to VS mode */
            } else if (riscv_cpu_virt_enabled(env)) {
                /* Trap into HS mode, from virt */
                riscv_cpu_swap_hypervisor_regs(env);
                env->hstatus = set_field(env->hstatus, HSTATUS_SP2V,
                                         get_field(env->hstatus, HSTATUS_SPV));
                env->hstatus = set_field(env->hstatus, HSTATUS_SP2P,
                                         get_field(env->mstatus, SSTATUS_SPP));
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
                                         riscv_cpu_virt_enabled(env));

                htval = env->guest_phys_fault_addr;

                riscv_cpu_set_virt_enabled(env, 0);
                riscv_cpu_set_force_hs_excep(env, 0);
            } else {
                /* Trap into HS mode */
                env->hstatus = set_field(env->hstatus, HSTATUS_SP2V,
                                         get_field(env->hstatus, HSTATUS_SPV));
                env->hstatus = set_field(env->hstatus, HSTATUS_SP2P,
                                         get_field(env->mstatus, SSTATUS_SPP));
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
                                         riscv_cpu_virt_enabled(env));

                htval = env->guest_phys_fault_addr;
            }
        }
        target_ulong s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
            get_field(s, MSTATUS_SIE) : get_field(s, MSTATUS_UIE << env->priv));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
        env->sepc = env->pc;
        env->sbadaddr = tval;
        env->htval = htval;
        env->pc = (env->stvec >> 2 << 2) +
            ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        if (riscv_has_ext(env, RVH)) {
            if (riscv_cpu_virt_enabled(env)) {
                riscv_cpu_swap_hypervisor_regs(env);
            }
#ifdef TARGET_RISCV32
            env->mstatush = set_field(env->mstatush, MSTATUS_MPV,
                                      riscv_cpu_virt_enabled(env));
            env->mstatush = set_field(env->mstatush, MSTATUS_MTL,
                                      riscv_cpu_force_hs_excep_enabled(env));
#else
            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
                                     riscv_cpu_virt_enabled(env));
            env->mstatus = set_field(env->mstatus, MSTATUS_MTL,
                                     riscv_cpu_force_hs_excep_enabled(env));
#endif

            mtval2 = env->guest_phys_fault_addr;

            /* Trapping to M mode, virt is disabled */
            riscv_cpu_set_virt_enabled(env, 0);
            riscv_cpu_set_force_hs_excep(env, 0);
        }
        target_ulong s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
            get_field(s, MSTATUS_MIE) : get_field(s, MSTATUS_UIE << env->priv));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mbadaddr = tval;
        env->mtval2 = mtval2;
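        /*
         * Compute the trap entry point: the low two bits of mtvec select the
         * mode. In vectored mode (1), asynchronous traps enter at
         * base + 4 * cause; otherwise entry is at the base address itself.
         */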
        env->pc = (env->mtvec >> 2 << 2) +
            ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }
    /* NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */
#endif
    cs->exception_index = EXCP_NONE; /* mark handled to qemu */
}