/*
 * RISC-V CPU helpers for qemu.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "semihosting/common-semi.h"
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
    return 0;
#else
    return env->priv;
#endif
}
#ifndef CONFIG_USER_ONLY
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
    target_ulong irqs;

    target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
    target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
    target_ulong hs_mstatus_sie = get_field(env->mstatus_hs, MSTATUS_SIE);

    target_ulong pending = env->mip & env->mie &
                               ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
    target_ulong vspending = (env->mip & env->mie &
                              (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP));

    target_ulong mie    = env->priv < PRV_M ||
                          (env->priv == PRV_M && mstatus_mie);
    target_ulong sie    = env->priv < PRV_S ||
                          (env->priv == PRV_S && mstatus_sie);
    target_ulong hs_sie = env->priv < PRV_S ||
                          (env->priv == PRV_S && hs_mstatus_sie);
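
    /*
     * mie/sie/hs_sie above are 0/1 flags: negating one ("-mie") yields an
     * all-ones or all-zeros mask, so ANDing it with "pending" either keeps
     * or discards the whole set of pending bits for that privilege level.
     * ctz64() then picks the lowest set bit, i.e. the lowest-numbered
     * enabled pending interrupt.
     */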
    if (riscv_cpu_virt_enabled(env)) {
        target_ulong pending_hs_irq = pending & -hs_sie;

        if (pending_hs_irq) {
            riscv_cpu_set_force_hs_excep(env, FORCE_HS_EXCEP);
            return ctz64(pending_hs_irq);
        }

        pending = vspending;
    }

    irqs = (pending & ~env->mideleg & -mie) | (pending & env->mideleg & -sie);

    if (irqs) {
        return ctz64(irqs); /* since non-zero */
    } else {
        return EXCP_NONE; /* indicates no pending interrupt */
    }
}
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        RISCVCPU *cpu = RISCV_CPU(cs);
        CPURISCVState *env = &cpu->env;
        int interruptno = riscv_cpu_local_irq_pending(env);
        if (interruptno >= 0) {
            cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
            riscv_cpu_do_interrupt(cs);
            return true;
        }
    }
#endif
    return false;
}
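
/*
 * RISCV_EXCP_INT_FLAG marks the top bit of exception_index so that
 * riscv_cpu_do_interrupt() can tell asynchronous interrupts apart from
 * synchronous exceptions and recover the cause number with
 * RISCV_EXCP_INT_MASK (see the async/cause handling in that function).
 */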
#if !defined(CONFIG_USER_ONLY)

/* Return true if floating point support is currently enabled */
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
    if (env->mstatus & MSTATUS_FS) {
        if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
            return false;
        }
        return true;
    }

    return false;
}
void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
{
    uint64_t mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS |
                            MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE |
                            MSTATUS64_UXL;
    bool current_virt = riscv_cpu_virt_enabled(env);

    g_assert(riscv_has_ext(env, RVH));

    if (current_virt) {
        /* Current V=1 and we are about to change to V=0 */
        env->vsstatus = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->mstatus_hs;

        env->vstvec = env->stvec;
        env->stvec = env->stvec_hs;

        env->vsscratch = env->sscratch;
        env->sscratch = env->sscratch_hs;

        env->vsepc = env->sepc;
        env->sepc = env->sepc_hs;

        env->vscause = env->scause;
        env->scause = env->scause_hs;

        env->vstval = env->sbadaddr;
        env->sbadaddr = env->stval_hs;

        env->vsatp = env->satp;
        env->satp = env->satp_hs;
    } else {
        /* Current V=0 and we are about to change to V=1 */
        env->mstatus_hs = env->mstatus & mstatus_mask;
        env->mstatus &= ~mstatus_mask;
        env->mstatus |= env->vsstatus;

        env->stvec_hs = env->stvec;
        env->stvec = env->vstvec;

        env->sscratch_hs = env->sscratch;
        env->sscratch = env->vsscratch;

        env->sepc_hs = env->sepc;
        env->sepc = env->vsepc;

        env->scause_hs = env->scause;
        env->scause = env->vscause;

        env->stval_hs = env->sbadaddr;
        env->sbadaddr = env->vstval;

        env->satp_hs = env->satp;
        env->satp = env->vsatp;
    }
}
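
/*
 * Note that only the S-mode view of mstatus (the bits in mstatus_mask above)
 * is swapped between the HS and VS copies; M-mode fields such as MPP/MPIE are
 * left untouched. The same helper covers both directions, keyed off the
 * current virtualisation state at the time of the call.
 */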
bool riscv_cpu_virt_enabled(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return false;
    }

    return get_field(env->virt, VIRT_ONOFF);
}
void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    /* Flush the TLB on all virt mode changes. */
    if (get_field(env->virt, VIRT_ONOFF) != enable) {
        tlb_flush(env_cpu(env));
    }

    env->virt = set_field(env->virt, VIRT_ONOFF, enable);
}
bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env)
{
    if (!riscv_has_ext(env, RVH)) {
        return false;
    }

    return get_field(env->virt, FORCE_HS_EXCEP);
}
void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable)
{
    if (!riscv_has_ext(env, RVH)) {
        return;
    }

    env->virt = set_field(env->virt, FORCE_HS_EXCEP, enable);
}
bool riscv_cpu_two_stage_lookup(int mmu_idx)
{
    return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK;
}
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
{
    CPURISCVState *env = &cpu->env;
    if (env->miclaim & interrupts) {
        return -1;
    } else {
        env->miclaim |= interrupts;
        return 0;
    }
}
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
{
    CPURISCVState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    uint32_t old = env->mip;
    bool locked = false;

    if (!qemu_mutex_iothread_locked()) {
        locked = true;
        qemu_mutex_lock_iothread();
    }

    env->mip = (env->mip & ~mask) | (value & mask);

    if (env->mip) {
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
    }

    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    return old;
}
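
/*
 * Callers may run outside the CPU thread (e.g. timer or interrupt-controller
 * models), which is why the function takes the iothread lock above when the
 * caller does not already hold it. A typical, purely illustrative use from a
 * timer model might look like:
 *
 *     riscv_cpu_update_mip(cpu, MIP_MTIP, BOOL_TO_MASK(level));
 *
 * where "level" is a hypothetical boolean input and BOOL_TO_MASK() expands it
 * into an all-ones/all-zeros value for the masked update.
 */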
void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t),
                             uint32_t arg)
{
    env->rdtime_fn = fn;
    env->rdtime_fn_arg = arg;
}
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
    if (newpriv > PRV_M) {
        g_assert_not_reached();
    }
    if (newpriv == PRV_H) {
        newpriv = PRV_U;
    }
    /* tlb_flush is unnecessary as mode is contained in mmu_idx */
    env->priv = newpriv;

    /*
     * Clear the load reservation - otherwise a reservation placed in one
     * context/process can be used by another, resulting in an SC succeeding
     * incorrectly. Version 2.2 of the ISA specification explicitly requires
     * this behaviour, while later revisions say that the kernel "should" use
     * an SC instruction to force the yielding of a load reservation on a
     * preemptive context switch. As a result, do both.
     */
    env->load_res = -1;
}
/*
 * get_physical_address_pmp - check PMP permission for this physical address
 *
 * Match the PMP region and check permission for this physical address and its
 * TLB page. Returns 0 if the permission checking was successful.
 *
 * @env: CPURISCVState
 * @prot: The returned protection attributes
 * @tlb_size: TLB page size containing addr. It could be modified after PMP
 *            permission checking. Pass NULL if no TLB page should be set up
 *            for addr.
 * @addr: The physical address whose permission is to be checked
 * @access_type: The type of MMU access
 * @mode: Indicates current privilege level.
 */
static int get_physical_address_pmp(CPURISCVState *env, int *prot,
                                    target_ulong *tlb_size, hwaddr addr,
                                    int size, MMUAccessType access_type,
                                    int mode)
{
    pmp_priv_t pmp_priv;
    target_ulong tlb_size_pmp = 0;

    if (!riscv_feature(env, RISCV_FEATURE_PMP)) {
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }

    if (!pmp_hart_has_privs(env, addr, size, 1 << access_type, &pmp_priv,
                            mode)) {
        *prot = 0;
        return TRANSLATE_PMP_FAIL;
    }

    *prot = pmp_priv_to_page_prot(pmp_priv);
    if (tlb_size != NULL) {
        if (pmp_is_range_in_tlb(env, addr & ~(*tlb_size - 1), &tlb_size_pmp)) {
            *tlb_size = tlb_size_pmp;
        }
    }

    return TRANSLATE_SUCCESS;
}
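
/*
 * If a PMP region covers only part of the TLB page containing addr,
 * pmp_is_range_in_tlb() is expected to report a smaller size and *tlb_size is
 * shrunk to it, so that the TLB entry later installed by the caller does not
 * span addresses with different PMP permissions.
 */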
/* get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns 0 if the translation was successful.
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 *
 * @env: CPURISCVState
 * @physical: This will be set to the calculated physical address
 * @prot: The returned protection attributes
 * @addr: The virtual address to be translated
 * @fault_pte_addr: If not NULL, this will be set to the faulting PTE address
 *                  when an error occurs on PTE address translation.
 *                  This will already be shifted to match htval.
 * @access_type: The type of MMU access
 * @mmu_idx: Indicates current privilege level
 * @first_stage: Are we in first stage translation?
 *               Second stage is used for hypervisor guest translation
 * @two_stage: Are we going to perform two stage translation
 */
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
                                int *prot, target_ulong addr,
                                target_ulong *fault_pte_addr,
                                int access_type, int mmu_idx,
                                bool first_stage, bool two_stage)
{
    /* NOTE: the env->pc value visible here will not be
     * correct, but the value visible to the exception handler
     * (riscv_cpu_do_interrupt) is correct */
    MemTxResult res;
    MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
    int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
    bool use_background = false;

    /*
     * Check if we should use the background registers for the two
     * stage translation. We don't need to check if we actually need
     * two stage translation as that happened before this function
     * was called. Background registers will be used if the guest has
     * forced a two stage translation to be on (in HS or M mode).
     */
    if (!riscv_cpu_virt_enabled(env) && two_stage) {
        use_background = true;
    }
    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH) {
        if (get_field(env->mstatus, MSTATUS_MPRV)) {
            mode = get_field(env->mstatus, MSTATUS_MPP);
        }
    }

    if (first_stage == false) {
        /* We are in stage 2 translation, this is similar to stage 1. */
        /* Stage 2 is always taken as U-mode */
        mode = PRV_U;
    }

    if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    }
    *prot = 0;

    hwaddr base;
    int levels, ptidxbits, ptesize, vm, sum, mxr, widened;

    if (first_stage == true) {
        mxr = get_field(env->mstatus, MSTATUS_MXR);
    } else {
        mxr = get_field(env->vsstatus, MSTATUS_MXR);
    }

    if (first_stage == true) {
        if (use_background) {
            base = (hwaddr)get_field(env->vsatp, SATP_PPN) << PGSHIFT;
            vm = get_field(env->vsatp, SATP_MODE);
        } else {
            base = (hwaddr)get_field(env->satp, SATP_PPN) << PGSHIFT;
            vm = get_field(env->satp, SATP_MODE);
        }
        widened = 0;
    } else {
        base = (hwaddr)get_field(env->hgatp, HGATP_PPN) << PGSHIFT;
        vm = get_field(env->hgatp, HGATP_MODE);
        widened = 2;
    }
    /* status.SUM will be ignored if execute on background */
    sum = get_field(env->mstatus, MSTATUS_SUM) || use_background;
    switch (vm) {
    case VM_1_10_SV32:
      levels = 2; ptidxbits = 10; ptesize = 4; break;
    case VM_1_10_SV39:
      levels = 3; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV48:
      levels = 4; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_SV57:
      levels = 5; ptidxbits = 9; ptesize = 8; break;
    case VM_1_10_MBARE:
        *physical = addr;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TRANSLATE_SUCCESS;
    default:
      g_assert_not_reached();
    }
    CPUState *cs = env_cpu(env);
    int va_bits = PGSHIFT + levels * ptidxbits + widened;
    target_ulong mask, masked_msbs;

    if (TARGET_LONG_BITS > (va_bits - 1)) {
        mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
    } else {
        mask = 0;
    }
    masked_msbs = (addr >> (va_bits - 1)) & mask;

    if (masked_msbs != 0 && masked_msbs != mask) {
        return TRANSLATE_FAIL;
    }
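
    /*
     * The check above enforces the canonical-address rule: every bit above
     * bit (va_bits - 1) of the virtual address must equal bit (va_bits - 1)
     * itself (all zeros or all ones), otherwise the walk fails immediately.
     */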
    int ptshift = (levels - 1) * ptidxbits;
    int i;
#if !TCG_OVERSIZED_GUEST
restart:
#endif
    for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
        target_ulong idx;
        if (i == 0) {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << (ptidxbits + widened)) - 1);
        } else {
            idx = (addr >> (PGSHIFT + ptshift)) &
                           ((1 << ptidxbits) - 1);
        }
        /* check that physical address of PTE is legal */
        hwaddr pte_addr;

        if (two_stage && first_stage) {
            int vbase_prot;
            hwaddr vbase;

            /* Do the second stage translation on the base PTE address. */
            int vbase_ret = get_physical_address(env, &vbase, &vbase_prot,
                                                 base, NULL, MMU_DATA_LOAD,
                                                 mmu_idx, false, true);

            if (vbase_ret != TRANSLATE_SUCCESS) {
                if (fault_pte_addr) {
                    *fault_pte_addr = (base + idx * ptesize) >> 2;
                }
                return TRANSLATE_G_STAGE_FAIL;
            }

            pte_addr = vbase + idx * ptesize;
        } else {
            pte_addr = base + idx * ptesize;
        }
        int pmp_prot;
        int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr,
                                               sizeof(target_ulong),
                                               MMU_DATA_LOAD, PRV_S);
        if (pmp_ret != TRANSLATE_SUCCESS) {
            return TRANSLATE_PMP_FAIL;
        }

        target_ulong pte;
        if (riscv_cpu_is_32bit(env)) {
            pte = address_space_ldl(cs->as, pte_addr, attrs, &res);
        } else {
            pte = address_space_ldq(cs->as, pte_addr, attrs, &res);
        }

        if (res != MEMTX_OK) {
            return TRANSLATE_FAIL;
        }

        hwaddr ppn = pte >> PTE_PPN_SHIFT;
        if (!(pte & PTE_V)) {
            /* Invalid PTE */
            return TRANSLATE_FAIL;
        } else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
            /* Inner PTE, continue walking */
            base = ppn << PGSHIFT;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
            /* Reserved leaf PTE flags: PTE_W */
            return TRANSLATE_FAIL;
        } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
            /* Reserved leaf PTE flags: PTE_W + PTE_X */
            return TRANSLATE_FAIL;
        } else if ((pte & PTE_U) && ((mode != PRV_U) &&
                   (!sum || access_type == MMU_INST_FETCH))) {
            /* User PTE flags when not U mode and mstatus.SUM is not set,
               or the access type is an instruction fetch */
            return TRANSLATE_FAIL;
        } else if (!(pte & PTE_U) && (mode != PRV_S)) {
            /* Supervisor PTE flags when not S mode */
            return TRANSLATE_FAIL;
        } else if (ppn & ((1ULL << ptshift) - 1)) {
            /* Misaligned PPN */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
                   ((pte & PTE_X) && mxr))) {
            /* Read access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
            /* Write access check failed */
            return TRANSLATE_FAIL;
        } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
            /* Fetch access check failed */
            return TRANSLATE_FAIL;
        } else {
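            /*
             * Reaching here means the leaf PTE passed every check above:
             * it is valid, not a reserved R/W/X combination, matches the
             * current privilege level (taking mstatus.SUM into account),
             * and grants the requested access type. Note that the load
             * check also accepts execute-only pages when mstatus.MXR
             * ("Make eXecutable Readable") is set.
             */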
            /* if necessary, set accessed and dirty bits. */
            target_ulong updated_pte = pte | PTE_A |
                (access_type == MMU_DATA_STORE ? PTE_D : 0);

            /* Page table updates need to be atomic with MTTCG enabled */
            if (updated_pte != pte) {
                /*
                 * - if accessed or dirty bits need updating, and the PTE is
                 *   in RAM, then we do so atomically with a compare and swap.
                 * - if the PTE is in IO space or ROM, then it can't be updated
                 *   and we return TRANSLATE_FAIL.
                 * - if the PTE changed by the time we went to update it, then
                 *   it is no longer valid and we must re-walk the page table.
                 */
                MemoryRegion *mr;
                hwaddr l = sizeof(target_ulong), addr1;
                mr = address_space_translate(cs->as, pte_addr,
                    &addr1, &l, false, MEMTXATTRS_UNSPECIFIED);
                if (memory_region_is_ram(mr)) {
                    target_ulong *pte_pa =
                        qemu_map_ram_ptr(mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
                    /* MTTCG is not enabled on oversized TCG guests so
                     * page table updates do not need to be atomic */
                    *pte_pa = pte = updated_pte;
#else
                    target_ulong old_pte =
                        qatomic_cmpxchg(pte_pa, pte, updated_pte);
                    if (old_pte != pte) {
                        goto restart;
                    }
                    pte = updated_pte;
#endif
                } else {
                    /* misconfigured PTE in ROM (AD bits are not preset) or
                     * PTE is in IO space and can't be updated atomically */
                    return TRANSLATE_FAIL;
                }
            }
            /* for superpage mappings, make a fake leaf PTE for the TLB's
               benefit. */
            target_ulong vpn = addr >> PGSHIFT;
            *physical = ((ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT) |
                        (addr & ~TARGET_PAGE_MASK);

            /* set permissions on the TLB entry */
            if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
                *prot |= PAGE_READ;
            }
            if ((pte & PTE_X)) {
                *prot |= PAGE_EXEC;
            }
            /* add write permission on stores or if the page is already dirty,
               so that we TLB miss on later writes to update the dirty bit */
            if ((pte & PTE_W) &&
                    (access_type == MMU_DATA_STORE || (pte & PTE_D))) {
                *prot |= PAGE_WRITE;
            }
            return TRANSLATE_SUCCESS;
        }
    }
    return TRANSLATE_FAIL;
}
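
/*
 * For a superpage (a leaf found before the last level, i.e. ptshift > 0),
 * the low bits of the PPN are architecturally required to be zero, so the
 * code above ORs in the corresponding virtual page number bits. The result
 * is an ordinary page-sized translation for this particular address, which
 * is all the QEMU TLB needs.
 */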
static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
                                MMUAccessType access_type, bool pmp_violation,
                                bool first_stage, bool two_stage)
{
    CPUState *cs = env_cpu(env);
    int page_fault_exceptions;
    if (first_stage) {
        page_fault_exceptions =
            get_field(env->satp, SATP_MODE) != VM_1_10_MBARE &&
            !pmp_violation;
    } else {
        page_fault_exceptions =
            get_field(env->hgatp, HGATP_MODE) != VM_1_10_MBARE &&
            !pmp_violation;
    }
    switch (access_type) {
    case MMU_INST_FETCH:
        if (riscv_cpu_virt_enabled(env) && !first_stage) {
            cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_LOAD:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
        }
        break;
    case MMU_DATA_STORE:
        if (two_stage && !first_stage) {
            cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
        } else {
            cs->exception_index = page_fault_exceptions ?
                RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
        }
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
    env->two_stage_lookup = two_stage;
}
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    hwaddr phys_addr;
    int prot;
    int mmu_idx = cpu_mmu_index(&cpu->env, false);

    if (get_physical_address(env, &phys_addr, &prot, addr, NULL, 0, mmu_idx,
                             true, riscv_cpu_virt_enabled(env))) {
        return -1;
    }

    if (riscv_cpu_virt_enabled(env)) {
        if (get_physical_address(env, &phys_addr, &prot, phys_addr, NULL,
                                 0, mmu_idx, false, true)) {
            return -1;
        }
    }

    return phys_addr & TARGET_PAGE_MASK;
}
void riscv_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                     vaddr addr, unsigned size,
                                     MMUAccessType access_type,
                                     int mmu_idx, MemTxAttrs attrs,
                                     MemTxResult response, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    if (access_type == MMU_DATA_STORE) {
        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
    } else {
        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
    }

    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    riscv_raise_exception(&cpu->env, cs->exception_index, retaddr);
}
void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type, int mmu_idx,
                                   uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = addr;
    env->two_stage_lookup = riscv_cpu_virt_enabled(env) ||
                            riscv_cpu_two_stage_lookup(mmu_idx);
    riscv_raise_exception(env, cs->exception_index, retaddr);
}
#endif /* !CONFIG_USER_ONLY */
bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
#ifndef CONFIG_USER_ONLY
    vaddr im_address;
    hwaddr pa = 0;
    int prot, prot2, prot_pmp;
    bool pmp_violation = false;
    bool first_stage_error = true;
    bool two_stage_lookup = false;
    int ret = TRANSLATE_FAIL;
    int mode = mmu_idx;
    /* default TLB page size */
    target_ulong tlb_size = TARGET_PAGE_SIZE;

    env->guest_phys_fault_addr = 0;

    qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);
    /* MPRV does not affect the virtual-machine load/store
       instructions, HLV, HLVX, and HSV. */
    if (riscv_cpu_two_stage_lookup(mmu_idx)) {
        mode = get_field(env->hstatus, HSTATUS_SPVP);
    } else if (mode == PRV_M && access_type != MMU_INST_FETCH &&
               get_field(env->mstatus, MSTATUS_MPRV)) {
        mode = get_field(env->mstatus, MSTATUS_MPP);
        if (riscv_has_ext(env, RVH) && get_field(env->mstatus, MSTATUS_MPV)) {
            two_stage_lookup = true;
        }
    }
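
    /*
     * With the hypervisor extension active, a guest access goes through up to
     * three checks below: a VS-stage walk (guest virtual to guest physical),
     * a G-stage walk (guest physical to host physical), and finally PMP on
     * the resulting physical address. Each stage can only remove permissions,
     * so the final protection is the intersection of what every stage allows.
     */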
    if (riscv_cpu_virt_enabled(env) ||
        ((riscv_cpu_two_stage_lookup(mmu_idx) || two_stage_lookup) &&
         access_type != MMU_INST_FETCH)) {
        /* Two stage lookup */
        ret = get_physical_address(env, &pa, &prot, address,
                                   &env->guest_phys_fault_addr, access_type,
                                   mmu_idx, true, true);

        /*
         * A G-stage exception may be triggered during two-stage lookup.
         * And the env->guest_phys_fault_addr has already been set in
         * get_physical_address().
         */
        if (ret == TRANSLATE_G_STAGE_FAIL) {
            first_stage_error = false;
            access_type = MMU_DATA_LOAD;
        }

        qemu_log_mask(CPU_LOG_MMU,
                      "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);
        if (ret == TRANSLATE_SUCCESS) {
            /* Second stage lookup */
            im_address = pa;

            ret = get_physical_address(env, &pa, &prot2, im_address, NULL,
                                       access_type, mmu_idx, false, true);

            qemu_log_mask(CPU_LOG_MMU,
                    "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
                    TARGET_FMT_plx " prot %d\n",
                    __func__, im_address, ret, pa, prot2);

            prot &= prot2;

            if (ret == TRANSLATE_SUCCESS) {
                ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                               size, access_type, mode);

                qemu_log_mask(CPU_LOG_MMU,
                              "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                              " %d tlb_size " TARGET_FMT_lu "\n",
                              __func__, pa, ret, prot_pmp, tlb_size);

                prot &= prot_pmp;
            }
            if (ret != TRANSLATE_SUCCESS) {
                /*
                 * Guest physical address translation failed, this is a HS
                 * level exception
                 */
                first_stage_error = false;
                env->guest_phys_fault_addr = (im_address |
                                              (address &
                                               (TARGET_PAGE_SIZE - 1))) >> 2;
            }
        }
    } else {
        /* Single stage lookup */
        ret = get_physical_address(env, &pa, &prot, address, NULL,
                                   access_type, mmu_idx, true, false);

        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d physical "
                      TARGET_FMT_plx " prot %d\n",
                      __func__, address, ret, pa, prot);

        if (ret == TRANSLATE_SUCCESS) {
            ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
                                           size, access_type, mode);

            qemu_log_mask(CPU_LOG_MMU,
                          "%s PMP address=" TARGET_FMT_plx " ret %d prot"
                          " %d tlb_size " TARGET_FMT_lu "\n",
                          __func__, pa, ret, prot_pmp, tlb_size);

            prot &= prot_pmp;
        }
    }
    if (ret == TRANSLATE_PMP_FAIL) {
        pmp_violation = true;
    }

    if (ret == TRANSLATE_SUCCESS) {
        tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
                     prot, mmu_idx, tlb_size);
        return true;
    } else if (probe) {
        return false;
    } else {
        raise_mmu_exception(env, address, access_type, pmp_violation,
                            first_stage_error,
                            riscv_cpu_virt_enabled(env) ||
                                riscv_cpu_two_stage_lookup(mmu_idx));
        riscv_raise_exception(env, cs->exception_index, retaddr);
    }

    return true;

#else
    switch (access_type) {
    case MMU_INST_FETCH:
        cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
        break;
    case MMU_DATA_LOAD:
        cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
        break;
    case MMU_DATA_STORE:
        cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
        break;
    default:
        g_assert_not_reached();
    }
    env->badaddr = address;
    cpu_loop_exit_restore(cs, retaddr);
#endif
}
/*
 * Adapted from Spike's processor_t::take_trap.
 */
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)

    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    bool force_hs_execp = riscv_cpu_force_hs_excep_enabled(env);
    uint64_t s;

    /* cs->exception_index is 32-bits wide unlike mcause which is XLEN-bits
     * wide, so we mask off the MSB and separate into trap type and cause.
     */
    bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
    target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
    target_ulong deleg = async ? env->mideleg : env->medeleg;
    bool write_tval = false;
    target_ulong tval = 0;
    target_ulong htval = 0;
    target_ulong mtval2 = 0;
    if (cause == RISCV_EXCP_SEMIHOST) {
        if (env->priv >= PRV_S) {
            env->gpr[xA0] = do_common_semihosting(cs);
            env->pc += 4;
            return;
        }
        cause = RISCV_EXCP_BREAKPOINT;
    }
    if (!async) {
        /* set tval to badaddr for traps with address information */
        switch (cause) {
        case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
        case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
            force_hs_execp = true;
            /* fallthrough */
        case RISCV_EXCP_INST_ADDR_MIS:
        case RISCV_EXCP_INST_ACCESS_FAULT:
        case RISCV_EXCP_LOAD_ADDR_MIS:
        case RISCV_EXCP_STORE_AMO_ADDR_MIS:
        case RISCV_EXCP_LOAD_ACCESS_FAULT:
        case RISCV_EXCP_STORE_AMO_ACCESS_FAULT:
        case RISCV_EXCP_INST_PAGE_FAULT:
        case RISCV_EXCP_LOAD_PAGE_FAULT:
        case RISCV_EXCP_STORE_PAGE_FAULT:
            write_tval = true;
            tval = env->badaddr;
            break;
        default:
            break;
        }
        /* ecall is dispatched as one cause so translate based on mode */
        if (cause == RISCV_EXCP_U_ECALL) {
            assert(env->priv <= 3);

            if (env->priv == PRV_M) {
                cause = RISCV_EXCP_M_ECALL;
            } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_VS_ECALL;
            } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
                cause = RISCV_EXCP_S_ECALL;
            } else if (env->priv == PRV_U) {
                cause = RISCV_EXCP_U_ECALL;
            }
        }
    }
    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval,
                     riscv_cpu_get_trap_name(cause, async));

    qemu_log_mask(CPU_LOG_INT,
                  "%s: hart:"TARGET_FMT_ld", async:%d, cause:"TARGET_FMT_lx", "
                  "epc:0x"TARGET_FMT_lx", tval:0x"TARGET_FMT_lx", desc=%s\n",
                  __func__, env->mhartid, async, cause, env->pc, tval,
                  riscv_cpu_get_trap_name(cause, async));
    if (env->priv <= PRV_S &&
            cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
        /* handle the trap in S-mode */
        if (riscv_has_ext(env, RVH)) {
            target_ulong hdeleg = async ? env->hideleg : env->hedeleg;

            if (env->two_stage_lookup && write_tval) {
                /*
                 * If we are writing a guest virtual address to stval, set
                 * this to 1. If we are trapping to VS we will set this to 0
                 * later.
                 */
                env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 1);
            } else {
                /* For other HS-mode traps, we set this to 0. */
                env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 0);
            }
            if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1) &&
                !force_hs_execp) {
                /* Trap to VS mode */
                /*
                 * See if we need to adjust cause. Yes if its VS mode interrupt
                 * no if hypervisor has delegated one of hs mode's interrupt
                 */
                if (cause == IRQ_VS_TIMER || cause == IRQ_VS_SOFT ||
                    cause == IRQ_VS_EXT) {
                    cause = cause - 1;
                }
                env->hstatus = set_field(env->hstatus, HSTATUS_GVA, 0);
            } else if (riscv_cpu_virt_enabled(env)) {
                /* Trap into HS mode, from virt */
                riscv_cpu_swap_hypervisor_regs(env);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPVP,
                                         env->priv);
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
                                         riscv_cpu_virt_enabled(env));

                htval = env->guest_phys_fault_addr;

                riscv_cpu_set_virt_enabled(env, 0);
                riscv_cpu_set_force_hs_excep(env, 0);
            } else {
                /* Trap into HS mode */
                env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
                htval = env->guest_phys_fault_addr;
            }
        }
        s = env->mstatus;
        s = set_field(s, MSTATUS_SPIE, get_field(s, MSTATUS_SIE));
        s = set_field(s, MSTATUS_SPP, env->priv);
        s = set_field(s, MSTATUS_SIE, 0);
        env->mstatus = s;
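
        /*
         * scause encodes the interrupt/exception distinction in its top bit:
         * shifting "async" to bit (TARGET_LONG_BITS - 1) below sets that bit
         * only for interrupts. The low two bits of stvec select the trap
         * vector mode; when they are 1 (vectored), asynchronous traps enter
         * at stvec base + 4 * cause instead of the base address itself.
         */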
        env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
        env->sepc = env->pc;
        env->sbadaddr = tval;
        env->htval = htval;
        env->pc = (env->stvec >> 2 << 2) +
                  ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_S);
    } else {
        /* handle the trap in M-mode */
        if (riscv_has_ext(env, RVH)) {
            if (riscv_cpu_virt_enabled(env)) {
                riscv_cpu_swap_hypervisor_regs(env);
            }
            env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
                                     riscv_cpu_virt_enabled(env));
            if (riscv_cpu_virt_enabled(env) && tval) {
                env->mstatus = set_field(env->mstatus, MSTATUS_GVA, 1);
            }

            mtval2 = env->guest_phys_fault_addr;

            /* Trapping to M mode, virt is disabled */
            riscv_cpu_set_virt_enabled(env, 0);
            riscv_cpu_set_force_hs_excep(env, 0);
        }

        s = env->mstatus;
        s = set_field(s, MSTATUS_MPIE, get_field(s, MSTATUS_MIE));
        s = set_field(s, MSTATUS_MPP, env->priv);
        s = set_field(s, MSTATUS_MIE, 0);
        env->mstatus = s;
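
        /*
         * "~(((target_ulong)-1) >> async)" is zero when async == 0 and is
         * just the top bit of the register when async == 1, so the next line
         * sets the interrupt flag in mcause only for asynchronous traps.
         */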
        env->mcause = cause | ~(((target_ulong)-1) >> async);
        env->mepc = env->pc;
        env->mbadaddr = tval;
        env->mtval2 = mtval2;
        env->pc = (env->mtvec >> 2 << 2) +
                  ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
        riscv_cpu_set_mode(env, PRV_M);
    }
    /* NOTE: it is not necessary to yield load reservations here. It is only
     * necessary for an SC from "another hart" to cause a load reservation
     * to be yielded. Refer to the memory consistency model section of the
     * RISC-V ISA Specification.
     */

    env->two_stage_lookup = false;
#endif
    cs->exception_index = EXCP_NONE; /* mark handled to qemu */
}