/*
 * RISC-V Emulation Helpers for QEMU.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 * Copyright (c) 2022 VRULL GmbH
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
21 #include "qemu/osdep.h"
23 #include "internals.h"
24 #include "exec/exec-all.h"
25 #include "exec/cpu_ldst.h"
26 #include "exec/helper-proto.h"
28 /* Exceptions processing helpers */
29 G_NORETURN
void riscv_raise_exception(CPURISCVState
*env
,
30 uint32_t exception
, uintptr_t pc
)
32 CPUState
*cs
= env_cpu(env
);
33 cs
->exception_index
= exception
;
34 cpu_loop_exit_restore(cs
, pc
);
37 void helper_raise_exception(CPURISCVState
*env
, uint32_t exception
)
39 riscv_raise_exception(env
, exception
, 0);
42 target_ulong
helper_csrr(CPURISCVState
*env
, int csr
)
45 * The seed CSR must be accessed with a read-write instruction. A
46 * read-only instruction such as CSRRS/CSRRC with rs1=x0 or CSRRSI/
47 * CSRRCI with uimm=0 will raise an illegal instruction exception.
49 if (csr
== CSR_SEED
) {
50 riscv_raise_exception(env
, RISCV_EXCP_ILLEGAL_INST
, GETPC());
54 RISCVException ret
= riscv_csrrw(env
, csr
, &val
, 0, 0);
56 if (ret
!= RISCV_EXCP_NONE
) {
57 riscv_raise_exception(env
, ret
, GETPC());
62 void helper_csrw(CPURISCVState
*env
, int csr
, target_ulong src
)
64 target_ulong mask
= env
->xl
== MXL_RV32
? UINT32_MAX
: (target_ulong
)-1;
65 RISCVException ret
= riscv_csrrw(env
, csr
, NULL
, src
, mask
);
67 if (ret
!= RISCV_EXCP_NONE
) {
68 riscv_raise_exception(env
, ret
, GETPC());
72 target_ulong
helper_csrrw(CPURISCVState
*env
, int csr
,
73 target_ulong src
, target_ulong write_mask
)
76 RISCVException ret
= riscv_csrrw(env
, csr
, &val
, src
, write_mask
);
78 if (ret
!= RISCV_EXCP_NONE
) {
79 riscv_raise_exception(env
, ret
, GETPC());
84 target_ulong
helper_csrr_i128(CPURISCVState
*env
, int csr
)
86 Int128 rv
= int128_zero();
87 RISCVException ret
= riscv_csrrw_i128(env
, csr
, &rv
,
91 if (ret
!= RISCV_EXCP_NONE
) {
92 riscv_raise_exception(env
, ret
, GETPC());
95 env
->retxh
= int128_gethi(rv
);
96 return int128_getlo(rv
);
99 void helper_csrw_i128(CPURISCVState
*env
, int csr
,
100 target_ulong srcl
, target_ulong srch
)
102 RISCVException ret
= riscv_csrrw_i128(env
, csr
, NULL
,
103 int128_make128(srcl
, srch
),
106 if (ret
!= RISCV_EXCP_NONE
) {
107 riscv_raise_exception(env
, ret
, GETPC());
111 target_ulong
helper_csrrw_i128(CPURISCVState
*env
, int csr
,
112 target_ulong srcl
, target_ulong srch
,
113 target_ulong maskl
, target_ulong maskh
)
115 Int128 rv
= int128_zero();
116 RISCVException ret
= riscv_csrrw_i128(env
, csr
, &rv
,
117 int128_make128(srcl
, srch
),
118 int128_make128(maskl
, maskh
));
120 if (ret
!= RISCV_EXCP_NONE
) {
121 riscv_raise_exception(env
, ret
, GETPC());
124 env
->retxh
= int128_gethi(rv
);
125 return int128_getlo(rv
);
/*
 * Raise virtual exceptions and illegal instruction exceptions for
 * Zicbo[mz] instructions based on the settings of [mhs]envcfg as
 * specified in section 2.5.1 of the CMO specification.
 */
136 static void check_zicbo_envcfg(CPURISCVState
*env
, target_ulong envbits
,
139 #ifndef CONFIG_USER_ONLY
140 if ((env
->priv
< PRV_M
) && !get_field(env
->menvcfg
, envbits
)) {
141 riscv_raise_exception(env
, RISCV_EXCP_ILLEGAL_INST
, ra
);
144 if (env
->virt_enabled
&&
145 (((env
->priv
<= PRV_S
) && !get_field(env
->henvcfg
, envbits
)) ||
146 ((env
->priv
< PRV_S
) && !get_field(env
->senvcfg
, envbits
)))) {
147 riscv_raise_exception(env
, RISCV_EXCP_VIRT_INSTRUCTION_FAULT
, ra
);
150 if ((env
->priv
< PRV_S
) && !get_field(env
->senvcfg
, envbits
)) {
151 riscv_raise_exception(env
, RISCV_EXCP_ILLEGAL_INST
, ra
);
156 void helper_cbo_zero(CPURISCVState
*env
, target_ulong address
)
158 RISCVCPU
*cpu
= env_archcpu(env
);
159 uint16_t cbozlen
= cpu
->cfg
.cboz_blocksize
;
160 int mmu_idx
= riscv_env_mmu_index(env
, false);
161 uintptr_t ra
= GETPC();
164 check_zicbo_envcfg(env
, MENVCFG_CBZE
, ra
);
166 /* Mask off low-bits to align-down to the cache-block. */
167 address
&= ~(cbozlen
- 1);
170 * cbo.zero requires MMU_DATA_STORE access. Do a probe_write()
171 * to raise any exceptions, including PMP.
173 mem
= probe_write(env
, address
, cbozlen
, mmu_idx
, ra
);
176 memset(mem
, 0, cbozlen
);
179 * This means that we're dealing with an I/O page. Section 4.2
180 * of cmobase v1.0.1 says:
182 * "Cache-block zero instructions store zeros independently
183 * of whether data from the underlying memory locations are
186 * Write zeros in address + cbozlen regardless of not being
189 for (int i
= 0; i
< cbozlen
; i
++) {
190 cpu_stb_mmuidx_ra(env
, address
+ i
, 0, mmu_idx
, ra
);
/*
 * check_zicbom_access
 *
 * Check access permissions (LOAD, STORE or FETCH as specified in
 * section 2.5.2 of the CMO specification) for Zicbom, raising
 * either store page-fault (non-virtualized) or store guest-page
 * fault (virtualized).
 */
203 static void check_zicbom_access(CPURISCVState
*env
,
204 target_ulong address
,
207 RISCVCPU
*cpu
= env_archcpu(env
);
208 int mmu_idx
= riscv_env_mmu_index(env
, false);
209 uint16_t cbomlen
= cpu
->cfg
.cbom_blocksize
;
213 /* Mask off low-bits to align-down to the cache-block. */
214 address
&= ~(cbomlen
- 1);
217 * Section 2.5.2 of cmobase v1.0.1:
219 * "A cache-block management instruction is permitted to
220 * access the specified cache block whenever a load instruction
221 * or store instruction is permitted to access the corresponding
222 * physical addresses. If neither a load instruction nor store
223 * instruction is permitted to access the physical addresses,
224 * but an instruction fetch is permitted to access the physical
225 * addresses, whether a cache-block management instruction is
226 * permitted to access the cache block is UNSPECIFIED."
228 ret
= probe_access_flags(env
, address
, cbomlen
, MMU_DATA_LOAD
,
229 mmu_idx
, true, &phost
, ra
);
230 if (ret
!= TLB_INVALID_MASK
) {
231 /* Success: readable */
236 * Since not readable, must be writable. On failure, store
237 * fault/store guest amo fault will be raised by
238 * riscv_cpu_tlb_fill(). PMP exceptions will be caught
241 probe_write(env
, address
, cbomlen
, mmu_idx
, ra
);
244 void helper_cbo_clean_flush(CPURISCVState
*env
, target_ulong address
)
246 uintptr_t ra
= GETPC();
247 check_zicbo_envcfg(env
, MENVCFG_CBCFE
, ra
);
248 check_zicbom_access(env
, address
, ra
);
250 /* We don't emulate the cache-hierarchy, so we're done. */
253 void helper_cbo_inval(CPURISCVState
*env
, target_ulong address
)
255 uintptr_t ra
= GETPC();
256 check_zicbo_envcfg(env
, MENVCFG_CBIE
, ra
);
257 check_zicbom_access(env
, address
, ra
);
259 /* We don't emulate the cache-hierarchy, so we're done. */
262 #ifndef CONFIG_USER_ONLY
264 target_ulong
helper_sret(CPURISCVState
*env
)
267 target_ulong prev_priv
, prev_virt
;
269 if (!(env
->priv
>= PRV_S
)) {
270 riscv_raise_exception(env
, RISCV_EXCP_ILLEGAL_INST
, GETPC());
273 target_ulong retpc
= env
->sepc
;
274 if (!riscv_has_ext(env
, RVC
) && (retpc
& 0x3)) {
275 riscv_raise_exception(env
, RISCV_EXCP_INST_ADDR_MIS
, GETPC());
278 if (get_field(env
->mstatus
, MSTATUS_TSR
) && !(env
->priv
>= PRV_M
)) {
279 riscv_raise_exception(env
, RISCV_EXCP_ILLEGAL_INST
, GETPC());
282 if (env
->virt_enabled
&& get_field(env
->hstatus
, HSTATUS_VTSR
)) {
283 riscv_raise_exception(env
, RISCV_EXCP_VIRT_INSTRUCTION_FAULT
, GETPC());
286 mstatus
= env
->mstatus
;
287 prev_priv
= get_field(mstatus
, MSTATUS_SPP
);
288 mstatus
= set_field(mstatus
, MSTATUS_SIE
,
289 get_field(mstatus
, MSTATUS_SPIE
));
290 mstatus
= set_field(mstatus
, MSTATUS_SPIE
, 1);
291 mstatus
= set_field(mstatus
, MSTATUS_SPP
, PRV_U
);
292 if (env
->priv_ver
>= PRIV_VERSION_1_12_0
) {
293 mstatus
= set_field(mstatus
, MSTATUS_MPRV
, 0);
295 env
->mstatus
= mstatus
;
297 if (riscv_has_ext(env
, RVH
) && !env
->virt_enabled
) {
298 /* We support Hypervisor extensions and virtulisation is disabled */
299 target_ulong hstatus
= env
->hstatus
;
301 prev_virt
= get_field(hstatus
, HSTATUS_SPV
);
303 hstatus
= set_field(hstatus
, HSTATUS_SPV
, 0);
305 env
->hstatus
= hstatus
;
308 riscv_cpu_swap_hypervisor_regs(env
);
311 riscv_cpu_set_virt_enabled(env
, prev_virt
);
314 riscv_cpu_set_mode(env
, prev_priv
);
319 target_ulong
helper_mret(CPURISCVState
*env
)
321 if (!(env
->priv
>= PRV_M
)) {
322 riscv_raise_exception(env
, RISCV_EXCP_ILLEGAL_INST
, GETPC());
325 target_ulong retpc
= env
->mepc
;
326 if (!riscv_has_ext(env
, RVC
) && (retpc
& 0x3)) {
327 riscv_raise_exception(env
, RISCV_EXCP_INST_ADDR_MIS
, GETPC());
330 uint64_t mstatus
= env
->mstatus
;
331 target_ulong prev_priv
= get_field(mstatus
, MSTATUS_MPP
);
333 if (riscv_cpu_cfg(env
)->pmp
&&
334 !pmp_get_num_rules(env
) && (prev_priv
!= PRV_M
)) {
335 riscv_raise_exception(env
, RISCV_EXCP_INST_ACCESS_FAULT
, GETPC());
338 target_ulong prev_virt
= get_field(env
->mstatus
, MSTATUS_MPV
) &&
339 (prev_priv
!= PRV_M
);
340 mstatus
= set_field(mstatus
, MSTATUS_MIE
,
341 get_field(mstatus
, MSTATUS_MPIE
));
342 mstatus
= set_field(mstatus
, MSTATUS_MPIE
, 1);
343 mstatus
= set_field(mstatus
, MSTATUS_MPP
,
344 riscv_has_ext(env
, RVU
) ? PRV_U
: PRV_M
);
345 mstatus
= set_field(mstatus
, MSTATUS_MPV
, 0);
346 if ((env
->priv_ver
>= PRIV_VERSION_1_12_0
) && (prev_priv
!= PRV_M
)) {
347 mstatus
= set_field(mstatus
, MSTATUS_MPRV
, 0);
349 env
->mstatus
= mstatus
;
350 riscv_cpu_set_mode(env
, prev_priv
);
352 if (riscv_has_ext(env
, RVH
)) {
354 riscv_cpu_swap_hypervisor_regs(env
);
357 riscv_cpu_set_virt_enabled(env
, prev_virt
);
363 void helper_wfi(CPURISCVState
*env
)
365 CPUState
*cs
= env_cpu(env
);
366 bool rvs
= riscv_has_ext(env
, RVS
);
367 bool prv_u
= env
->priv
== PRV_U
;
368 bool prv_s
= env
->priv
== PRV_S
;
370 if (((prv_s
|| (!rvs
&& prv_u
)) && get_field(env
->mstatus
, MSTATUS_TW
)) ||
371 (rvs
&& prv_u
&& !env
->virt_enabled
)) {
372 riscv_raise_exception(env
, RISCV_EXCP_ILLEGAL_INST
, GETPC());
373 } else if (env
->virt_enabled
&&
374 (prv_u
|| (prv_s
&& get_field(env
->hstatus
, HSTATUS_VTW
)))) {
375 riscv_raise_exception(env
, RISCV_EXCP_VIRT_INSTRUCTION_FAULT
, GETPC());
378 cs
->exception_index
= EXCP_HLT
;
383 void helper_tlb_flush(CPURISCVState
*env
)
385 CPUState
*cs
= env_cpu(env
);
386 if (!env
->virt_enabled
&&
387 (env
->priv
== PRV_U
||
388 (env
->priv
== PRV_S
&& get_field(env
->mstatus
, MSTATUS_TVM
)))) {
389 riscv_raise_exception(env
, RISCV_EXCP_ILLEGAL_INST
, GETPC());
390 } else if (env
->virt_enabled
&&
391 (env
->priv
== PRV_U
|| get_field(env
->hstatus
, HSTATUS_VTVM
))) {
392 riscv_raise_exception(env
, RISCV_EXCP_VIRT_INSTRUCTION_FAULT
, GETPC());
398 void helper_tlb_flush_all(CPURISCVState
*env
)
400 CPUState
*cs
= env_cpu(env
);
401 tlb_flush_all_cpus_synced(cs
);
404 void helper_hyp_tlb_flush(CPURISCVState
*env
)
406 CPUState
*cs
= env_cpu(env
);
408 if (env
->virt_enabled
) {
409 riscv_raise_exception(env
, RISCV_EXCP_VIRT_INSTRUCTION_FAULT
, GETPC());
412 if (env
->priv
== PRV_M
||
413 (env
->priv
== PRV_S
&& !env
->virt_enabled
)) {
418 riscv_raise_exception(env
, RISCV_EXCP_ILLEGAL_INST
, GETPC());
421 void helper_hyp_gvma_tlb_flush(CPURISCVState
*env
)
423 if (env
->priv
== PRV_S
&& !env
->virt_enabled
&&
424 get_field(env
->mstatus
, MSTATUS_TVM
)) {
425 riscv_raise_exception(env
, RISCV_EXCP_ILLEGAL_INST
, GETPC());
428 helper_hyp_tlb_flush(env
);
431 static int check_access_hlsv(CPURISCVState
*env
, bool x
, uintptr_t ra
)
433 if (env
->priv
== PRV_M
) {
435 } else if (env
->virt_enabled
) {
436 riscv_raise_exception(env
, RISCV_EXCP_VIRT_INSTRUCTION_FAULT
, ra
);
437 } else if (env
->priv
== PRV_U
&& !get_field(env
->hstatus
, HSTATUS_HU
)) {
438 riscv_raise_exception(env
, RISCV_EXCP_ILLEGAL_INST
, ra
);
441 int mode
= get_field(env
->hstatus
, HSTATUS_SPVP
);
442 if (!x
&& mode
== PRV_S
&& get_field(env
->vsstatus
, MSTATUS_SUM
)) {
445 return mode
| MMU_2STAGE_BIT
;
448 target_ulong
helper_hyp_hlv_bu(CPURISCVState
*env
, target_ulong addr
)
450 uintptr_t ra
= GETPC();
451 int mmu_idx
= check_access_hlsv(env
, false, ra
);
452 MemOpIdx oi
= make_memop_idx(MO_UB
, mmu_idx
);
454 return cpu_ldb_mmu(env
, addr
, oi
, ra
);
457 target_ulong
helper_hyp_hlv_hu(CPURISCVState
*env
, target_ulong addr
)
459 uintptr_t ra
= GETPC();
460 int mmu_idx
= check_access_hlsv(env
, false, ra
);
461 MemOpIdx oi
= make_memop_idx(MO_TEUW
, mmu_idx
);
463 return cpu_ldw_mmu(env
, addr
, oi
, ra
);
466 target_ulong
helper_hyp_hlv_wu(CPURISCVState
*env
, target_ulong addr
)
468 uintptr_t ra
= GETPC();
469 int mmu_idx
= check_access_hlsv(env
, false, ra
);
470 MemOpIdx oi
= make_memop_idx(MO_TEUL
, mmu_idx
);
472 return cpu_ldl_mmu(env
, addr
, oi
, ra
);
475 target_ulong
helper_hyp_hlv_d(CPURISCVState
*env
, target_ulong addr
)
477 uintptr_t ra
= GETPC();
478 int mmu_idx
= check_access_hlsv(env
, false, ra
);
479 MemOpIdx oi
= make_memop_idx(MO_TEUQ
, mmu_idx
);
481 return cpu_ldq_mmu(env
, addr
, oi
, ra
);
484 void helper_hyp_hsv_b(CPURISCVState
*env
, target_ulong addr
, target_ulong val
)
486 uintptr_t ra
= GETPC();
487 int mmu_idx
= check_access_hlsv(env
, false, ra
);
488 MemOpIdx oi
= make_memop_idx(MO_UB
, mmu_idx
);
490 cpu_stb_mmu(env
, addr
, val
, oi
, ra
);
493 void helper_hyp_hsv_h(CPURISCVState
*env
, target_ulong addr
, target_ulong val
)
495 uintptr_t ra
= GETPC();
496 int mmu_idx
= check_access_hlsv(env
, false, ra
);
497 MemOpIdx oi
= make_memop_idx(MO_TEUW
, mmu_idx
);
499 cpu_stw_mmu(env
, addr
, val
, oi
, ra
);
502 void helper_hyp_hsv_w(CPURISCVState
*env
, target_ulong addr
, target_ulong val
)
504 uintptr_t ra
= GETPC();
505 int mmu_idx
= check_access_hlsv(env
, false, ra
);
506 MemOpIdx oi
= make_memop_idx(MO_TEUL
, mmu_idx
);
508 cpu_stl_mmu(env
, addr
, val
, oi
, ra
);
511 void helper_hyp_hsv_d(CPURISCVState
*env
, target_ulong addr
, target_ulong val
)
513 uintptr_t ra
= GETPC();
514 int mmu_idx
= check_access_hlsv(env
, false, ra
);
515 MemOpIdx oi
= make_memop_idx(MO_TEUQ
, mmu_idx
);
517 cpu_stq_mmu(env
, addr
, val
, oi
, ra
);
/*
 * TODO: These implementations are not quite correct. They perform the
 * access using execute permission just fine, but the final PMP check
 * is supposed to have read permission as well. Without replicating
 * a fair fraction of cputlb.c, fixing this requires adding new mmu_idx
 * which would imply that exact check in tlb_fill.
 */
527 target_ulong
helper_hyp_hlvx_hu(CPURISCVState
*env
, target_ulong addr
)
529 uintptr_t ra
= GETPC();
530 int mmu_idx
= check_access_hlsv(env
, true, ra
);
531 MemOpIdx oi
= make_memop_idx(MO_TEUW
, mmu_idx
);
533 return cpu_ldw_code_mmu(env
, addr
, oi
, GETPC());
536 target_ulong
helper_hyp_hlvx_wu(CPURISCVState
*env
, target_ulong addr
)
538 uintptr_t ra
= GETPC();
539 int mmu_idx
= check_access_hlsv(env
, true, ra
);
540 MemOpIdx oi
= make_memop_idx(MO_TEUL
, mmu_idx
);
542 return cpu_ldl_code_mmu(env
, addr
, oi
, ra
);
545 #endif /* !CONFIG_USER_ONLY */