/*
 * RISC-V Control and Status Registers.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/timer.h"
#include "cpu.h"
#include "qemu/main-loop.h"
#include "exec/exec-all.h"
#include "sysemu/cpu-timers.h"
#include "qemu/guest-random.h"
#include "qapi/error.h"

/* CSR function table public API */
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
{
    *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
}

void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
{
    csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
}

static RISCVException fs(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
    if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
        !RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
        return RISCV_EXCP_ILLEGAL_INST;
    }
#endif
    return RISCV_EXCP_NONE;
}

static RISCVException vs(CPURISCVState *env, int csrno)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (env->misa_ext & RVV ||
        cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
#if !defined(CONFIG_USER_ONLY)
        if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
#endif
        return RISCV_EXCP_NONE;
    }
    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException ctr(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (!cpu->cfg.ext_counters) {
        /* The Counters extension is not enabled */
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (riscv_cpu_virt_enabled(env)) {
        switch (csrno) {
        case CSR_CYCLE:
            if (!get_field(env->hcounteren, COUNTEREN_CY) &&
                get_field(env->mcounteren, COUNTEREN_CY)) {
                return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
            }
            break;
        case CSR_TIME:
            if (!get_field(env->hcounteren, COUNTEREN_TM) &&
                get_field(env->mcounteren, COUNTEREN_TM)) {
                return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
            }
            break;
        case CSR_INSTRET:
            if (!get_field(env->hcounteren, COUNTEREN_IR) &&
                get_field(env->mcounteren, COUNTEREN_IR)) {
                return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
            }
            break;
        case CSR_HPMCOUNTER3...CSR_HPMCOUNTER31:
            if (!get_field(env->hcounteren, 1 << (csrno - CSR_HPMCOUNTER3)) &&
                get_field(env->mcounteren, 1 << (csrno - CSR_HPMCOUNTER3))) {
                return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
            }
            break;
        }
        if (riscv_cpu_mxl(env) == MXL_RV32) {
            switch (csrno) {
            case CSR_CYCLEH:
                if (!get_field(env->hcounteren, COUNTEREN_CY) &&
                    get_field(env->mcounteren, COUNTEREN_CY)) {
                    return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
                }
                break;
            case CSR_TIMEH:
                if (!get_field(env->hcounteren, COUNTEREN_TM) &&
                    get_field(env->mcounteren, COUNTEREN_TM)) {
                    return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
                }
                break;
            case CSR_INSTRETH:
                if (!get_field(env->hcounteren, COUNTEREN_IR) &&
                    get_field(env->mcounteren, COUNTEREN_IR)) {
                    return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
                }
                break;
            case CSR_HPMCOUNTER3H...CSR_HPMCOUNTER31H:
                if (!get_field(env->hcounteren, 1 << (csrno - CSR_HPMCOUNTER3H)) &&
                    get_field(env->mcounteren, 1 << (csrno - CSR_HPMCOUNTER3H))) {
                    return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
                }
                break;
            }
        }
    }
#endif
    return RISCV_EXCP_NONE;
}

static RISCVException ctr32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return ctr(env, csrno);
}

#if !defined(CONFIG_USER_ONLY)
static RISCVException any(CPURISCVState *env, int csrno)
{
    return RISCV_EXCP_NONE;
}

static RISCVException any32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return any(env, csrno);
}

static int aia_any(CPURISCVState *env, int csrno)
{
    if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return any(env, csrno);
}

static int aia_any32(CPURISCVState *env, int csrno)
{
    if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return any32(env, csrno);
}

static RISCVException smode(CPURISCVState *env, int csrno)
{
    if (riscv_has_ext(env, RVS)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static int smode32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return smode(env, csrno);
}

static int aia_smode(CPURISCVState *env, int csrno)
{
    if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return smode(env, csrno);
}

static int aia_smode32(CPURISCVState *env, int csrno)
{
    if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return smode32(env, csrno);
}

static RISCVException hmode(CPURISCVState *env, int csrno)
{
    if (riscv_has_ext(env, RVS) &&
        riscv_has_ext(env, RVH)) {
        /* Hypervisor extension is supported */
        if ((env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
            env->priv == PRV_M) {
            return RISCV_EXCP_NONE;
        } else {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException hmode32(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        if (!riscv_cpu_virt_enabled(env)) {
            return RISCV_EXCP_ILLEGAL_INST;
        } else {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    return hmode(env, csrno);
}

/* Checks if PointerMasking registers could be accessed */
static RISCVException pointer_masking(CPURISCVState *env, int csrno)
{
    /* Check if j-ext is present */
    if (riscv_has_ext(env, RVJ)) {
        return RISCV_EXCP_NONE;
    }
    return RISCV_EXCP_ILLEGAL_INST;
}

static int aia_hmode(CPURISCVState *env, int csrno)
{
    if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return hmode(env, csrno);
}

static int aia_hmode32(CPURISCVState *env, int csrno)
{
    if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return hmode32(env, csrno);
}

static RISCVException pmp(CPURISCVState *env, int csrno)
{
    if (riscv_feature(env, RISCV_FEATURE_PMP)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException epmp(CPURISCVState *env, int csrno)
{
    if (env->priv == PRV_M && riscv_feature(env, RISCV_FEATURE_EPMP)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}

static RISCVException debug(CPURISCVState *env, int csrno)
{
    if (riscv_feature(env, RISCV_FEATURE_DEBUG)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}
#endif

static RISCVException seed(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);

    if (!cpu->cfg.ext_zkr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

#if !defined(CONFIG_USER_ONLY)
    /*
     * With a CSR read-write instruction:
     * 1) The seed CSR is always available in machine mode as normal.
     * 2) Attempted access to seed from virtual modes VS and VU always raises
     *    an exception (virtual instruction exception only if mseccfg.sseed=1).
     * 3) Without the corresponding access control bit set to 1, any attempted
     *    access to seed from U, S or HS modes will raise an illegal instruction
     *    exception.
     */
    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    } else if (riscv_cpu_virt_enabled(env)) {
        if (env->mseccfg & MSECCFG_SSEED) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        } else {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    } else {
        if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) {
            return RISCV_EXCP_NONE;
        } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) {
            return RISCV_EXCP_NONE;
        } else {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }
#else
    return RISCV_EXCP_NONE;
#endif
}

/* User Floating-Point CSRs */
static RISCVException read_fflags(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = riscv_cpu_get_fflags(env);
    return RISCV_EXCP_NONE;
}

static RISCVException write_fflags(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVF)) {
        env->mstatus |= MSTATUS_FS;
    }
#endif
    riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
    return RISCV_EXCP_NONE;
}

static RISCVException read_frm(CPURISCVState *env, int csrno,
                               target_ulong *val)
{
    *val = env->frm;
    return RISCV_EXCP_NONE;
}

static RISCVException write_frm(CPURISCVState *env, int csrno,
                                target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVF)) {
        env->mstatus |= MSTATUS_FS;
    }
#endif
    env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
    return RISCV_EXCP_NONE;
}

static RISCVException read_fcsr(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
        | (env->frm << FSR_RD_SHIFT);
    return RISCV_EXCP_NONE;
}

static RISCVException write_fcsr(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVF)) {
        env->mstatus |= MSTATUS_FS;
    }
#endif
    env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
    riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
    return RISCV_EXCP_NONE;
}

static RISCVException read_vtype(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    uint64_t vill;
    switch (env->xl) {
    case MXL_RV32:
        vill = (uint32_t)env->vill << 31;
        break;
    case MXL_RV64:
        vill = (uint64_t)env->vill << 63;
        break;
    default:
        g_assert_not_reached();
    }
    *val = (target_ulong)vill | env->vtype;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vl(CPURISCVState *env, int csrno,
                              target_ulong *val)
{
    *val = env->vl;
    return RISCV_EXCP_NONE;
}

static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = env_archcpu(env)->cfg.vlen >> 3;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vxrm(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->vxrm;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vxrm(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxrm = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vxsat(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vxsat;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vxsat(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxsat = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vstart(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstart;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vstart(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    /*
     * The vstart CSR is defined to have only enough writable bits
     * to hold the largest element index, i.e. lg2(VLEN) bits.
     */
    env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen));
    return RISCV_EXCP_NONE;
}

static int read_vcsr(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
    return RISCV_EXCP_NONE;
}

static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
    env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
    return RISCV_EXCP_NONE;
}

/* User Timers and Counters */
static RISCVException read_instret(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
#if !defined(CONFIG_USER_ONLY)
    if (icount_enabled()) {
        *val = icount_get();
    } else {
        *val = cpu_get_host_ticks();
    }
#else
    *val = cpu_get_host_ticks();
#endif
    return RISCV_EXCP_NONE;
}

static RISCVException read_instreth(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
#if !defined(CONFIG_USER_ONLY)
    if (icount_enabled()) {
        *val = icount_get() >> 32;
    } else {
        *val = cpu_get_host_ticks() >> 32;
    }
#else
    *val = cpu_get_host_ticks() >> 32;
#endif
    return RISCV_EXCP_NONE;
}

#if defined(CONFIG_USER_ONLY)
static RISCVException read_time(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = cpu_get_host_ticks();
    return RISCV_EXCP_NONE;
}

static RISCVException read_timeh(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = cpu_get_host_ticks() >> 32;
    return RISCV_EXCP_NONE;
}

#else /* CONFIG_USER_ONLY */

static RISCVException read_time(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;

    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
    return RISCV_EXCP_NONE;
}

static RISCVException read_timeh(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;

    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
    return RISCV_EXCP_NONE;
}

/* Machine constants */

#define M_MODE_INTERRUPTS  ((uint64_t)(MIP_MSIP | MIP_MTIP | MIP_MEIP))
#define S_MODE_INTERRUPTS  ((uint64_t)(MIP_SSIP | MIP_STIP | MIP_SEIP))
#define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP))
#define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS))

#define VSTOPI_NUM_SRCS 5

static const uint64_t delegable_ints = S_MODE_INTERRUPTS |
                                       VS_MODE_INTERRUPTS;
static const uint64_t vs_delegable_ints = VS_MODE_INTERRUPTS;
static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
                                 HS_MODE_INTERRUPTS;
#define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
                         (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
                         (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_U_ECALL)) | \
                         (1ULL << (RISCV_EXCP_S_ECALL)) | \
                         (1ULL << (RISCV_EXCP_VS_ECALL)) | \
                         (1ULL << (RISCV_EXCP_M_ECALL)) | \
                         (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
                         (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
                         (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
                         (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
    ~((1ULL << (RISCV_EXCP_S_ECALL)) |
      (1ULL << (RISCV_EXCP_VS_ECALL)) |
      (1ULL << (RISCV_EXCP_M_ECALL)) |
      (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
      (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
      (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
      (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
    SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
    SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP;
static const target_ulong hip_writable_mask = MIP_VSSIP;
static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
static const target_ulong vsip_writable_mask = MIP_VSSIP;

static const char valid_vm_1_10_32[16] = {
    [VM_1_10_MBARE] = 1,
    [VM_1_10_SV32] = 1
};

static const char valid_vm_1_10_64[16] = {
    [VM_1_10_MBARE] = 1,
    [VM_1_10_SV39] = 1,
    [VM_1_10_SV48] = 1,
    [VM_1_10_SV57] = 1
};

/* Machine Information Registers */
static RISCVException read_zero(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = 0;
    return RISCV_EXCP_NONE;
}

static RISCVException write_ignore(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    return RISCV_EXCP_NONE;
}

static RISCVException read_mvendorid(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    *val = cpu->cfg.mvendorid;
    return RISCV_EXCP_NONE;
}

static RISCVException read_marchid(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    *val = cpu->cfg.marchid;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mimpid(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    CPUState *cs = env_cpu(env);
    RISCVCPU *cpu = RISCV_CPU(cs);

    *val = cpu->cfg.mimpid;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mhartid(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mhartid;
    return RISCV_EXCP_NONE;
}

/* Machine Trap Setup */

/* We do not store SD explicitly, only compute it on demand. */
static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
{
    if ((status & MSTATUS_FS) == MSTATUS_FS ||
        (status & MSTATUS_VS) == MSTATUS_VS ||
        (status & MSTATUS_XS) == MSTATUS_XS) {
        switch (xl) {
        case MXL_RV32:
            return status | MSTATUS32_SD;
        case MXL_RV64:
            return status | MSTATUS64_SD;
        case MXL_RV128:
            return MSTATUSH128_SD;
        default:
            g_assert_not_reached();
        }
    }
    return status;
}

static RISCVException read_mstatus(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
    return RISCV_EXCP_NONE;
}

static int validate_vm(CPURISCVState *env, target_ulong vm)
{
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        return valid_vm_1_10_32[vm & 0xf];
    } else {
        return valid_vm_1_10_64[vm & 0xf];
    }
}

static RISCVException write_mstatus(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus = env->mstatus;
    uint64_t mask = 0;
    RISCVMXL xl = riscv_cpu_mxl(env);

    /* flush tlb on mstatus fields that affect VM */
    if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV |
            MSTATUS_MPRV | MSTATUS_SUM)) {
        tlb_flush(env_cpu(env));
    }
    mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
        MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
        MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
        MSTATUS_TW | MSTATUS_VS;

    if (riscv_has_ext(env, RVF)) {
        mask |= MSTATUS_FS;
    }

    if (xl != MXL_RV32 || env->debugger) {
        /*
         * RV32: MPV and GVA are not in mstatus. The current plan is to
         * add them to mstatush. For now, we just don't support it.
         */
        mask |= MSTATUS_MPV | MSTATUS_GVA;
        if ((val & MSTATUS64_UXL) != 0) {
            mask |= MSTATUS64_UXL;
        }
    }

    mstatus = (mstatus & ~mask) | (val & mask);

    if (xl > MXL_RV32) {
        /* SXL field is for now read only */
        mstatus = set_field(mstatus, MSTATUS64_SXL, xl);
    }
    env->mstatus = mstatus;
    env->xl = cpu_recompute_xl(env);

    return RISCV_EXCP_NONE;
}

static RISCVException read_mstatush(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mstatus >> 32;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mstatush(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    uint64_t valh = (uint64_t)val << 32;
    uint64_t mask = MSTATUS_MPV | MSTATUS_GVA;

    if ((valh ^ env->mstatus) & (MSTATUS_MPV)) {
        tlb_flush(env_cpu(env));
    }

    env->mstatus = (env->mstatus & ~mask) | (valh & mask);

    return RISCV_EXCP_NONE;
}

static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
                                        Int128 *val)
{
    *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128, env->mstatus));
    return RISCV_EXCP_NONE;
}

static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
                                     Int128 *val)
{
    *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
    return RISCV_EXCP_NONE;
}

static RISCVException read_misa(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    target_ulong misa;

    switch (env->misa_mxl) {
    case MXL_RV32:
        misa = (target_ulong)MXL_RV32 << 30;
        break;
#ifdef TARGET_RISCV64
    case MXL_RV64:
        misa = (target_ulong)MXL_RV64 << 62;
        break;
#endif
    default:
        g_assert_not_reached();
    }

    *val = misa | env->misa_ext;
    return RISCV_EXCP_NONE;
}

static RISCVException write_misa(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    if (!riscv_feature(env, RISCV_FEATURE_MISA)) {
        /* drop write to misa */
        return RISCV_EXCP_NONE;
    }

    /* 'I' or 'E' must be present */
    if (!(val & (RVI | RVE))) {
        /* It is not, drop write to misa */
        return RISCV_EXCP_NONE;
    }

    /* 'E' excludes all other extensions */
    if (val & RVE) {
        /*
         * when we support 'E' we can do "val = RVE;" however
         * for now we just drop writes if 'E' is present.
         */
        return RISCV_EXCP_NONE;
    }

    /*
     * misa.MXL writes are not supported by QEMU.
     * Drop writes to those bits.
     */

    /* Mask extensions that are not supported by this hart */
    val &= env->misa_ext_mask;

    /* Mask extensions that are not supported by QEMU */
    val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU | RVV);

    /* 'D' depends on 'F', so clear 'D' if 'F' is not present */
    if ((val & RVD) && !(val & RVF)) {
        val &= ~RVD;
    }

    /*
     * Suppress 'C' if next instruction is not aligned
     * TODO: this should check next_pc
     */
    if ((val & RVC) && (GETPC() & ~3) != 0) {
        val &= ~RVC;
    }

    /* If nothing changed, do nothing. */
    if (val == env->misa_ext) {
        return RISCV_EXCP_NONE;
    }

    if (!(val & RVF)) {
        env->mstatus &= ~MSTATUS_FS;
    }

    /* flush translation cache */
    tb_flush(env_cpu(env));
    env->misa_ext = val;
    env->xl = riscv_cpu_mxl(env);
    return RISCV_EXCP_NONE;
}

static RISCVException read_medeleg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->medeleg;
    return RISCV_EXCP_NONE;
}

static RISCVException write_medeleg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
    return RISCV_EXCP_NONE;
}
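
/*
 * The interrupt-related CSRs below are backed by a 64-bit image even on
 * RV32: a common *64 read-modify-write helper does the real work, and the
 * narrower CSR accessors wrap it, with the "h" variants shifting their
 * value and write mask into the upper 32 bits.
 */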

static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno,
                                    uint64_t *ret_val,
                                    uint64_t new_val, uint64_t wr_mask)
{
    uint64_t mask = wr_mask & delegable_ints;

    if (ret_val) {
        *ret_val = env->mideleg;
    }

    env->mideleg = (env->mideleg & ~mask) | (new_val & mask);

    if (riscv_has_ext(env, RVH)) {
        env->mideleg |= HS_MODE_INTERRUPTS;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException rmw_mideleg(CPURISCVState *env, int csrno,
                                  target_ulong *ret_val,
                                  target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}

static RISCVException rmw_midelegh(CPURISCVState *env, int csrno,
                                   target_ulong *ret_val,
                                   target_ulong new_val,
                                   target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mideleg64(env, csrno, &rval,
        ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    uint64_t mask = wr_mask & all_ints;

    if (ret_val) {
        *ret_val = env->mie;
    }

    env->mie = (env->mie & ~mask) | (new_val & mask);

    if (!riscv_has_ext(env, RVH)) {
        env->mie &= ~((uint64_t)MIP_SGEIP);
    }

    return RISCV_EXCP_NONE;
}

static RISCVException rmw_mie(CPURISCVState *env, int csrno,
                              target_ulong *ret_val,
                              target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}

static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
                               target_ulong *ret_val,
                               target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mie64(env, csrno, &rval,
        ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val)
{
    int irq;
    uint8_t iprio;

    irq = riscv_cpu_mirq_pending(env);
    if (irq <= 0 || irq > 63) {
        *val = 0;
    } else {
        iprio = env->miprio[irq];
        if (!iprio) {
            if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) {
                iprio = IPRIO_MMAXIPRIO;
            }
        }
        *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
        *val |= iprio;
    }

    return RISCV_EXCP_NONE;
}
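
/*
 * With the Hypervisor extension and V=1, accesses to the S-mode AIA CSRs
 * are redirected to their VS-mode counterparts before the common handlers
 * below decode them.
 */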

static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
{
    if (!riscv_cpu_virt_enabled(env)) {
        return csrno;
    }

    switch (csrno) {
    case CSR_SISELECT:
        return CSR_VSISELECT;
    case CSR_SIREG:
        return CSR_VSIREG;
    case CSR_SSETEIPNUM:
        return CSR_VSSETEIPNUM;
    case CSR_SCLREIPNUM:
        return CSR_VSCLREIPNUM;
    case CSR_SSETEIENUM:
        return CSR_VSSETEIENUM;
    case CSR_SCLREIENUM:
        return CSR_VSCLREIENUM;
    case CSR_STOPEI:
        return CSR_VSTOPEI;
    default:
        return csrno;
    };
}

static int rmw_xiselect(CPURISCVState *env, int csrno, target_ulong *val,
                        target_ulong new_val, target_ulong wr_mask)
{
    target_ulong *iselect;

    /* Translate CSR number for VS-mode */
    csrno = aia_xlate_vs_csrno(env, csrno);

    /* Find the iselect CSR based on CSR number */
    switch (csrno) {
    case CSR_MISELECT:
        iselect = &env->miselect;
        break;
    case CSR_SISELECT:
        iselect = &env->siselect;
        break;
    case CSR_VSISELECT:
        iselect = &env->vsiselect;
        break;
    default:
        return RISCV_EXCP_ILLEGAL_INST;
    };

    if (val) {
        *val = *iselect;
    }

    wr_mask &= ISELECT_MASK;
    if (wr_mask) {
        *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
    }

    return RISCV_EXCP_NONE;
}

static int rmw_iprio(target_ulong xlen,
                     target_ulong iselect, uint8_t *iprio,
                     target_ulong *val, target_ulong new_val,
                     target_ulong wr_mask, int ext_irq_no)
{
    int i, firq, nirqs;
    target_ulong old_val;

    if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) {
        return -EINVAL;
    }
    if (xlen != 32 && iselect & 0x1) {
        return -EINVAL;
    }

    nirqs = 4 * (xlen / 32);
    firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs);

    old_val = 0;
    for (i = 0; i < nirqs; i++) {
        old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i);
    }

    if (val) {
        *val = old_val;
    }

    if (wr_mask) {
        new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
        for (i = 0; i < nirqs; i++) {
            /*
             * M-level and S-level external IRQ priority always read-only
             * zero. This means default priority order is always preferred
             * for M-level and S-level external IRQs.
             */
            if ((firq + i) == ext_irq_no) {
                continue;
            }
            iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff;
        }
    }

    return 0;
}

static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
                     target_ulong new_val, target_ulong wr_mask)
{
    bool virt;
    uint8_t *iprio;
    int ret = -EINVAL;
    target_ulong priv, isel, vgein;

    /* Translate CSR number for VS-mode */
    csrno = aia_xlate_vs_csrno(env, csrno);

    /* Decode register details from CSR number */
    virt = false;
    switch (csrno) {
    case CSR_MIREG:
        iprio = env->miprio;
        isel = env->miselect;
        priv = PRV_M;
        break;
    case CSR_SIREG:
        iprio = env->siprio;
        isel = env->siselect;
        priv = PRV_S;
        break;
    case CSR_VSIREG:
        iprio = env->hviprio;
        isel = env->vsiselect;
        priv = PRV_S;
        virt = true;
        break;
    default:
        goto done;
    };

    /* Find the selected guest interrupt file */
    vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;

    if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) {
        /* Local interrupt priority registers not available for VS-mode */
        if (!virt) {
            ret = rmw_iprio(riscv_cpu_mxl_bits(env),
                            isel, iprio, val, new_val, wr_mask,
                            (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT);
        }
    } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) {
        /* IMSIC registers only available when machine implements it. */
        if (env->aia_ireg_rmw_fn[priv]) {
            /* Selected guest interrupt file should not be zero */
            if (virt && (!vgein || env->geilen < vgein)) {
                goto done;
            }
            /* Call machine specific IMSIC register emulation */
            ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
                                             AIA_MAKE_IREG(isel, priv, virt, vgein,
                                                           riscv_cpu_mxl_bits(env)),
                                             val, new_val, wr_mask);
        }
    }

done:
    if (ret) {
        return (riscv_cpu_virt_enabled(env) && virt) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }
    return RISCV_EXCP_NONE;
}

static int rmw_xsetclreinum(CPURISCVState *env, int csrno, target_ulong *val,
                            target_ulong new_val, target_ulong wr_mask)
{
    int ret = -EINVAL;
    bool set, pend, virt;
    target_ulong priv, isel, vgein, xlen, nval, wmask;

    /* Translate CSR number for VS-mode */
    csrno = aia_xlate_vs_csrno(env, csrno);

    /* Decode register details from CSR number */
    virt = set = pend = false;
    switch (csrno) {
    case CSR_MSETEIPNUM:
        priv = PRV_M;
        set = true;
        pend = true;
        break;
    case CSR_MCLREIPNUM:
        priv = PRV_M;
        pend = true;
        break;
    case CSR_MSETEIENUM:
        priv = PRV_M;
        set = true;
        break;
    case CSR_MCLREIENUM:
        priv = PRV_M;
        break;
    case CSR_SSETEIPNUM:
        priv = PRV_S;
        set = true;
        pend = true;
        break;
    case CSR_SCLREIPNUM:
        priv = PRV_S;
        pend = true;
        break;
    case CSR_SSETEIENUM:
        priv = PRV_S;
        set = true;
        break;
    case CSR_SCLREIENUM:
        priv = PRV_S;
        break;
    case CSR_VSSETEIPNUM:
        priv = PRV_S;
        virt = true;
        set = true;
        pend = true;
        break;
    case CSR_VSCLREIPNUM:
        priv = PRV_S;
        virt = true;
        pend = true;
        break;
    case CSR_VSSETEIENUM:
        priv = PRV_S;
        virt = true;
        set = true;
        break;
    case CSR_VSCLREIENUM:
        priv = PRV_S;
        virt = true;
        break;
    default:
        goto done;
    };

    /* IMSIC CSRs only available when machine implements IMSIC. */
    if (!env->aia_ireg_rmw_fn[priv]) {
        goto done;
    }

    /* Find the selected guest interrupt file */
    vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;

    /* Selected guest interrupt file should be valid */
    if (virt && (!vgein || env->geilen < vgein)) {
        goto done;
    }

    /* Set/Clear CSRs always read zero */
    if (val) {
        *val = 0;
    }

    if (wr_mask) {
        /* Get interrupt number */
        new_val &= wr_mask;

        /* Find target interrupt pending/enable register */
        xlen = riscv_cpu_mxl_bits(env);
        isel = (new_val / xlen);
        isel *= (xlen / IMSIC_EIPx_BITS);
        isel += (pend) ? ISELECT_IMSIC_EIP0 : ISELECT_IMSIC_EIE0;

        /* Find the interrupt bit to be set/clear */
        wmask = ((target_ulong)1) << (new_val % xlen);
        nval = (set) ? wmask : 0;

        /* Call machine specific IMSIC register emulation */
        ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
                                         AIA_MAKE_IREG(isel, priv, virt,
                                                       vgein, xlen),
                                         NULL, nval, wmask);
    } else {
        ret = 0;
    }

done:
    if (ret) {
        return (riscv_cpu_virt_enabled(env) && virt) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }
    return RISCV_EXCP_NONE;
}

static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val,
                      target_ulong new_val, target_ulong wr_mask)
{
    bool virt;
    int ret = -EINVAL;
    target_ulong priv, vgein;

    /* Translate CSR number for VS-mode */
    csrno = aia_xlate_vs_csrno(env, csrno);

    /* Decode register details from CSR number */
    virt = false;
    switch (csrno) {
    case CSR_MTOPEI:
        priv = PRV_M;
        break;
    case CSR_STOPEI:
        priv = PRV_S;
        break;
    case CSR_VSTOPEI:
        priv = PRV_S;
        virt = true;
        break;
    default:
        goto done;
    };

    /* IMSIC CSRs only available when machine implements IMSIC. */
    if (!env->aia_ireg_rmw_fn[priv]) {
        goto done;
    }

    /* Find the selected guest interrupt file */
    vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;

    /* Selected guest interrupt file should be valid */
    if (virt && (!vgein || env->geilen < vgein)) {
        goto done;
    }

    /* Call machine specific IMSIC register emulation for TOPEI */
    ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
                                     AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein,
                                                   riscv_cpu_mxl_bits(env)),
                                     val, new_val, wr_mask);

done:
    if (ret) {
        return (riscv_cpu_virt_enabled(env) && virt) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_mtvec(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->mtvec;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtvec(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
    if ((val & 3) < 2) {
        env->mtvec = val;
    } else {
        qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n");
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->mcounteren;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    env->mcounteren = val;
    return RISCV_EXCP_NONE;
}

/* Machine Trap Handling */
static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
                                         Int128 *val)
{
    *val = int128_make128(env->mscratch, env->mscratchh);
    return RISCV_EXCP_NONE;
}

static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
                                          Int128 val)
{
    env->mscratch = int128_getlo(val);
    env->mscratchh = int128_gethi(val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_mscratch(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->mscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mscratch(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    env->mscratch = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mepc(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->mepc;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mepc(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    env->mepc = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mcause(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mcause;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mcause(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->mcause = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mtval(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->mtval;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtval(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->mtval = val;
    return RISCV_EXCP_NONE;
}

/* Execution environment configuration setup */
static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->menvcfg;
    return RISCV_EXCP_NONE;
}

static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE;

    if (riscv_cpu_mxl(env) == MXL_RV64) {
        mask |= MENVCFG_PBMTE | MENVCFG_STCE;
    }
    env->menvcfg = (env->menvcfg & ~mask) | (val & mask);

    return RISCV_EXCP_NONE;
}

static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->menvcfg >> 32;
    return RISCV_EXCP_NONE;
}

static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    uint64_t mask = MENVCFG_PBMTE | MENVCFG_STCE;
    uint64_t valh = (uint64_t)val << 32;

    env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);

    return RISCV_EXCP_NONE;
}

static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->senvcfg;
    return RISCV_EXCP_NONE;
}

static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;

    env->senvcfg = (env->senvcfg & ~mask) | (val & mask);

    return RISCV_EXCP_NONE;
}

static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->henvcfg;
    return RISCV_EXCP_NONE;
}

static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;

    if (riscv_cpu_mxl(env) == MXL_RV64) {
        mask |= HENVCFG_PBMTE | HENVCFG_STCE;
    }

    env->henvcfg = (env->henvcfg & ~mask) | (val & mask);

    return RISCV_EXCP_NONE;
}

static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->henvcfg >> 32;
    return RISCV_EXCP_NONE;
}

static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    uint64_t mask = HENVCFG_PBMTE | HENVCFG_STCE;
    uint64_t valh = (uint64_t)val << 32;

    env->henvcfg = (env->henvcfg & ~mask) | (valh & mask);

    return RISCV_EXCP_NONE;
}
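
/*
 * mip is writable only for the delegable, software-controlled bits; the
 * value read back also reflects the external SEIP source and, when a guest
 * interrupt file is selected, the VSEIP bit derived from hgeip.
 */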

static RISCVException rmw_mip64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    RISCVCPU *cpu = env_archcpu(env);
    uint64_t old_mip, mask = wr_mask & delegable_ints;
    uint32_t gin;

    if (mask & MIP_SEIP) {
        env->software_seip = new_val & MIP_SEIP;
        new_val |= env->external_seip * MIP_SEIP;
    }

    if (mask) {
        old_mip = riscv_cpu_update_mip(cpu, mask, (new_val & mask));
    } else {
        old_mip = env->mip;
    }

    if (csrno != CSR_HVIP) {
        gin = get_field(env->hstatus, HSTATUS_VGEIN);
        old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0;
    }

    if (ret_val) {
        *ret_val = old_mip;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException rmw_mip(CPURISCVState *env, int csrno,
                              target_ulong *ret_val,
                              target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}

static RISCVException rmw_miph(CPURISCVState *env, int csrno,
                               target_ulong *ret_val,
                               target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mip64(env, csrno, &rval,
        ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

/* Supervisor Trap Setup */
static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
                                        Int128 *val)
{
    uint64_t mask = sstatus_v1_10_mask;
    uint64_t sstatus = env->mstatus & mask;
    if (env->xl != MXL_RV32 || env->debugger) {
        mask |= SSTATUS64_UXL;
    }

    *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
    return RISCV_EXCP_NONE;
}

static RISCVException read_sstatus(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    target_ulong mask = (sstatus_v1_10_mask);
    if (env->xl != MXL_RV32 || env->debugger) {
        mask |= SSTATUS64_UXL;
    }
    /* TODO: Use SXL not MXL. */
    *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
    return RISCV_EXCP_NONE;
}

static RISCVException write_sstatus(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    target_ulong mask = (sstatus_v1_10_mask);

    if (env->xl != MXL_RV32 || env->debugger) {
        if ((val & SSTATUS64_UXL) != 0) {
            mask |= SSTATUS64_UXL;
        }
    }
    target_ulong newval = (env->mstatus & ~mask) | (val & mask);
    return write_mstatus(env, CSR_MSTATUS, newval);
}

static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t rval, vsbits, mask = env->hideleg & VS_MODE_INTERRUPTS;

    /* Bring VS-level bits to correct position */
    vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
    new_val &= ~(VS_MODE_INTERRUPTS >> 1);
    new_val |= vsbits << 1;
    vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
    wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
    wr_mask |= vsbits << 1;

    ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & mask);
    if (ret_val) {
        rval &= mask;
        vsbits = rval & VS_MODE_INTERRUPTS;
        rval &= ~VS_MODE_INTERRUPTS;
        *ret_val = rval | (vsbits >> 1);
    }

    return ret;
}

static RISCVException rmw_vsie(CPURISCVState *env, int csrno,
                               target_ulong *ret_val,
                               target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}

static RISCVException rmw_vsieh(CPURISCVState *env, int csrno,
                                target_ulong *ret_val,
                                target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_vsie64(env, csrno, &rval,
        ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t mask = env->mideleg & S_MODE_INTERRUPTS;

    if (riscv_cpu_virt_enabled(env)) {
        if (env->hvictl & HVICTL_VTI) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
    } else {
        ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & mask);
    }

    if (ret_val) {
        *ret_val &= mask;
    }

    return ret;
}

static RISCVException rmw_sie(CPURISCVState *env, int csrno,
                              target_ulong *ret_val,
                              target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask);
    if (ret == RISCV_EXCP_NONE && ret_val) {
        *ret_val = rval;
    }

    return ret;
}

static RISCVException rmw_sieh(CPURISCVState *env, int csrno,
                               target_ulong *ret_val,
                               target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_sie64(env, csrno, &rval,
        ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

static RISCVException read_stvec(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->stvec;
    return RISCV_EXCP_NONE;
}

static RISCVException write_stvec(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
    if ((val & 3) < 2) {
        env->stvec = val;
    } else {
        qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n");
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_scounteren(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->scounteren;
    return RISCV_EXCP_NONE;
}

static RISCVException write_scounteren(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    env->scounteren = val;
    return RISCV_EXCP_NONE;
}

/* Supervisor Trap Handling */
static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
                                         Int128 *val)
{
    *val = int128_make128(env->sscratch, env->sscratchh);
    return RISCV_EXCP_NONE;
}

static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
                                          Int128 val)
{
    env->sscratch = int128_getlo(val);
    env->sscratchh = int128_gethi(val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_sscratch(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->sscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_sscratch(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    env->sscratch = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_sepc(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->sepc;
    return RISCV_EXCP_NONE;
}

static RISCVException write_sepc(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    env->sepc = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_scause(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->scause;
    return RISCV_EXCP_NONE;
}

static RISCVException write_scause(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->scause = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_stval(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->stval;
    return RISCV_EXCP_NONE;
}

static RISCVException write_stval(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->stval = val;
    return RISCV_EXCP_NONE;
}

static RISCVException rmw_vsip64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t rval, vsbits, mask = env->hideleg & vsip_writable_mask;

    /* Bring VS-level bits to correct position */
    vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
    new_val &= ~(VS_MODE_INTERRUPTS >> 1);
    new_val |= vsbits << 1;
    vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
    wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
    wr_mask |= vsbits << 1;

    ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask & mask);
    if (ret_val) {
        rval &= mask;
        vsbits = rval & VS_MODE_INTERRUPTS;
        rval &= ~VS_MODE_INTERRUPTS;
        *ret_val = rval | (vsbits >> 1);
    }

    return ret;
}

static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
                               target_ulong *ret_val,
                               target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}

static RISCVException rmw_vsiph(CPURISCVState *env, int csrno,
                                target_ulong *ret_val,
                                target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_vsip64(env, csrno, &rval,
        ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
                                uint64_t *ret_val,
                                uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;
    uint64_t mask = env->mideleg & sip_writable_mask;

    if (riscv_cpu_virt_enabled(env)) {
        if (env->hvictl & HVICTL_VTI) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
    } else {
        ret = rmw_mip64(env, csrno, ret_val, new_val, wr_mask & mask);
    }

    if (ret_val) {
        *ret_val &= env->mideleg & S_MODE_INTERRUPTS;
    }

    return ret;
}

static RISCVException rmw_sip(CPURISCVState *env, int csrno,
                              target_ulong *ret_val,
                              target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}

static RISCVException rmw_siph(CPURISCVState *env, int csrno,
                               target_ulong *ret_val,
                               target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_sip64(env, csrno, &rval,
        ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

/* Supervisor Protection and Translation */
static RISCVException read_satp(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
        *val = 0;
        return RISCV_EXCP_NONE;
    }

    if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
        return RISCV_EXCP_ILLEGAL_INST;
    } else {
        *val = env->satp;
    }

    return RISCV_EXCP_NONE;
}

static RISCVException write_satp(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    target_ulong vm, mask;

    if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
        return RISCV_EXCP_NONE;
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        vm = validate_vm(env, get_field(val, SATP32_MODE));
        mask = (val ^ env->satp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
    } else {
        vm = validate_vm(env, get_field(val, SATP64_MODE));
        mask = (val ^ env->satp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
    }

    if (vm && mask) {
        if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
            return RISCV_EXCP_ILLEGAL_INST;
        } else {
            /*
             * The ISA defines SATP.MODE=Bare as "no translation", but we still
             * pass these through QEMU's TLB emulation as it improves
             * performance.  Flushing the TLB on SATP writes with paging
             * enabled avoids leaking those invalid cached mappings.
             */
            tlb_flush(env_cpu(env));
            env->satp = val;
        }
    }
    return RISCV_EXCP_NONE;
}
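
/*
 * vstopi reports the highest-priority pending interrupt for VS-mode: the
 * candidates come from the selected guest interrupt file (via TOPEI),
 * hvictl injection and the pending VS-level interrupts, and the candidate
 * with the lowest priority number wins.
 */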

static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val)
{
    int irq, ret;
    target_ulong topei;
    uint64_t vseip, vsgein;
    uint32_t iid, iprio, hviid, hviprio, gein;
    uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS];

    gein = get_field(env->hstatus, HSTATUS_VGEIN);
    hviid = get_field(env->hvictl, HVICTL_IID);
    hviprio = get_field(env->hvictl, HVICTL_IPRIO);

    if (gein) {
        vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
        vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP;
        if (gein <= env->geilen && vseip) {
            siid[scount] = IRQ_S_EXT;
            siprio[scount] = IPRIO_MMAXIPRIO + 1;
            if (env->aia_ireg_rmw_fn[PRV_S]) {
                /*
                 * Call machine specific IMSIC register emulation for
                 * reading TOPEI.
                 */
                ret = env->aia_ireg_rmw_fn[PRV_S](
                        env->aia_ireg_rmw_fn_arg[PRV_S],
                        AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein,
                                      riscv_cpu_mxl_bits(env)),
                        &topei, 0, 0);
                if (!ret && topei) {
                    siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK;
                }
            }
            scount++;
        }
    } else {
        if (hviid == IRQ_S_EXT && hviprio) {
            siid[scount] = IRQ_S_EXT;
            siprio[scount] = hviprio;
            scount++;
        }
    }

    if (env->hvictl & HVICTL_VTI) {
        if (hviid != IRQ_S_EXT) {
            siid[scount] = hviid;
            siprio[scount] = hviprio;
            scount++;
        }
    } else {
        irq = riscv_cpu_vsirq_pending(env);
        if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) {
            siid[scount] = irq;
            siprio[scount] = env->hviprio[irq];
            scount++;
        }
    }

    iid = 0;
    iprio = UINT_MAX;
    for (s = 0; s < scount; s++) {
        if (siprio[s] < iprio) {
            iid = siid[s];
            iprio = siprio[s];
        }
    }

    if (iid) {
        if (env->hvictl & HVICTL_IPRIOM) {
            if (iprio > IPRIO_MMAXIPRIO) {
                iprio = IPRIO_MMAXIPRIO;
            }
            if (!iprio) {
                if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) {
                    iprio = IPRIO_MMAXIPRIO;
                }
            }
        } else {
            iprio = 1;
        }
    } else {
        iprio = 0;
    }

    *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT;
    *val |= iprio;
    return RISCV_EXCP_NONE;
}

static int read_stopi(CPURISCVState *env, int csrno, target_ulong *val)
{
    int irq;
    uint8_t iprio;

    if (riscv_cpu_virt_enabled(env)) {
        return read_vstopi(env, CSR_VSTOPI, val);
    }

    irq = riscv_cpu_sirq_pending(env);
    if (irq <= 0 || irq > 63) {
        *val = 0;
    } else {
        iprio = env->siprio[irq];
        if (!iprio) {
            if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) {
                iprio = IPRIO_MMAXIPRIO;
            }
        }
        *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
        *val |= iprio;
    }

    return RISCV_EXCP_NONE;
}

/* Hypervisor Extensions */
static RISCVException read_hstatus(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->hstatus;
    if (riscv_cpu_mxl(env) != MXL_RV32) {
        /* We only support 64-bit VSXL */
        *val = set_field(*val, HSTATUS_VSXL, 2);
    }
    /* We only support little endian */
    *val = set_field(*val, HSTATUS_VSBE, 0);
    return RISCV_EXCP_NONE;
}

static RISCVException write_hstatus(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    env->hstatus = val;
    if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
        qemu_log_mask(LOG_UNIMP, "QEMU does not support mixed HSXLEN options.");
    }
    if (get_field(val, HSTATUS_VSBE) != 0) {
        qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests.");
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->hedeleg;
    return RISCV_EXCP_NONE;
}

static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    env->hedeleg = val & vs_delegable_excps;
    return RISCV_EXCP_NONE;
}

static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno,
                                    uint64_t *ret_val,
                                    uint64_t new_val, uint64_t wr_mask)
{
    uint64_t mask = wr_mask & vs_delegable_ints;

    if (ret_val) {
        *ret_val = env->hideleg & vs_delegable_ints;
    }

    env->hideleg = (env->hideleg & ~mask) | (new_val & mask);
    return RISCV_EXCP_NONE;
}

static RISCVException rmw_hideleg(CPURISCVState *env, int csrno,
                                  target_ulong *ret_val,
                                  target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}

static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno,
                                   target_ulong *ret_val,
                                   target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_hideleg64(env, csrno, &rval,
        ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
{
    RISCVException ret;

    ret = rmw_mip64(env, csrno, ret_val, new_val,
                    wr_mask & hvip_writable_mask);
    if (ret_val) {
        *ret_val &= VS_MODE_INTERRUPTS;
    }

    return ret;
}

static RISCVException rmw_hvip(CPURISCVState *env, int csrno,
                               target_ulong *ret_val,
                               target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask);
    if (ret_val) {
        *ret_val = rval;
    }

    return ret;
}

static RISCVException rmw_hviph(CPURISCVState *env, int csrno,
                                target_ulong *ret_val,
                                target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_hvip64(env, csrno, &rval,
        ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
    if (ret_val) {
        *ret_val = rval >> 32;
    }

    return ret;
}

static RISCVException rmw_hip(CPURISCVState *env, int csrno,
                              target_ulong *ret_value,
                              target_ulong new_value, target_ulong write_mask)
{
    int ret = rmw_mip(env, csrno, ret_value, new_value,
                      write_mask & hip_writable_mask);

    if (ret_value) {
        *ret_value &= HS_MODE_INTERRUPTS;
    }
    return ret;
}

static RISCVException rmw_hie(CPURISCVState *env, int csrno,
                              target_ulong *ret_val,
                              target_ulong new_val, target_ulong wr_mask)
{
    uint64_t rval;
    RISCVException ret;

    ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS);
    if (ret_val) {
        *ret_val = rval & HS_MODE_INTERRUPTS;
    }

    return ret;
}

static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->hcounteren;
    return RISCV_EXCP_NONE;
}

static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    env->hcounteren = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_hgeie(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    if (val) {
        *val = env->hgeie;
    }
    return RISCV_EXCP_NONE;
}

static RISCVException write_hgeie(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    /* Only GEILEN:1 bits implemented and BIT0 is never implemented */
    val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
    env->hgeie = val;
    /* Update mip.SGEIP bit */
    riscv_cpu_update_mip(env_archcpu(env), MIP_SGEIP,
                         BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    return RISCV_EXCP_NONE;
}

static RISCVException read_htval(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->htval;
    return RISCV_EXCP_NONE;
}

static RISCVException write_htval(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->htval = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_htinst(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->htinst;
    return RISCV_EXCP_NONE;
}

static RISCVException write_htinst(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    return RISCV_EXCP_NONE;
}

static RISCVException read_hgeip(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    if (val) {
        *val = env->hgeip;
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_hgatp(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->hgatp;
    return RISCV_EXCP_NONE;
}

static RISCVException write_hgatp(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->hgatp = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = env->htimedelta;
    return RISCV_EXCP_NONE;
}

static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
    } else {
        env->htimedelta = val;
    }
    return RISCV_EXCP_NONE;
}

static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
                                       target_ulong *val)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = env->htimedelta >> 32;
    return RISCV_EXCP_NONE;
}

static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
                                        target_ulong val)
{
    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);
    return RISCV_EXCP_NONE;
}

static int read_hvictl(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = env->hvictl;
    return RISCV_EXCP_NONE;
}

static int write_hvictl(CPURISCVState *env, int csrno, target_ulong val)
{
    env->hvictl = val & HVICTL_VALID_MASK;
    return RISCV_EXCP_NONE;
}

static int read_hvipriox(CPURISCVState *env, int first_index,
                         uint8_t *iprio, target_ulong *val)
{
    int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);

    /* First index has to be a multiple of number of irqs per register */
    if (first_index % num_irqs) {
        return (riscv_cpu_virt_enabled(env)) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }

    /* Fill-up return value */
    *val = 0;
    for (i = 0; i < num_irqs; i++) {
        if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
            continue;
        }
        if (rdzero) {
            continue;
        }
        *val |= ((target_ulong)iprio[irq]) << (i * 8);
    }

    return RISCV_EXCP_NONE;
}

static int write_hvipriox(CPURISCVState *env, int first_index,
                          uint8_t *iprio, target_ulong val)
{
    int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);

    /* First index has to be a multiple of number of irqs per register */
    if (first_index % num_irqs) {
        return (riscv_cpu_virt_enabled(env)) ?
               RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
    }

    /* Fill-up priority array */
    for (i = 0; i < num_irqs; i++) {
        if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
            continue;
        }
        if (rdzero) {
            iprio[irq] = 0;
        } else {
            iprio[irq] = (val >> (i * 8)) & 0xff;
        }
    }

    return RISCV_EXCP_NONE;
}

static int read_hviprio1(CPURISCVState *env, int csrno, target_ulong *val)
{
    return read_hvipriox(env, 0, env->hviprio, val);
}

static int write_hviprio1(CPURISCVState *env, int csrno, target_ulong val)
{
    return write_hvipriox(env, 0, env->hviprio, val);
}

static int read_hviprio1h(CPURISCVState *env, int csrno, target_ulong *val)
{
    return read_hvipriox(env, 4, env->hviprio, val);
}

static int write_hviprio1h(CPURISCVState *env, int csrno, target_ulong val)
{
    return write_hvipriox(env, 4, env->hviprio, val);
}

static int read_hviprio2(CPURISCVState *env, int csrno, target_ulong *val)
{
    return read_hvipriox(env, 8, env->hviprio, val);
}

static int write_hviprio2(CPURISCVState *env, int csrno, target_ulong val)
{
    return write_hvipriox(env, 8, env->hviprio, val);
}

static int read_hviprio2h(CPURISCVState *env, int csrno, target_ulong *val)
{
    return read_hvipriox(env, 12, env->hviprio, val);
}

static int write_hviprio2h(CPURISCVState *env, int csrno, target_ulong val)
{
    return write_hvipriox(env, 12, env->hviprio, val);
}

/* Virtual CSR Registers */
static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    *val = env->vsstatus;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    uint64_t mask = (target_ulong)-1;
    if ((val & VSSTATUS64_UXL) == 0) {
        mask &= ~VSSTATUS64_UXL;
    }
    env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
    return RISCV_EXCP_NONE;
}

static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val)
{
    *val = env->vstvec;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vstvec(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->vstvec = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->vsscratch;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    env->vsscratch = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vsepc(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vsepc;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsepc(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->vsepc = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vscause(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->vscause;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vscause(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    env->vscause = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vstval(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->vstval;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vstval(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->vstval = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_vsatp(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = env->vsatp;
    return RISCV_EXCP_NONE;
}

static RISCVException write_vsatp(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    env->vsatp = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mtval2(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mtval2;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtval2(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->mtval2 = val;
    return RISCV_EXCP_NONE;
}

static RISCVException read_mtinst(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = env->mtinst;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mtinst(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    env->mtinst = val;
    return RISCV_EXCP_NONE;
}
2699 /* Physical Memory Protection */
2700 static RISCVException
read_mseccfg(CPURISCVState
*env
, int csrno
,
2703 *val
= mseccfg_csr_read(env
);
2704 return RISCV_EXCP_NONE
;
2707 static RISCVException
write_mseccfg(CPURISCVState
*env
, int csrno
,
2710 mseccfg_csr_write(env
, val
);
2711 return RISCV_EXCP_NONE
;
2714 static bool check_pmp_reg_index(CPURISCVState
*env
, uint32_t reg_index
)
2716 /* TODO: RV128 restriction check */
2717 if ((reg_index
& 1) && (riscv_cpu_mxl(env
) == MXL_RV64
)) {
2723 static RISCVException
read_pmpcfg(CPURISCVState
*env
, int csrno
,
2726 uint32_t reg_index
= csrno
- CSR_PMPCFG0
;
2728 if (!check_pmp_reg_index(env
, reg_index
)) {
2729 return RISCV_EXCP_ILLEGAL_INST
;
2731 *val
= pmpcfg_csr_read(env
, csrno
- CSR_PMPCFG0
);
2732 return RISCV_EXCP_NONE
;

static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    uint32_t reg_index = csrno - CSR_PMPCFG0;

    if (!check_pmp_reg_index(env, reg_index)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }
    pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
    return RISCV_EXCP_NONE;
}

static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
    return RISCV_EXCP_NONE;
}
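
/*
 * The pmpaddr CSRs hold a physical address shifted right by two, so the
 * smallest region a single entry can describe is four bytes (NA4);
 * pmpaddr_csr_read()/pmpaddr_csr_write() store and return this encoded form.
 */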

static RISCVException read_tselect(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = tselect_csr_read(env);
    return RISCV_EXCP_NONE;
}

static RISCVException write_tselect(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    tselect_csr_write(env, val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_tdata(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    /* return 0 in tdata1 to end the trigger enumeration */
    if (env->trigger_cur >= TRIGGER_NUM && csrno == CSR_TDATA1) {
        *val = 0;
        return RISCV_EXCP_NONE;
    }

    if (!tdata_available(env, csrno - CSR_TDATA1)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = tdata_csr_read(env, csrno - CSR_TDATA1);
    return RISCV_EXCP_NONE;
}

static RISCVException write_tdata(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
    if (!tdata_available(env, csrno - CSR_TDATA1)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    tdata_csr_write(env, csrno - CSR_TDATA1, val);
    return RISCV_EXCP_NONE;
}
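
/*
 * tselect/tdata follow the debug trigger module's window model: software
 * writes tselect to pick a trigger, then reads or writes tdata1..tdata3 of
 * that trigger, which is why the helpers above index everything by
 * csrno - CSR_TDATA1.
 */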

/*
 * Functions to access Pointer Masking feature registers.
 * We have to check whether the current privilege level can modify
 * the csr in the given mode.
 */
static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
{
    int csr_priv = get_field(csrno, 0x300);
    int pm_current;

    if (env->debugger) {
        return false;
    }
    /*
     * If priv lvls differ that means we're accessing csr from higher priv lvl,
     * so allow the access
     */
    if (env->priv != csr_priv) {
        return false;
    }
    switch (env->priv) {
    case PRV_M:
        pm_current = get_field(env->mmte, M_PM_CURRENT);
        break;
    case PRV_S:
        pm_current = get_field(env->mmte, S_PM_CURRENT);
        break;
    case PRV_U:
        pm_current = get_field(env->mmte, U_PM_CURRENT);
        break;
    default:
        g_assert_not_reached();
    }
    /* It's same priv lvl, so we allow to modify csr only if pm.current==1 */
    return !pm_current;
}
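
/*
 * The 0x300 mask above extracts CSR address bits [9:8], which by convention
 * encode the lowest privilege level allowed to access the CSR; comparing
 * that field with env->priv is what lets the same helper serve the M-, S-
 * and U-mode pointer-masking registers.
 */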

static RISCVException read_mmte(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->mmte & MMTE_MASK;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mmte(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    uint64_t mstatus;
    target_ulong wpri_val = val & MMTE_MASK;

    if (val != wpri_val) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
                      "MMTE: WPRI violation written 0x", val,
                      "vs expected 0x", wpri_val);
    }
    /* for machine mode pm.current is hardwired to 1 */
    wpri_val |= MMTE_M_PM_CURRENT;

    /* hardwiring pm.instruction bit to 0, since it's not supported yet */
    wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
    env->mmte = wpri_val | PM_EXT_DIRTY;
    riscv_cpu_update_mask(env);

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}
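
/*
 * Every pointer-masking write helper below ends with the same dirty-tracking
 * sequence: mark the PM state dirty (PM_EXT_DIRTY) and set XS in mstatus so
 * that SD is reported, mirroring how FS and VS track dirty floating-point
 * and vector state.
 */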

static RISCVException read_smte(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->mmte & SMTE_MASK;
    return RISCV_EXCP_NONE;
}

static RISCVException write_smte(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    target_ulong wpri_val = val & SMTE_MASK;

    if (val != wpri_val) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
                      "SMTE: WPRI violation written 0x", val,
                      "vs expected 0x", wpri_val);
    }

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }

    wpri_val |= (env->mmte & ~SMTE_MASK);
    write_mmte(env, csrno, wpri_val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_umte(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = env->mmte & UMTE_MASK;
    return RISCV_EXCP_NONE;
}

static RISCVException write_umte(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    target_ulong wpri_val = val & UMTE_MASK;

    if (val != wpri_val) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
                      "UMTE: WPRI violation written 0x", val,
                      "vs expected 0x", wpri_val);
    }

    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }

    wpri_val |= (env->mmte & ~UMTE_MASK);
    write_mmte(env, csrno, wpri_val);
    return RISCV_EXCP_NONE;
}

static RISCVException read_mpmmask(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mpmmask;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    env->mpmmask = val;
    if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
        env->cur_pmmask = val;
    }
    env->mmte |= PM_EXT_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}

static RISCVException read_spmmask(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->spmmask;
    return RISCV_EXCP_NONE;
}

static RISCVException write_spmmask(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }
    env->spmmask = val;
    if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
        env->cur_pmmask = val;
    }
    env->mmte |= PM_EXT_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}

static RISCVException read_upmmask(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->upmmask;
    return RISCV_EXCP_NONE;
}

static RISCVException write_upmmask(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }
    env->upmmask = val;
    if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
        env->cur_pmmask = val;
    }
    env->mmte |= PM_EXT_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}

static RISCVException read_mpmbase(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mpmbase;
    return RISCV_EXCP_NONE;
}

static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    env->mpmbase = val;
    if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
        env->cur_pmbase = val;
    }
    env->mmte |= PM_EXT_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}

static RISCVException read_spmbase(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->spmbase;
    return RISCV_EXCP_NONE;
}

static RISCVException write_spmbase(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }
    env->spmbase = val;
    if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
        env->cur_pmbase = val;
    }
    env->mmte |= PM_EXT_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}

static RISCVException read_upmbase(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->upmbase;
    return RISCV_EXCP_NONE;
}

static RISCVException write_upmbase(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }
    env->upmbase = val;
    if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
        env->cur_pmbase = val;
    }
    env->mmte |= PM_EXT_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}

#endif

/* Crypto Extension */
static RISCVException rmw_seed(CPURISCVState *env, int csrno,
                               target_ulong *ret_value,
                               target_ulong new_value,
                               target_ulong write_mask)
{
    uint16_t random_v;
    Error *random_e = NULL;
    int random_r;
    target_ulong rval;

    random_r = qemu_guest_getrandom(&random_v, 2, &random_e);
    if (unlikely(random_r < 0)) {
        /*
         * Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return a
         * failure indication to the guest. There is no reason
         * we know to expect the failure to be transitory, so
         * indicate DEAD to avoid having the guest spin on WAIT.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
                      __func__, error_get_pretty(random_e));
        error_free(random_e);
        rval = SEED_OPST_DEAD;
    } else {
        rval = random_v | SEED_OPST_ES16;
    }

    if (ret_value) {
        *ret_value = rval;
    }

    return RISCV_EXCP_NONE;
}
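
/*
 * The OPST values used above follow the Zkr entropy-source interface:
 * ES16 signals that seed[15:0] holds sixteen fresh entropy bits, while
 * DEAD tells the guest the source has failed permanently and further
 * polling is pointless.
 */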

/*
 * riscv_csrrw - read and/or update control and status register
 *
 * csrr   <->  riscv_csrrw(env, csrno, ret_value, 0, 0);
 * csrrw  <->  riscv_csrrw(env, csrno, ret_value, value, -1);
 * csrrs  <->  riscv_csrrw(env, csrno, ret_value, -1, value);
 * csrrc  <->  riscv_csrrw(env, csrno, ret_value, 0, value);
 */

static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
                                               int csrno,
                                               bool write_mask,
                                               RISCVCPU *cpu)
{
    /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
    int read_only = get_field(csrno, 0xC00) == 3;
    int csr_min_priv = csr_ops[csrno].min_priv_ver;
#if !defined(CONFIG_USER_ONLY)
    int csr_priv, effective_priv = env->priv;

    if (riscv_has_ext(env, RVH) && env->priv == PRV_S) {
        /*
         * We are in either HS or VS mode.
         * Add 1 to the effective privilege level to allow us to access the
         * Hypervisor CSRs. The `hmode` predicate will determine if access
         * should be allowed (HS) or if a virtual instruction exception
         * should be raised (VS).
         */
        effective_priv++;
    }

    csr_priv = get_field(csrno, 0x300);
    if (!env->debugger && (effective_priv < csr_priv)) {
        if (csr_priv == (PRV_S + 1) && riscv_cpu_virt_enabled(env)) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        return RISCV_EXCP_ILLEGAL_INST;
    }
#endif
    if (write_mask && read_only) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* ensure the CSR extension is enabled */
    if (!cpu->cfg.ext_icsr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* check predicate */
    if (!csr_ops[csrno].predicate) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (env->priv_ver < csr_min_priv) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return csr_ops[csrno].predicate(env, csrno);
}

static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
                                       target_ulong *ret_value,
                                       target_ulong new_value,
                                       target_ulong write_mask)
{
    RISCVException ret;
    target_ulong old_value;

    /* execute combined read/write operation if it exists */
    if (csr_ops[csrno].op) {
        return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
    }

    /* if no accessor exists then return failure */
    if (!csr_ops[csrno].read) {
        return RISCV_EXCP_ILLEGAL_INST;
    }
    /* read old value */
    ret = csr_ops[csrno].read(env, csrno, &old_value);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (write_mask) {
        new_value = (old_value & ~write_mask) | (new_value & write_mask);
        if (csr_ops[csrno].write) {
            ret = csr_ops[csrno].write(env, csrno, new_value);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}

RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value,
                           target_ulong new_value, target_ulong write_mask)
{
    RISCVCPU *cpu = env_archcpu(env);

    RISCVException ret = riscv_csrrw_check(env, csrno, write_mask, cpu);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
}
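
/*
 * As a usage sketch (the exact call site varies): a csrrs that sets SIE in
 * mstatus would funnel through something like
 *
 *     target_ulong old;
 *     riscv_csrrw(env, CSR_MSTATUS, &old, (target_ulong)-1, MSTATUS_SIE);
 *
 * i.e. new_value is all-ones and write_mask carries the bits to set, per
 * the csrrs mapping documented above riscv_csrrw_check().
 */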

static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
                                        Int128 *ret_value,
                                        Int128 new_value,
                                        Int128 write_mask)
{
    RISCVException ret;
    Int128 old_value;

    /* read old value */
    ret = csr_ops[csrno].read128(env, csrno, &old_value);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (int128_nz(write_mask)) {
        new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
                              int128_and(new_value, write_mask));
        if (csr_ops[csrno].write128) {
            ret = csr_ops[csrno].write128(env, csrno, new_value);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        } else if (csr_ops[csrno].write) {
            /* avoids having to write wrappers for all registers */
            ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}

RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value,
                                Int128 new_value, Int128 write_mask)
{
    RISCVException ret;
    RISCVCPU *cpu = env_archcpu(env);

    ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask), cpu);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (csr_ops[csrno].read128) {
        return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
    }

    /*
     * Fall back to 64-bit version for now, if the 128-bit alternative isn't
     * present.
     * Note, some CSRs don't need to extend to MXLEN (64 upper bits not
     * significant); for those, this fallback handles the access correctly.
     */
    target_ulong old_value;
    ret = riscv_csrrw_do64(env, csrno, &old_value,
                           int128_getlo(new_value),
                           int128_getlo(write_mask));
    if (ret == RISCV_EXCP_NONE && ret_value) {
        *ret_value = int128_make64(old_value);
    }
    return ret;
}

/*
 * Debugger support.  If not in user mode, set env->debugger before the
 * riscv_csrrw call and clear it after the call.
 */
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask)
{
    RISCVException ret;
#if !defined(CONFIG_USER_ONLY)
    env->debugger = true;
#endif
    ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
#if !defined(CONFIG_USER_ONLY)
    env->debugger = false;
#endif
    return ret;
}
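
/*
 * With env->debugger set, CSR accesses coming from the gdbstub bypass the
 * privilege comparison in riscv_csrrw_check() and the pm.current check in
 * check_pm_current_disabled(), so the debugger can inspect and modify any
 * implemented CSR.
 */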

/* Control and Status Register function table */
riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
    /* User Floating-Point CSRs */
    [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
    [CSR_FRM]    = { "frm",    fs, read_frm,    write_frm    },
    [CSR_FCSR]   = { "fcsr",   fs, read_fcsr,   write_fcsr   },
    /* Vector CSRs */
    [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart,
                     .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_VXSAT]  = { "vxsat",  vs, read_vxsat,  write_vxsat,
                     .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_VXRM]   = { "vxrm",   vs, read_vxrm,   write_vxrm,
                     .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_VCSR]   = { "vcsr",   vs, read_vcsr,   write_vcsr,
                     .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_VL]     = { "vl",     vs, read_vl,
                     .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_VTYPE]  = { "vtype",  vs, read_vtype,
                     .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_VLENB]  = { "vlenb",  vs, read_vlenb,
                     .min_priv_ver = PRIV_VERSION_1_12_0 },
    /* User Timers and Counters */
    [CSR_CYCLE]    = { "cycle",    ctr,   read_instret  },
    [CSR_INSTRET]  = { "instret",  ctr,   read_instret  },
    [CSR_CYCLEH]   = { "cycleh",   ctr32, read_instreth },
    [CSR_INSTRETH] = { "instreth", ctr32, read_instreth },

    /*
     * In privileged mode, the monitor will have to emulate TIME CSRs only if
     * rdtime callback is not provided by machine/platform emulation.
     */
    [CSR_TIME]  = { "time",  ctr,   read_time  },
    [CSR_TIMEH] = { "timeh", ctr32, read_timeh },

    /* Crypto Extension */
    [CSR_SEED] = { "seed", seed, NULL, NULL, rmw_seed },

#if !defined(CONFIG_USER_ONLY)
    /* Machine Timers and Counters */
    [CSR_MCYCLE]    = { "mcycle",    any,   read_instret  },
    [CSR_MINSTRET]  = { "minstret",  any,   read_instret  },
    [CSR_MCYCLEH]   = { "mcycleh",   any32, read_instreth },
    [CSR_MINSTRETH] = { "minstreth", any32, read_instreth },

    /* Machine Information Registers */
    [CSR_MVENDORID] = { "mvendorid", any, read_mvendorid },
    [CSR_MARCHID]   = { "marchid",   any, read_marchid   },
    [CSR_MIMPID]    = { "mimpid",    any, read_mimpid    },
    [CSR_MHARTID]   = { "mhartid",   any, read_mhartid   },

    [CSR_MCONFIGPTR] = { "mconfigptr", any, read_zero,
                         .min_priv_ver = PRIV_VERSION_1_12_0 },
    /* Machine Trap Setup */
    [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus, NULL,
                      read_mstatus_i128 },
    [CSR_MISA]    = { "misa",    any, read_misa,    write_misa,    NULL,
                      read_misa_i128 },
    [CSR_MIDELEG]    = { "mideleg",    any, NULL, NULL,      rmw_mideleg      },
    [CSR_MEDELEG]    = { "medeleg",    any, read_medeleg,    write_medeleg    },
    [CSR_MIE]        = { "mie",        any, NULL, NULL,      rmw_mie          },
    [CSR_MTVEC]      = { "mtvec",      any, read_mtvec,      write_mtvec      },
    [CSR_MCOUNTEREN] = { "mcounteren", any, read_mcounteren, write_mcounteren },

    [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush, write_mstatush },

    /* Machine Trap Handling */
    [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch, NULL,
                       read_mscratch_i128, write_mscratch_i128 },
    [CSR_MEPC]     = { "mepc",     any, read_mepc,     write_mepc   },
    [CSR_MCAUSE]   = { "mcause",   any, read_mcause,   write_mcause },
    [CSR_MTVAL]    = { "mtval",    any, read_mtval,    write_mtval  },
    [CSR_MIP]      = { "mip",      any, NULL, NULL,    rmw_mip      },

    /* Machine-Level Window to Indirectly Accessed Registers (AIA) */
    [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect },
    [CSR_MIREG]    = { "mireg",    aia_any, NULL, NULL, rmw_xireg    },

    /* Machine-Level Interrupts (AIA) */
    [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },

    /* Machine-Level IMSIC Interface (AIA) */
    [CSR_MSETEIPNUM] = { "mseteipnum", aia_any, NULL, NULL, rmw_xsetclreinum },
    [CSR_MCLREIPNUM] = { "mclreipnum", aia_any, NULL, NULL, rmw_xsetclreinum },
    [CSR_MSETEIENUM] = { "mseteienum", aia_any, NULL, NULL, rmw_xsetclreinum },
    [CSR_MCLREIENUM] = { "mclreienum", aia_any, NULL, NULL, rmw_xsetclreinum },
    [CSR_MTOPEI]     = { "mtopei",     aia_any, NULL, NULL, rmw_xtopei       },

    /* Virtual Interrupts for Supervisor Level (AIA) */
    [CSR_MVIEN] = { "mvien", aia_any, read_zero, write_ignore },
    [CSR_MVIP]  = { "mvip",  aia_any, read_zero, write_ignore },

    /* Machine-Level High-Half CSRs (AIA) */
    [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh },
    [CSR_MIEH]     = { "mieh",     aia_any32, NULL, NULL, rmw_mieh     },
    [CSR_MVIENH]   = { "mvienh",   aia_any32, read_zero,  write_ignore },
    [CSR_MVIPH]    = { "mviph",    aia_any32, read_zero,  write_ignore },
    [CSR_MIPH]     = { "miph",     aia_any32, NULL, NULL, rmw_miph     },

    /* Execution environment configuration */
    [CSR_MENVCFG]  = { "menvcfg",  any,     read_menvcfg,  write_menvcfg,
                       .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MENVCFGH] = { "menvcfgh", any32,   read_menvcfgh, write_menvcfgh,
                       .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_SENVCFG]  = { "senvcfg",  smode,   read_senvcfg,  write_senvcfg,
                       .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_HENVCFG]  = { "henvcfg",  hmode,   read_henvcfg,  write_henvcfg,
                       .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_HENVCFGH] = { "henvcfgh", hmode32, read_henvcfgh, write_henvcfgh,
                       .min_priv_ver = PRIV_VERSION_1_12_0 },

    /* Supervisor Trap Setup */
    [CSR_SSTATUS]    = { "sstatus",    smode, read_sstatus, write_sstatus, NULL,
                         read_sstatus_i128 },
    [CSR_SIE]        = { "sie",        smode, NULL, NULL,      rmw_sie          },
    [CSR_STVEC]      = { "stvec",      smode, read_stvec,      write_stvec      },
    [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren, write_scounteren },

    /* Supervisor Trap Handling */
    [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch, NULL,
                       read_sscratch_i128, write_sscratch_i128 },
    [CSR_SEPC]     = { "sepc",     smode, read_sepc,     write_sepc   },
    [CSR_SCAUSE]   = { "scause",   smode, read_scause,   write_scause },
    [CSR_STVAL]    = { "stval",    smode, read_stval,    write_stval  },
    [CSR_SIP]      = { "sip",      smode, NULL, NULL,    rmw_sip      },

    /* Supervisor Protection and Translation */
    [CSR_SATP] = { "satp", smode, read_satp, write_satp },

    /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
    [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect },
    [CSR_SIREG]    = { "sireg",    aia_smode, NULL, NULL, rmw_xireg    },

    /* Supervisor-Level Interrupts (AIA) */
    [CSR_STOPI] = { "stopi", aia_smode, read_stopi },

    /* Supervisor-Level IMSIC Interface (AIA) */
    [CSR_SSETEIPNUM] = { "sseteipnum", aia_smode, NULL, NULL, rmw_xsetclreinum },
    [CSR_SCLREIPNUM] = { "sclreipnum", aia_smode, NULL, NULL, rmw_xsetclreinum },
    [CSR_SSETEIENUM] = { "sseteienum", aia_smode, NULL, NULL, rmw_xsetclreinum },
    [CSR_SCLREIENUM] = { "sclreienum", aia_smode, NULL, NULL, rmw_xsetclreinum },
    [CSR_STOPEI]     = { "stopei",     aia_smode, NULL, NULL, rmw_xtopei       },

    /* Supervisor-Level High-Half CSRs (AIA) */
    [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh },
    [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph },

    [CSR_HSTATUS]     = { "hstatus",     hmode,   read_hstatus,     write_hstatus,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_HEDELEG]     = { "hedeleg",     hmode,   read_hedeleg,     write_hedeleg,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_HIDELEG]     = { "hideleg",     hmode,   NULL, NULL,       rmw_hideleg,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_HVIP]        = { "hvip",        hmode,   NULL, NULL,       rmw_hvip,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_HIP]         = { "hip",         hmode,   NULL, NULL,       rmw_hip,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_HIE]         = { "hie",         hmode,   NULL, NULL,       rmw_hie,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_HCOUNTEREN]  = { "hcounteren",  hmode,   read_hcounteren,  write_hcounteren,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_HGEIE]       = { "hgeie",       hmode,   read_hgeie,       write_hgeie,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_HTVAL]       = { "htval",       hmode,   read_htval,       write_htval,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_HTINST]      = { "htinst",      hmode,   read_htinst,      write_htinst,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_HGEIP]       = { "hgeip",       hmode,   read_hgeip,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_HGATP]       = { "hgatp",       hmode,   read_hgatp,       write_hgatp,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_HTIMEDELTA]  = { "htimedelta",  hmode,   read_htimedelta,  write_htimedelta,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah, write_htimedeltah,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },

    [CSR_VSSTATUS]  = { "vsstatus",  hmode, read_vsstatus,  write_vsstatus,
                        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_VSIP]      = { "vsip",      hmode, NULL, NULL,     rmw_vsip,
                        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_VSIE]      = { "vsie",      hmode, NULL, NULL,     rmw_vsie,
                        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_VSTVEC]    = { "vstvec",    hmode, read_vstvec,    write_vstvec,
                        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch, write_vsscratch,
                        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_VSEPC]     = { "vsepc",     hmode, read_vsepc,     write_vsepc,
                        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_VSCAUSE]   = { "vscause",   hmode, read_vscause,   write_vscause,
                        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_VSTVAL]    = { "vstval",    hmode, read_vstval,    write_vstval,
                        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_VSATP]     = { "vsatp",     hmode, read_vsatp,     write_vsatp,
                        .min_priv_ver = PRIV_VERSION_1_12_0 },

    [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2,
                     .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst,
                     .min_priv_ver = PRIV_VERSION_1_12_0 },

    /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
    [CSR_HVIEN]    = { "hvien",    aia_hmode, read_zero,     write_ignore   },
    [CSR_HVICTL]   = { "hvictl",   aia_hmode, read_hvictl,   write_hvictl   },
    [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1, write_hviprio1 },
    [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2, write_hviprio2 },

    /*
     * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
     */
    [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL, rmw_xiselect },
    [CSR_VSIREG]    = { "vsireg",    aia_hmode, NULL, NULL, rmw_xireg    },

    /* VS-Level Interrupts (H-extension with AIA) */
    [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi },

    /* VS-Level IMSIC Interface (H-extension with AIA) */
    [CSR_VSSETEIPNUM] = { "vsseteipnum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
    [CSR_VSCLREIPNUM] = { "vsclreipnum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
    [CSR_VSSETEIENUM] = { "vsseteienum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
    [CSR_VSCLREIENUM] = { "vsclreienum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
    [CSR_VSTOPEI]     = { "vstopei",     aia_hmode, NULL, NULL, rmw_xtopei       },

    /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
    [CSR_HIDELEGH]  = { "hidelegh",  aia_hmode32, NULL, NULL,     rmw_hidelegh    },
    [CSR_HVIENH]    = { "hvienh",    aia_hmode32, read_zero,      write_ignore    },
    [CSR_HVIPH]     = { "hviph",     aia_hmode32, NULL, NULL,     rmw_hviph       },
    [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h, write_hviprio1h },
    [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h, write_hviprio2h },
    [CSR_VSIEH]     = { "vsieh",     aia_hmode32, NULL, NULL,     rmw_vsieh       },
    [CSR_VSIPH]     = { "vsiph",     aia_hmode32, NULL, NULL,     rmw_vsiph       },

    /* Physical Memory Protection */
    [CSR_MSECCFG]   = { "mseccfg",   epmp, read_mseccfg, write_mseccfg,
                        .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_PMPCFG0]   = { "pmpcfg0",   pmp, read_pmpcfg,  write_pmpcfg  },
    [CSR_PMPCFG1]   = { "pmpcfg1",   pmp, read_pmpcfg,  write_pmpcfg  },
    [CSR_PMPCFG2]   = { "pmpcfg2",   pmp, read_pmpcfg,  write_pmpcfg  },
    [CSR_PMPCFG3]   = { "pmpcfg3",   pmp, read_pmpcfg,  write_pmpcfg  },
    [CSR_PMPADDR0]  = { "pmpaddr0",  pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR1]  = { "pmpaddr1",  pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR2]  = { "pmpaddr2",  pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR3]  = { "pmpaddr3",  pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR4]  = { "pmpaddr4",  pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR5]  = { "pmpaddr5",  pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR6]  = { "pmpaddr6",  pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR7]  = { "pmpaddr7",  pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR8]  = { "pmpaddr8",  pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR9]  = { "pmpaddr9",  pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
    [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },

    [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect },
    [CSR_TDATA1]  = { "tdata1",  debug, read_tdata,   write_tdata   },
    [CSR_TDATA2]  = { "tdata2",  debug, read_tdata,   write_tdata   },
    [CSR_TDATA3]  = { "tdata3",  debug, read_tdata,   write_tdata   },

    /* User Pointer Masking */
    [CSR_UMTE]    = { "umte",    pointer_masking, read_umte,    write_umte    },
    [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask, write_upmmask },
    [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase, write_upmbase },
    /* Machine Pointer Masking */
    [CSR_MMTE]    = { "mmte",    pointer_masking, read_mmte,    write_mmte    },
    [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask, write_mpmmask },
    [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase, write_mpmbase },
    /* Supervisor Pointer Masking */
    [CSR_SMTE]    = { "smte",    pointer_masking, read_smte,    write_smte    },
    [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask, write_spmmask },
    [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase, write_spmbase },

    /* Performance Counters */
    [CSR_HPMCOUNTER3]  = { "hpmcounter3",  ctr, read_zero },
    [CSR_HPMCOUNTER4]  = { "hpmcounter4",  ctr, read_zero },
    [CSR_HPMCOUNTER5]  = { "hpmcounter5",  ctr, read_zero },
    [CSR_HPMCOUNTER6]  = { "hpmcounter6",  ctr, read_zero },
    [CSR_HPMCOUNTER7]  = { "hpmcounter7",  ctr, read_zero },
    [CSR_HPMCOUNTER8]  = { "hpmcounter8",  ctr, read_zero },
    [CSR_HPMCOUNTER9]  = { "hpmcounter9",  ctr, read_zero },
    [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_zero },
    [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_zero },
    [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_zero },
    [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_zero },
    [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_zero },
    [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_zero },
    [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_zero },
    [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_zero },
    [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_zero },
    [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_zero },
    [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_zero },
    [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_zero },
    [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_zero },
    [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_zero },
    [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_zero },
    [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_zero },
    [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_zero },
    [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_zero },
    [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_zero },
    [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_zero },
    [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_zero },
    [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_zero },

    [CSR_MHPMCOUNTER3]  = { "mhpmcounter3",  any, read_zero },
    [CSR_MHPMCOUNTER4]  = { "mhpmcounter4",  any, read_zero },
    [CSR_MHPMCOUNTER5]  = { "mhpmcounter5",  any, read_zero },
    [CSR_MHPMCOUNTER6]  = { "mhpmcounter6",  any, read_zero },
    [CSR_MHPMCOUNTER7]  = { "mhpmcounter7",  any, read_zero },
    [CSR_MHPMCOUNTER8]  = { "mhpmcounter8",  any, read_zero },
    [CSR_MHPMCOUNTER9]  = { "mhpmcounter9",  any, read_zero },
    [CSR_MHPMCOUNTER10] = { "mhpmcounter10", any, read_zero },
    [CSR_MHPMCOUNTER11] = { "mhpmcounter11", any, read_zero },
    [CSR_MHPMCOUNTER12] = { "mhpmcounter12", any, read_zero },
    [CSR_MHPMCOUNTER13] = { "mhpmcounter13", any, read_zero },
    [CSR_MHPMCOUNTER14] = { "mhpmcounter14", any, read_zero },
    [CSR_MHPMCOUNTER15] = { "mhpmcounter15", any, read_zero },
    [CSR_MHPMCOUNTER16] = { "mhpmcounter16", any, read_zero },
    [CSR_MHPMCOUNTER17] = { "mhpmcounter17", any, read_zero },
    [CSR_MHPMCOUNTER18] = { "mhpmcounter18", any, read_zero },
    [CSR_MHPMCOUNTER19] = { "mhpmcounter19", any, read_zero },
    [CSR_MHPMCOUNTER20] = { "mhpmcounter20", any, read_zero },
    [CSR_MHPMCOUNTER21] = { "mhpmcounter21", any, read_zero },
    [CSR_MHPMCOUNTER22] = { "mhpmcounter22", any, read_zero },
    [CSR_MHPMCOUNTER23] = { "mhpmcounter23", any, read_zero },
    [CSR_MHPMCOUNTER24] = { "mhpmcounter24", any, read_zero },
    [CSR_MHPMCOUNTER25] = { "mhpmcounter25", any, read_zero },
    [CSR_MHPMCOUNTER26] = { "mhpmcounter26", any, read_zero },
    [CSR_MHPMCOUNTER27] = { "mhpmcounter27", any, read_zero },
    [CSR_MHPMCOUNTER28] = { "mhpmcounter28", any, read_zero },
    [CSR_MHPMCOUNTER29] = { "mhpmcounter29", any, read_zero },
    [CSR_MHPMCOUNTER30] = { "mhpmcounter30", any, read_zero },
    [CSR_MHPMCOUNTER31] = { "mhpmcounter31", any, read_zero },

    [CSR_MHPMEVENT3]  = { "mhpmevent3",  any, read_zero },
    [CSR_MHPMEVENT4]  = { "mhpmevent4",  any, read_zero },
    [CSR_MHPMEVENT5]  = { "mhpmevent5",  any, read_zero },
    [CSR_MHPMEVENT6]  = { "mhpmevent6",  any, read_zero },
    [CSR_MHPMEVENT7]  = { "mhpmevent7",  any, read_zero },
    [CSR_MHPMEVENT8]  = { "mhpmevent8",  any, read_zero },
    [CSR_MHPMEVENT9]  = { "mhpmevent9",  any, read_zero },
    [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_zero },
    [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_zero },
    [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_zero },
    [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_zero },
    [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_zero },
    [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_zero },
    [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_zero },
    [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_zero },
    [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_zero },
    [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_zero },
    [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_zero },
    [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_zero },
    [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_zero },
    [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_zero },
    [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_zero },
    [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_zero },
    [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_zero },
    [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_zero },
    [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_zero },
    [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_zero },
    [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_zero },
    [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_zero },

    [CSR_HPMCOUNTER3H]  = { "hpmcounter3h",  ctr32, read_zero },
    [CSR_HPMCOUNTER4H]  = { "hpmcounter4h",  ctr32, read_zero },
    [CSR_HPMCOUNTER5H]  = { "hpmcounter5h",  ctr32, read_zero },
    [CSR_HPMCOUNTER6H]  = { "hpmcounter6h",  ctr32, read_zero },
    [CSR_HPMCOUNTER7H]  = { "hpmcounter7h",  ctr32, read_zero },
    [CSR_HPMCOUNTER8H]  = { "hpmcounter8h",  ctr32, read_zero },
    [CSR_HPMCOUNTER9H]  = { "hpmcounter9h",  ctr32, read_zero },
    [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_zero },
    [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_zero },
    [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_zero },
    [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_zero },
    [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_zero },
    [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_zero },
    [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_zero },
    [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_zero },
    [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_zero },
    [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_zero },
    [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_zero },
    [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_zero },
    [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_zero },
    [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_zero },
    [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_zero },
    [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_zero },
    [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_zero },
    [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_zero },
    [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_zero },
    [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_zero },
    [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_zero },
    [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_zero },

    [CSR_MHPMCOUNTER3H]  = { "mhpmcounter3h",  any32, read_zero },
    [CSR_MHPMCOUNTER4H]  = { "mhpmcounter4h",  any32, read_zero },
    [CSR_MHPMCOUNTER5H]  = { "mhpmcounter5h",  any32, read_zero },
    [CSR_MHPMCOUNTER6H]  = { "mhpmcounter6h",  any32, read_zero },
    [CSR_MHPMCOUNTER7H]  = { "mhpmcounter7h",  any32, read_zero },
    [CSR_MHPMCOUNTER8H]  = { "mhpmcounter8h",  any32, read_zero },
    [CSR_MHPMCOUNTER9H]  = { "mhpmcounter9h",  any32, read_zero },
    [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", any32, read_zero },
    [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", any32, read_zero },
    [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", any32, read_zero },
    [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", any32, read_zero },
    [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", any32, read_zero },
    [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", any32, read_zero },
    [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", any32, read_zero },
    [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", any32, read_zero },
    [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", any32, read_zero },
    [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", any32, read_zero },
    [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", any32, read_zero },
    [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", any32, read_zero },
    [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", any32, read_zero },
    [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", any32, read_zero },
    [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", any32, read_zero },
    [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", any32, read_zero },
    [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", any32, read_zero },
    [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", any32, read_zero },
    [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", any32, read_zero },
    [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", any32, read_zero },
    [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", any32, read_zero },
    [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", any32, read_zero },
#endif /* !CONFIG_USER_ONLY */