2 * RISC-V Control and Status Registers.
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "qemu/timer.h"
24 #include "tcg/tcg-cpu.h"
26 #include "time_helper.h"
27 #include "exec/exec-all.h"
28 #include "exec/tb-flush.h"
29 #include "sysemu/cpu-timers.h"
30 #include "qemu/guest-random.h"
31 #include "qapi/error.h"
/* CSR function table public API */

/*
 * Copy the registered handler for CSR number @csrno into *@ops.
 * The index is masked so out-of-range numbers wrap into the table
 * rather than reading past it.
 */
void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
{
    *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
}
/*
 * Install *@ops as the handler entry for CSR number @csrno.
 * Same wrap-around masking as riscv_get_csr_ops().
 */
void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
{
    csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
}
46 #if !defined(CONFIG_USER_ONLY)
/*
 * Smstateen access check for the state controlled by @bit in the
 * mstateen/hstateen/sstateen register at @index.
 *
 * Returns RISCV_EXCP_NONE when access is allowed; otherwise the
 * appropriate illegal-instruction or virtual-instruction exception,
 * depending on which privilege level's enable bit is clear.
 * M-mode and CPUs without Smstateen always pass.
 */
RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit)
{
    bool virt = env->virt_enabled;

    if (env->priv == PRV_M || !riscv_cpu_cfg(env)->ext_smstateen) {
        return RISCV_EXCP_NONE;
    }

    /* M-mode must have delegated this state to HS/S first. */
    if (!(env->mstateen[index] & bit)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* NOTE(review): enclosing "if (virt)" reconstructed from context. */
    if (virt) {
        if (!(env->hstateen[index] & bit)) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }

        if (env->priv == PRV_U && !(env->sstateen[index] & bit)) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    /* Non-virtualized U-mode under an S-mode-capable hart. */
    if (env->priv == PRV_U && riscv_has_ext(env, RVS)) {
        if (!(env->sstateen[index] & bit)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }

    return RISCV_EXCP_NONE;
}
/*
 * Predicate for the floating-point CSRs (fflags/frm/fcsr):
 * accessible when FP is enabled in mstatus, Zfinx is present, or we
 * are in the debugger; fcsr access is further gated by Smstateen.
 */
static RISCVException fs(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
    if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
        !riscv_cpu_cfg(env)->ext_zfinx) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* Zfinx case: FP CSR state is still subject to SMSTATEEN0_FCSR. */
    if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
        return smstateen_acc_ok(env, 0, SMSTATEEN0_FCSR);
    }
#endif
    return RISCV_EXCP_NONE;
}
/*
 * Predicate for the vector CSRs: requires Zve32x (implied by V) and,
 * in system emulation, vector state enabled in mstatus (or debugger).
 */
static RISCVException vs(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_cfg(env)->ext_zve32x) {
#if !defined(CONFIG_USER_ONLY)
        if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
#endif
        return RISCV_EXCP_NONE;
    }
    return RISCV_EXCP_ILLEGAL_INST;
}
/*
 * Predicate for the unprivileged counter CSRs (cycle/time/instret and
 * hpmcounter3..31, plus their RV32 high halves).  Checks, in order:
 * Zicntr for the fixed counters, PMU availability for hpmcounters,
 * then mcounteren/hcounteren/scounteren delegation by privilege level.
 */
static RISCVException ctr(CPURISCVState *env, int csrno)
{
#if !defined(CONFIG_USER_ONLY)
    RISCVCPU *cpu = env_archcpu(env);
    int ctr_index;
    target_ulong ctr_mask;
    int base_csrno = CSR_CYCLE;
    bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;

    if (rv32 && csrno >= CSR_CYCLEH) {
        /* Offset for RV32 hpmcounternh counters */
        /* NOTE(review): adjustment reconstructed — confirm against upstream. */
        base_csrno += 0x80;
    }
    ctr_index = csrno - base_csrno;
    ctr_mask = BIT(ctr_index);

    if ((csrno >= CSR_CYCLE && csrno <= CSR_INSTRET) ||
        (csrno >= CSR_CYCLEH && csrno <= CSR_INSTRETH)) {
        /* Fixed counters are gated on Zicntr, not on the PMU. */
        if (!riscv_cpu_cfg(env)->ext_zicntr) {
            return RISCV_EXCP_ILLEGAL_INST;
        }

        goto skip_ext_pmu_check;
    }

    if (!(cpu->pmu_avail_ctrs & ctr_mask)) {
        /* No counter is enabled in PMU or the counter is out of range */
        return RISCV_EXCP_ILLEGAL_INST;
    }

skip_ext_pmu_check:

    /* NOTE(review): debugger short-circuit reconstructed from context. */
    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    if (env->priv < PRV_M && !get_field(env->mcounteren, ctr_mask)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (env->virt_enabled) {
        if (!get_field(env->hcounteren, ctr_mask) ||
            (env->priv == PRV_U && !get_field(env->scounteren, ctr_mask))) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    if (riscv_has_ext(env, RVS) && env->priv == PRV_U &&
        !get_field(env->scounteren, ctr_mask)) {
        return RISCV_EXCP_ILLEGAL_INST;
    }
#endif
    return RISCV_EXCP_NONE;
}
163 static RISCVException
ctr32(CPURISCVState
*env
, int csrno
)
165 if (riscv_cpu_mxl(env
) != MXL_RV32
) {
166 return RISCV_EXCP_ILLEGAL_INST
;
169 return ctr(env
, csrno
);
/*
 * Predicate for the jvt CSR: requires Zcmt, and in system emulation
 * the Smstateen JVT enable bit for the current privilege level.
 */
static RISCVException zcmt(CPURISCVState *env, int csrno)
{
    if (!riscv_cpu_cfg(env)->ext_zcmt) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

#if !defined(CONFIG_USER_ONLY)
    RISCVException ret = smstateen_acc_ok(env, 0, SMSTATEEN0_JVT);
    if (ret != RISCV_EXCP_NONE) {
        /* NOTE(review): "return ret;" reconstructed from context. */
        return ret;
    }
#endif

    return RISCV_EXCP_NONE;
}
188 #if !defined(CONFIG_USER_ONLY)
/*
 * Predicate for the machine-mode programmable counters
 * (mhpmcounter3..31 and RV32 high halves): the counter must be
 * implemented by the PMU.
 */
static RISCVException mctr(CPURISCVState *env, int csrno)
{
    RISCVCPU *cpu = env_archcpu(env);
    uint32_t pmu_avail_ctrs = cpu->pmu_avail_ctrs;
    int ctr_index;
    int base_csrno = CSR_MHPMCOUNTER3;

    if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) {
        /* Offset for RV32 mhpmcounternh counters */
        /* NOTE(review): adjustment reconstructed — confirm against upstream. */
        csrno -= 0x80;
    }
    g_assert(csrno >= CSR_MHPMCOUNTER3 && csrno <= CSR_MHPMCOUNTER31);

    ctr_index = csrno - base_csrno;
    /* pmu_avail_ctrs bits 0..2 are the fixed counters; hpm starts at bit 3. */
    if ((BIT(ctr_index) & pmu_avail_ctrs >> 3) == 0) {
        /* The PMU is not enabled or counter is out of range */
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return RISCV_EXCP_NONE;
}
212 static RISCVException
mctr32(CPURISCVState
*env
, int csrno
)
214 if (riscv_cpu_mxl(env
) != MXL_RV32
) {
215 return RISCV_EXCP_ILLEGAL_INST
;
218 return mctr(env
, csrno
);
221 static RISCVException
sscofpmf(CPURISCVState
*env
, int csrno
)
223 if (!riscv_cpu_cfg(env
)->ext_sscofpmf
) {
224 return RISCV_EXCP_ILLEGAL_INST
;
227 return RISCV_EXCP_NONE
;
/* Predicate for CSRs that are always accessible (M-mode registers). */
static RISCVException any(CPURISCVState *env, int csrno)
{
    return RISCV_EXCP_NONE;
}
235 static RISCVException
any32(CPURISCVState
*env
, int csrno
)
237 if (riscv_cpu_mxl(env
) != MXL_RV32
) {
238 return RISCV_EXCP_ILLEGAL_INST
;
241 return any(env
, csrno
);
245 static RISCVException
aia_any(CPURISCVState
*env
, int csrno
)
247 if (!riscv_cpu_cfg(env
)->ext_smaia
) {
248 return RISCV_EXCP_ILLEGAL_INST
;
251 return any(env
, csrno
);
254 static RISCVException
aia_any32(CPURISCVState
*env
, int csrno
)
256 if (!riscv_cpu_cfg(env
)->ext_smaia
) {
257 return RISCV_EXCP_ILLEGAL_INST
;
260 return any32(env
, csrno
);
263 static RISCVException
smode(CPURISCVState
*env
, int csrno
)
265 if (riscv_has_ext(env
, RVS
)) {
266 return RISCV_EXCP_NONE
;
269 return RISCV_EXCP_ILLEGAL_INST
;
272 static RISCVException
smode32(CPURISCVState
*env
, int csrno
)
274 if (riscv_cpu_mxl(env
) != MXL_RV32
) {
275 return RISCV_EXCP_ILLEGAL_INST
;
278 return smode(env
, csrno
);
281 static RISCVException
aia_smode(CPURISCVState
*env
, int csrno
)
283 if (!riscv_cpu_cfg(env
)->ext_ssaia
) {
284 return RISCV_EXCP_ILLEGAL_INST
;
287 return smode(env
, csrno
);
290 static RISCVException
aia_smode32(CPURISCVState
*env
, int csrno
)
292 if (!riscv_cpu_cfg(env
)->ext_ssaia
) {
293 return RISCV_EXCP_ILLEGAL_INST
;
296 return smode32(env
, csrno
);
299 static RISCVException
hmode(CPURISCVState
*env
, int csrno
)
301 if (riscv_has_ext(env
, RVH
)) {
302 return RISCV_EXCP_NONE
;
305 return RISCV_EXCP_ILLEGAL_INST
;
308 static RISCVException
hmode32(CPURISCVState
*env
, int csrno
)
310 if (riscv_cpu_mxl(env
) != MXL_RV32
) {
311 return RISCV_EXCP_ILLEGAL_INST
;
314 return hmode(env
, csrno
);
318 static RISCVException
umode(CPURISCVState
*env
, int csrno
)
320 if (riscv_has_ext(env
, RVU
)) {
321 return RISCV_EXCP_NONE
;
324 return RISCV_EXCP_ILLEGAL_INST
;
327 static RISCVException
umode32(CPURISCVState
*env
, int csrno
)
329 if (riscv_cpu_mxl(env
) != MXL_RV32
) {
330 return RISCV_EXCP_ILLEGAL_INST
;
333 return umode(env
, csrno
);
336 static RISCVException
mstateen(CPURISCVState
*env
, int csrno
)
338 if (!riscv_cpu_cfg(env
)->ext_smstateen
) {
339 return RISCV_EXCP_ILLEGAL_INST
;
342 return any(env
, csrno
);
/*
 * Common predicate for hstateen/hstateenh CSRs.  @base is the first
 * CSR number of the group, used to index env->mstateen[].  Requires
 * Smstateen + H extension, and below M-mode the corresponding
 * mstateen SMSTATEEN_STATEEN delegation bit.
 */
static RISCVException hstateen_pred(CPURISCVState *env, int csrno, int base)
{
    if (!riscv_cpu_cfg(env)->ext_smstateen) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    RISCVException ret = hmode(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        /* NOTE(review): "return ret;" reconstructed from context. */
        return ret;
    }

    /* NOTE(review): debugger short-circuit reconstructed from context. */
    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    if (env->priv < PRV_M) {
        if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEEN)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
    }

    return RISCV_EXCP_NONE;
}
/* Predicate for the low hstateen0..3 CSRs. */
static RISCVException hstateen(CPURISCVState *env, int csrno)
{
    return hstateen_pred(env, csrno, CSR_HSTATEEN0);
}
/* Predicate for the RV32 high halves hstateen0h..3h. */
static RISCVException hstateenh(CPURISCVState *env, int csrno)
{
    return hstateen_pred(env, csrno, CSR_HSTATEEN0H);
}
/*
 * Predicate for the sstateen0..3 CSRs: requires Smstateen + S-mode,
 * plus the SMSTATEEN_STATEEN delegation bits in mstateen (and
 * hstateen when running virtualized).
 */
static RISCVException sstateen(CPURISCVState *env, int csrno)
{
    bool virt = env->virt_enabled;
    int index = csrno - CSR_SSTATEEN0;

    if (!riscv_cpu_cfg(env)->ext_smstateen) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    RISCVException ret = smode(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        /* NOTE(review): "return ret;" reconstructed from context. */
        return ret;
    }

    /* NOTE(review): debugger short-circuit reconstructed from context. */
    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    if (env->priv < PRV_M) {
        if (!(env->mstateen[index] & SMSTATEEN_STATEEN)) {
            return RISCV_EXCP_ILLEGAL_INST;
        }

        if (virt) {
            if (!(env->hstateen[index] & SMSTATEEN_STATEEN)) {
                return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
            }
        }
    }

    return RISCV_EXCP_NONE;
}
/*
 * Predicate for the Sstc stimecmp/vstimecmp CSRs (and RV32 highs):
 * requires the Sstc extension and a platform rdtime function, the
 * right base predicate (hmode for vstimecmp, smode otherwise), and
 * below M-mode the TM counter-enable plus the STCE envcfg bit at
 * each intervening privilege level.
 */
static RISCVException sstc(CPURISCVState *env, int csrno)
{
    bool hmode_check = false;

    if (!riscv_cpu_cfg(env)->ext_sstc || !env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) {
        /* NOTE(review): assignment reconstructed from context. */
        hmode_check = true;
    }

    RISCVException ret = hmode_check ? hmode(env, csrno) : smode(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        /* NOTE(review): "return ret;" reconstructed from context. */
        return ret;
    }

    /* NOTE(review): debugger short-circuit reconstructed from context. */
    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    }

    /*
     * No need of separate function for rv32 as menvcfg stores both menvcfg
     * menvcfgh for RV32.
     */
    if (!(get_field(env->mcounteren, COUNTEREN_TM) &&
          get_field(env->menvcfg, MENVCFG_STCE))) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (env->virt_enabled) {
        if (!(get_field(env->hcounteren, COUNTEREN_TM) &&
              get_field(env->henvcfg, HENVCFG_STCE))) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
    }

    return RISCV_EXCP_NONE;
}
456 static RISCVException
sstc_32(CPURISCVState
*env
, int csrno
)
458 if (riscv_cpu_mxl(env
) != MXL_RV32
) {
459 return RISCV_EXCP_ILLEGAL_INST
;
462 return sstc(env
, csrno
);
465 static RISCVException
satp(CPURISCVState
*env
, int csrno
)
467 if (env
->priv
== PRV_S
&& !env
->virt_enabled
&&
468 get_field(env
->mstatus
, MSTATUS_TVM
)) {
469 return RISCV_EXCP_ILLEGAL_INST
;
471 if (env
->priv
== PRV_S
&& env
->virt_enabled
&&
472 get_field(env
->hstatus
, HSTATUS_VTVM
)) {
473 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
476 return smode(env
, csrno
);
479 static RISCVException
hgatp(CPURISCVState
*env
, int csrno
)
481 if (env
->priv
== PRV_S
&& !env
->virt_enabled
&&
482 get_field(env
->mstatus
, MSTATUS_TVM
)) {
483 return RISCV_EXCP_ILLEGAL_INST
;
486 return hmode(env
, csrno
);
489 /* Checks if PointerMasking registers could be accessed */
490 static RISCVException
pointer_masking(CPURISCVState
*env
, int csrno
)
492 /* Check if j-ext is present */
493 if (riscv_has_ext(env
, RVJ
)) {
494 return RISCV_EXCP_NONE
;
496 return RISCV_EXCP_ILLEGAL_INST
;
499 static RISCVException
aia_hmode(CPURISCVState
*env
, int csrno
)
501 if (!riscv_cpu_cfg(env
)->ext_ssaia
) {
502 return RISCV_EXCP_ILLEGAL_INST
;
505 return hmode(env
, csrno
);
508 static RISCVException
aia_hmode32(CPURISCVState
*env
, int csrno
)
510 if (!riscv_cpu_cfg(env
)->ext_ssaia
) {
511 return RISCV_EXCP_ILLEGAL_INST
;
514 return hmode32(env
, csrno
);
/*
 * Predicate for the PMP CSRs: requires the pmp config option; odd
 * pmpcfg registers do not exist on RV64.
 */
static RISCVException pmp(CPURISCVState *env, int csrno)
{
    if (riscv_cpu_cfg(env)->pmp) {
        if (csrno <= CSR_PMPCFG3) {
            uint32_t reg_index = csrno - CSR_PMPCFG0;

            /* TODO: RV128 restriction check */
            if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
                return RISCV_EXCP_ILLEGAL_INST;
            }
        }

        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
}
535 static RISCVException
have_mseccfg(CPURISCVState
*env
, int csrno
)
537 if (riscv_cpu_cfg(env
)->ext_smepmp
) {
538 return RISCV_EXCP_NONE
;
540 if (riscv_cpu_cfg(env
)->ext_zkr
) {
541 return RISCV_EXCP_NONE
;
544 return RISCV_EXCP_ILLEGAL_INST
;
547 static RISCVException
debug(CPURISCVState
*env
, int csrno
)
549 if (riscv_cpu_cfg(env
)->debug
) {
550 return RISCV_EXCP_NONE
;
553 return RISCV_EXCP_ILLEGAL_INST
;
/*
 * Predicate for the Zkr seed CSR.  Access policy per the entropy
 * source spec: always allowed in M-mode, gated on mseccfg.SSEED /
 * mseccfg.USEED for S/U mode, and virtual modes always trap.
 */
static RISCVException seed(CPURISCVState *env, int csrno)
{
    if (!riscv_cpu_cfg(env)->ext_zkr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

#if !defined(CONFIG_USER_ONLY)
    /* NOTE(review): debugger short-circuit reconstructed from context. */
    if (env->debugger) {
        return RISCV_EXCP_NONE;
    }

    /*
     * With a CSR read-write instruction:
     * 1) The seed CSR is always available in machine mode as normal.
     * 2) Attempted access to seed from virtual modes VS and VU always raises
     * an exception(virtual instruction exception only if mseccfg.sseed=1).
     * 3) Without the corresponding access control bit set to 1, any attempted
     * access to seed from U, S or HS modes will raise an illegal instruction
     * exception.
     */
    if (env->priv == PRV_M) {
        return RISCV_EXCP_NONE;
    } else if (env->virt_enabled) {
        if (env->mseccfg & MSECCFG_SSEED) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        return RISCV_EXCP_ILLEGAL_INST;
    }

    if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) {
        return RISCV_EXCP_NONE;
    } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) {
        return RISCV_EXCP_NONE;
    }

    return RISCV_EXCP_ILLEGAL_INST;
#else
    return RISCV_EXCP_NONE;
#endif
}
/* User Floating-Point CSRs */

/* Read fflags: accrued FP exception flags. */
static RISCVException read_fflags(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = riscv_cpu_get_fflags(env);
    return RISCV_EXCP_NONE;
}
/* Write fflags; marks FP state dirty in mstatus when F is present. */
static RISCVException write_fflags(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVF)) {
        env->mstatus |= MSTATUS_FS;
    }
#endif
    riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
    return RISCV_EXCP_NONE;
}
/* Read frm: dynamic FP rounding mode. */
static RISCVException read_frm(CPURISCVState *env, int csrno,
                               target_ulong *val)
{
    /* NOTE(review): assignment reconstructed from context. */
    *val = env->frm;
    return RISCV_EXCP_NONE;
}
/* Write frm; marks FP state dirty in mstatus when F is present. */
static RISCVException write_frm(CPURISCVState *env, int csrno,
                                target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVF)) {
        env->mstatus |= MSTATUS_FS;
    }
#endif
    env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
    return RISCV_EXCP_NONE;
}
/* Read fcsr: composed from the fflags and frm fields. */
static RISCVException read_fcsr(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
        | (env->frm << FSR_RD_SHIFT);
    return RISCV_EXCP_NONE;
}
/* Write fcsr: splits the value back into frm and fflags. */
static RISCVException write_fcsr(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    if (riscv_has_ext(env, RVF)) {
        env->mstatus |= MSTATUS_FS;
    }
#endif
    env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
    riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
    return RISCV_EXCP_NONE;
}
/* Read vtype: vill goes in the sign bit of the current XLEN. */
static RISCVException read_vtype(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    uint64_t vill;

    /* NOTE(review): switch scaffolding reconstructed from context. */
    switch (env->xl) {
    case MXL_RV32:
        vill = (uint32_t)env->vill << 31;
        break;
    case MXL_RV64:
        vill = (uint64_t)env->vill << 63;
        break;
    default:
        g_assert_not_reached();
    }
    *val = (target_ulong)vill | env->vtype;
    return RISCV_EXCP_NONE;
}
/* Read vl: current vector length. */
static RISCVException read_vl(CPURISCVState *env, int csrno,
                              target_ulong *val)
{
    /* NOTE(review): assignment reconstructed from context. */
    *val = env->vl;
    return RISCV_EXCP_NONE;
}
/* Read vlenb: vector register length in bytes (config constant). */
static RISCVException read_vlenb(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->vlenb;
    return RISCV_EXCP_NONE;
}
/* Read vxrm: fixed-point rounding mode. */
static RISCVException read_vxrm(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    /* NOTE(review): assignment reconstructed from context. */
    *val = env->vxrm;
    return RISCV_EXCP_NONE;
}
/* Write vxrm; marks vector state dirty in system emulation. */
static RISCVException write_vxrm(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    /* NOTE(review): assignment reconstructed from context. */
    env->vxrm = val;
    return RISCV_EXCP_NONE;
}
/* Read vxsat: fixed-point saturation flag. */
static RISCVException read_vxsat(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    /* NOTE(review): assignment reconstructed from context. */
    *val = env->vxsat;
    return RISCV_EXCP_NONE;
}
/* Write vxsat; marks vector state dirty in system emulation. */
static RISCVException write_vxsat(CPURISCVState *env, int csrno,
                                  target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    /* NOTE(review): assignment reconstructed — confirm masking upstream. */
    env->vxsat = val;
    return RISCV_EXCP_NONE;
}
/* Read vstart: index of the first element to execute. */
static RISCVException read_vstart(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    /* NOTE(review): assignment reconstructed from context. */
    *val = env->vstart;
    return RISCV_EXCP_NONE;
}
/* Write vstart, masked to the architecturally writable bits. */
static RISCVException write_vstart(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif

    /*
     * The vstart CSR is defined to have only enough writable bits
     * to hold the largest element index, i.e. lg2(VLEN) bits.
     */
    env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlenb << 3));
    return RISCV_EXCP_NONE;
}
/* Read vcsr: composed from the vxrm and vxsat fields. */
static RISCVException read_vcsr(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
    return RISCV_EXCP_NONE;
}
/* Write vcsr: splits the value back into vxrm and vxsat. */
static RISCVException write_vcsr(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
#if !defined(CONFIG_USER_ONLY)
    env->mstatus |= MSTATUS_VS;
#endif
    env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
    env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
    return RISCV_EXCP_NONE;
}
/* User Timers and Counters */

/*
 * Return a tick value for cycle/instret emulation.  With icount,
 * instruction counts come from icount_get_raw(); otherwise host TSC
 * ticks are used.  @shift selects the upper 32 bits (RV32 highs).
 * NOTE(review): else-branches and the final shift were reconstructed
 * from context — confirm against upstream.
 */
static target_ulong get_ticks(bool shift, bool instructions)
{
    int64_t val;
    target_ulong result;

#if !defined(CONFIG_USER_ONLY)
    if (icount_enabled()) {
        if (instructions) {
            val = icount_get_raw();
        } else {
            val = icount_get();
        }
    } else {
        val = cpu_get_host_ticks();
    }
#else
    val = cpu_get_host_ticks();
#endif

    if (shift) {
        result = val >> 32;
    } else {
        result = val;
    }

    return result;
}
793 #if defined(CONFIG_USER_ONLY)
/* User-only read of the time CSR: host ticks stand in for mtime. */
static RISCVException read_time(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    *val = cpu_get_host_ticks();
    return RISCV_EXCP_NONE;
}
/* User-only read of timeh: upper 32 bits of the host tick count. */
static RISCVException read_timeh(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    *val = cpu_get_host_ticks() >> 32;
    return RISCV_EXCP_NONE;
}
/* User-only hpmcounter read: cycle or instret from get_ticks(). */
static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = get_ticks(false, (csrno == CSR_INSTRET));
    return RISCV_EXCP_NONE;
}
/* User-only hpmcounterh read: upper 32 bits via get_ticks(true, ...). */
static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
                                       target_ulong *val)
{
    *val = get_ticks(true, (csrno == CSR_INSTRETH));
    return RISCV_EXCP_NONE;
}
822 #else /* CONFIG_USER_ONLY */
/* Read mhpmevent3..31 (indexed relative to mcountinhibit). */
static RISCVException read_mhpmevent(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    int evt_index = csrno - CSR_MCOUNTINHIBIT;

    *val = env->mhpmevent_val[evt_index];

    return RISCV_EXCP_NONE;
}
/*
 * Write mhpmevent3..31 and refresh the PMU event mapping.  On RV32
 * the stored high half is merged in before updating the map.
 */
static RISCVException write_mhpmevent(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    int evt_index = csrno - CSR_MCOUNTINHIBIT;
    uint64_t mhpmevt_val = val;

    env->mhpmevent_val[evt_index] = val;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpmevt_val = mhpmevt_val |
                      ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
    }
    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}
/* Read mhpmevent3h..31h (RV32 high halves; +3 skips fixed counters). */
static RISCVException read_mhpmeventh(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    int evt_index = csrno - CSR_MHPMEVENT3H + 3;

    *val = env->mhpmeventh_val[evt_index];

    return RISCV_EXCP_NONE;
}
/*
 * Write mhpmevent3h..31h, then refresh the PMU event mapping with
 * the combined 64-bit event value.
 */
static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    int evt_index = csrno - CSR_MHPMEVENT3H + 3;
    uint64_t mhpmevth_val = val;
    uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];

    mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
    env->mhpmeventh_val[evt_index] = val;

    riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);

    return RISCV_EXCP_NONE;
}
/*
 * Write an mhpmcounter (or mcycle/minstret).  When the counter is
 * monitoring cycles/instructions, snapshot the current tick value so
 * later reads compute a delta; programmable counters also rearm the
 * overflow timer.
 */
static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
                                        target_ulong val)
{
    int ctr_idx = csrno - CSR_MCYCLE;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = val;
    bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);

    counter->mhpmcounter_val = val;
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
        counter->mhpmcounter_prev = get_ticks(false, instr);
        /* NOTE(review): ctr_idx > 2 guard reconstructed — confirm upstream. */
        if (ctr_idx > 2) {
            if (riscv_cpu_mxl(env) == MXL_RV32) {
                mhpmctr_val = mhpmctr_val |
                              ((uint64_t)counter->mhpmcounterh_val << 32);
            }
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        /* Other counters can keep incrementing from the given value */
        counter->mhpmcounter_prev = val;
    }

    return RISCV_EXCP_NONE;
}
/*
 * Write the RV32 high half of an mhpmcounter; mirrors
 * write_mhpmcounter() for the upper 32 bits.
 */
static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
                                         target_ulong val)
{
    int ctr_idx = csrno - CSR_MCYCLEH;
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    uint64_t mhpmctr_val = counter->mhpmcounter_val;
    uint64_t mhpmctrh_val = val;
    bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);

    counter->mhpmcounterh_val = val;
    mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
        counter->mhpmcounterh_prev = get_ticks(true, instr);
        /* NOTE(review): ctr_idx > 2 guard reconstructed — confirm upstream. */
        if (ctr_idx > 2) {
            riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
        }
    } else {
        counter->mhpmcounterh_prev = val;
    }

    return RISCV_EXCP_NONE;
}
/*
 * Common PMU counter read.  Returns the stored value when the counter
 * is inhibited (or just stopped), otherwise the live tick delta added
 * to the last written value.  @upper_half selects the RV32 high word.
 */
static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
                                         bool upper_half, uint32_t ctr_idx)
{
    PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
    target_ulong ctr_prev = upper_half ? counter->mhpmcounterh_prev :
                                         counter->mhpmcounter_prev;
    target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val :
                                        counter->mhpmcounter_val;
    bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);

    if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
        /*
         * Counter should not increment if inhibit bit is set. We can't really
         * stop the icount counting. Just return the counter value written by
         * the supervisor to indicate that counter was not incremented.
         */
        if (!counter->started) {
            /* NOTE(review): early-out body reconstructed from context. */
            *val = ctr_val;
            return RISCV_EXCP_NONE;
        } else {
            /* Mark that the counter has been stopped */
            counter->started = false;
        }
    }

    /*
     * The kernel computes the perf delta by subtracting the current value from
     * the value it initialized previously (ctr_val).
     */
    if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
        *val = get_ticks(upper_half, instr) - ctr_prev + ctr_val;
    } else {
        /* NOTE(review): else-branch reconstructed from context. */
        *val = ctr_val;
    }

    return RISCV_EXCP_NONE;
}
/*
 * System-emulation read of cycle/instret/hpmcounter CSRs (both the M
 * and unprivileged aliases); dispatches to riscv_pmu_read_ctr().
 */
static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    int ctr_index;

    if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
        ctr_index = csrno - CSR_MCYCLE;
    } else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
        ctr_index = csrno - CSR_CYCLE;
    } else {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return riscv_pmu_read_ctr(env, val, false, ctr_index);
}
/* System-emulation read of the RV32 counter high halves. */
static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
                                       target_ulong *val)
{
    int ctr_index;

    if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) {
        ctr_index = csrno - CSR_MCYCLEH;
    } else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) {
        ctr_index = csrno - CSR_CYCLEH;
    } else {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return riscv_pmu_read_ctr(env, val, true, ctr_index);
}
/*
 * Read scountovf (Sscofpmf): one bit per counter whose mhpmevent OF
 * flag is set and which is delegated via mcounteren.  On RV32 the OF
 * bit lives in the mhpmeventh half.
 */
static RISCVException read_scountovf(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT;
    int i;
    *val = 0;
    target_ulong *mhpm_evt_val;
    uint64_t of_bit_mask;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        mhpm_evt_val = env->mhpmeventh_val;
        of_bit_mask = MHPMEVENTH_BIT_OF;
    } else {
        mhpm_evt_val = env->mhpmevent_val;
        of_bit_mask = MHPMEVENT_BIT_OF;
    }

    for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) {
        if ((get_field(env->mcounteren, BIT(i))) &&
            (mhpm_evt_val[i] & of_bit_mask)) {
            /* NOTE(review): accumulation reconstructed from context. */
            *val |= BIT(i);
        }
    }

    return RISCV_EXCP_NONE;
}
/*
 * System-emulation read of time: platform rdtime callback plus
 * htimedelta when running virtualized.
 */
static RISCVException read_time(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    uint64_t delta = env->virt_enabled ? env->htimedelta : 0;

    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
    return RISCV_EXCP_NONE;
}
/* System-emulation read of timeh: upper 32 bits of time + htimedelta. */
static RISCVException read_timeh(CPURISCVState *env, int csrno,
                                 target_ulong *val)
{
    uint64_t delta = env->virt_enabled ? env->htimedelta : 0;

    if (!env->rdtime_fn) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
    return RISCV_EXCP_NONE;
}
/* Read vstimecmp (low word on RV32, full value on RV64). */
static RISCVException read_vstimecmp(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = env->vstimecmp;

    return RISCV_EXCP_NONE;
}
/* Read vstimecmph: upper 32 bits of vstimecmp. */
static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
                                      target_ulong *val)
{
    *val = env->vstimecmp >> 32;

    return RISCV_EXCP_NONE;
}
/*
 * Write vstimecmp (RV32 writes only the low word) and reprogram the
 * VS timer, offset by htimedelta.
 */
static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
    } else {
        env->vstimecmp = val;
    }

    riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
                              env->htimedelta, MIP_VSTIP);

    return RISCV_EXCP_NONE;
}
/* Write vstimecmph (upper 32 bits) and reprogram the VS timer. */
static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
                                       target_ulong val)
{
    env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
    riscv_timer_write_timecmp(env, env->vstimer, env->vstimecmp,
                              env->htimedelta, MIP_VSTIP);

    return RISCV_EXCP_NONE;
}
/* Read stimecmp; virtualized guests see vstimecmp instead. */
static RISCVException read_stimecmp(CPURISCVState *env, int csrno,
                                    target_ulong *val)
{
    if (env->virt_enabled) {
        *val = env->vstimecmp;
    } else {
        *val = env->stimecmp;
    }

    return RISCV_EXCP_NONE;
}
/* Read stimecmph; virtualized guests see vstimecmp's high half. */
static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    if (env->virt_enabled) {
        *val = env->vstimecmp >> 32;
    } else {
        *val = env->stimecmp >> 32;
    }

    return RISCV_EXCP_NONE;
}
/*
 * Write stimecmp.  Virtualized guests are redirected to vstimecmp
 * (trapping when hvictl.VTI forbids it); natively the S timer is
 * reprogrammed with no delta.
 */
static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
                                     target_ulong val)
{
    if (env->virt_enabled) {
        if (env->hvictl & HVICTL_VTI) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        return write_vstimecmp(env, csrno, val);
    }

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        env->stimecmp = deposit64(env->stimecmp, 0, 32, (uint64_t)val);
    } else {
        env->stimecmp = val;
    }

    riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);

    return RISCV_EXCP_NONE;
}
/* Write stimecmph; mirrors write_stimecmp() for the high 32 bits. */
static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    if (env->virt_enabled) {
        if (env->hvictl & HVICTL_VTI) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        return write_vstimecmph(env, csrno, val);
    }

    env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
    riscv_timer_write_timecmp(env, env->stimer, env->stimecmp, 0, MIP_STIP);

    return RISCV_EXCP_NONE;
}
1150 #define VSTOPI_NUM_SRCS 5
1153 * All core local interrupts except the fixed ones 0:12. This macro is for
1154 * virtual interrupts logic so please don't change this to avoid messing up
1155 * the whole support, For reference see AIA spec: `5.3 Interrupt filtering and
1156 * virtual interrupts for supervisor level` and `6.3.2 Virtual interrupts for
1159 #define LOCAL_INTERRUPTS (~0x1FFFULL)
1161 static const uint64_t delegable_ints
=
1162 S_MODE_INTERRUPTS
| VS_MODE_INTERRUPTS
| MIP_LCOFIP
;
1163 static const uint64_t vs_delegable_ints
=
1164 (VS_MODE_INTERRUPTS
| LOCAL_INTERRUPTS
) & ~MIP_LCOFIP
;
1165 static const uint64_t all_ints
= M_MODE_INTERRUPTS
| S_MODE_INTERRUPTS
|
1166 HS_MODE_INTERRUPTS
| LOCAL_INTERRUPTS
;
1167 #define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
1168 (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
1169 (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
1170 (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
1171 (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
1172 (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
1173 (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
1174 (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
1175 (1ULL << (RISCV_EXCP_U_ECALL)) | \
1176 (1ULL << (RISCV_EXCP_S_ECALL)) | \
1177 (1ULL << (RISCV_EXCP_VS_ECALL)) | \
1178 (1ULL << (RISCV_EXCP_M_ECALL)) | \
1179 (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
1180 (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
1181 (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
1182 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
1183 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
1184 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
1185 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
1186 static const target_ulong vs_delegable_excps
= DELEGABLE_EXCPS
&
1187 ~((1ULL << (RISCV_EXCP_S_ECALL
)) |
1188 (1ULL << (RISCV_EXCP_VS_ECALL
)) |
1189 (1ULL << (RISCV_EXCP_M_ECALL
)) |
1190 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT
)) |
1191 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT
)) |
1192 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT
)) |
1193 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT
)));
1194 static const target_ulong sstatus_v1_10_mask
= SSTATUS_SIE
| SSTATUS_SPIE
|
1195 SSTATUS_UIE
| SSTATUS_UPIE
| SSTATUS_SPP
| SSTATUS_FS
| SSTATUS_XS
|
1196 SSTATUS_SUM
| SSTATUS_MXR
| SSTATUS_VS
;
1199 * Spec allows for bits 13:63 to be either read-only or writable.
1200 * So far we have interrupt LCOFIP in that region which is writable.
1202 * Also, spec allows to inject virtual interrupts in this region even
1203 * without any hardware interrupts for that interrupt number.
1205 * For now interrupt in 13:63 region are all kept writable. 13 being
1206 * LCOFIP and 14:63 being virtual only. Change this in future if we
1207 * introduce more interrupts that are not writable.
1210 /* Bit STIP can be an alias of mip.STIP that's why it's writable in mvip. */
1211 static const uint64_t mvip_writable_mask
= MIP_SSIP
| MIP_STIP
| MIP_SEIP
|
1213 static const uint64_t mvien_writable_mask
= MIP_SSIP
| MIP_SEIP
|
1216 static const uint64_t sip_writable_mask
= SIP_SSIP
| LOCAL_INTERRUPTS
;
1217 static const uint64_t hip_writable_mask
= MIP_VSSIP
;
1218 static const uint64_t hvip_writable_mask
= MIP_VSSIP
| MIP_VSTIP
|
1219 MIP_VSEIP
| LOCAL_INTERRUPTS
;
1220 static const uint64_t hvien_writable_mask
= LOCAL_INTERRUPTS
;
1222 static const uint64_t vsip_writable_mask
= MIP_VSSIP
| LOCAL_INTERRUPTS
;
1224 const bool valid_vm_1_10_32
[16] = {
1225 [VM_1_10_MBARE
] = true,
1226 [VM_1_10_SV32
] = true
1229 const bool valid_vm_1_10_64
[16] = {
1230 [VM_1_10_MBARE
] = true,
1231 [VM_1_10_SV39
] = true,
1232 [VM_1_10_SV48
] = true,
1233 [VM_1_10_SV57
] = true
/* Machine Information Registers */

/* Generic reader for CSRs that always read as zero. */
static RISCVException read_zero(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    /* NOTE(review): assignment reconstructed from context. */
    *val = 0;
    return RISCV_EXCP_NONE;
}
/* Generic writer for read-only CSRs: silently discard the value. */
static RISCVException write_ignore(CPURISCVState *env, int csrno,
                                   target_ulong val)
{
    return RISCV_EXCP_NONE;
}
/* Read mvendorid from the CPU configuration. */
static RISCVException read_mvendorid(CPURISCVState *env, int csrno,
                                     target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->mvendorid;
    return RISCV_EXCP_NONE;
}
/* Read marchid from the CPU configuration. */
static RISCVException read_marchid(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->marchid;
    return RISCV_EXCP_NONE;
}
/* Read mimpid from the CPU configuration. */
static RISCVException read_mimpid(CPURISCVState *env, int csrno,
                                  target_ulong *val)
{
    *val = riscv_cpu_cfg(env)->mimpid;
    return RISCV_EXCP_NONE;
}
/* Read mhartid: this hart's ID. */
static RISCVException read_mhartid(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mhartid;
    return RISCV_EXCP_NONE;
}
/* Machine Trap Setup */

/* We do not store SD explicitly, only compute it on demand. */
static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
{
    if ((status & MSTATUS_FS) == MSTATUS_FS ||
        (status & MSTATUS_VS) == MSTATUS_VS ||
        (status & MSTATUS_XS) == MSTATUS_XS) {
        /* NOTE(review): switch scaffolding reconstructed from context. */
        switch (xl) {
        case MXL_RV32:
            return status | MSTATUS32_SD;
        case MXL_RV64:
            return status | MSTATUS64_SD;
        case MXL_RV128:
            /* RV128: SD lives in the upper status half returned here. */
            return MSTATUSH128_SD;
        default:
            g_assert_not_reached();
        }
    }
    return status;
}
/* Read mstatus with the SD bit synthesized for the current XLEN. */
static RISCVException read_mstatus(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
    return RISCV_EXCP_NONE;
}
/* True when translation mode @vm is supported per the satp_mode map. */
static bool validate_vm(CPURISCVState *env, target_ulong vm)
{
    uint64_t mode_supported = riscv_cpu_cfg(env)->satp_mode.map;
    return get_field(mode_supported, (1 << vm));
}
/*
 * Legalize a satp/vsatp write: keep @old_xatp when the requested MODE
 * is unsupported or nothing changed; otherwise flush the TLB and
 * accept @val.  NOTE(review): declarations and tail (the accept/
 * reject returns) reconstructed from context — confirm upstream.
 */
static target_ulong legalize_xatp(CPURISCVState *env, target_ulong old_xatp,
                                  target_ulong val)
{
    target_ulong mask;
    bool vm;

    if (riscv_cpu_mxl(env) == MXL_RV32) {
        vm = validate_vm(env, get_field(val, SATP32_MODE));
        mask = (val ^ old_xatp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
    } else {
        vm = validate_vm(env, get_field(val, SATP64_MODE));
        mask = (val ^ old_xatp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
    }

    if (vm && mask) {
        /*
         * The ISA defines SATP.MODE=Bare as "no translation", but we still
         * pass these through QEMU's TLB emulation as it improves
         * performance.  Flushing the TLB on SATP writes with paging
         * enabled avoids leaking those invalid cached mappings.
         */
        tlb_flush(env_cpu(env));
        return val;
    }
    return old_xatp;
}
/*
 * Legalize the WARL mstatus.MPP field: keep @old_mpp when the new
 * value names a privilege mode the CPU does not implement.
 * NOTE(review): switch scaffolding reconstructed from context.
 */
static target_ulong legalize_mpp(CPURISCVState *env, target_ulong old_mpp,
                                 target_ulong val)
{
    bool valid = false;
    target_ulong new_mpp = get_field(val, MSTATUS_MPP);

    switch (new_mpp) {
    case PRV_M:
        valid = true;
        break;
    case PRV_S:
        valid = riscv_has_ext(env, RVS);
        break;
    case PRV_U:
        valid = riscv_has_ext(env, RVU);
        break;
    }

    /* Remain field unchanged if new_mpp value is invalid */
    if (!valid) {
        val = set_field(val, MSTATUS_MPP, old_mpp);
    }

    return val;
}
/*
 * Write mstatus: legalizes MPP, flushes the TLB when MXR changes,
 * builds the writable-bit mask by extension (F/V/H, UXL on RV64+),
 * then recomputes the effective XLEN mask state.
 */
static RISCVException write_mstatus(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus = env->mstatus;
    uint64_t mask = 0;
    RISCVMXL xl = riscv_cpu_mxl(env);

    /*
     * MPP field have been made WARL since priv version 1.11. However,
     * legalization for it will not break any software running on 1.10.
     */
    val = legalize_mpp(env, get_field(mstatus, MSTATUS_MPP), val);

    /* flush tlb on mstatus fields that affect VM */
    if ((val ^ mstatus) & MSTATUS_MXR) {
        tlb_flush(env_cpu(env));
    }
    /* NOTE(review): trailing mask term reconstructed — confirm upstream. */
    mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
        MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
        MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
        MSTATUS_TW;

    if (riscv_has_ext(env, RVF)) {
        mask |= MSTATUS_FS;
    }
    if (riscv_has_ext(env, RVV)) {
        mask |= MSTATUS_VS;
    }

    if (xl != MXL_RV32 || env->debugger) {
        if (riscv_has_ext(env, RVH)) {
            mask |= MSTATUS_MPV | MSTATUS_GVA;
        }
        if ((val & MSTATUS64_UXL) != 0) {
            mask |= MSTATUS64_UXL;
        }
    }

    mstatus = (mstatus & ~mask) | (val & mask);

    env->mstatus = mstatus;

    /*
     * Except in debug mode, UXL/SXL can only be modified by higher
     * privilege mode. So xl will not be changed in normal mode.
     */
    if (env->debugger) {
        env->xl = cpu_recompute_xl(env);
    }

    riscv_cpu_update_mask(env);
    return RISCV_EXCP_NONE;
}
1419 static RISCVException
read_mstatush(CPURISCVState
*env
, int csrno
,
1422 *val
= env
->mstatus
>> 32;
1423 return RISCV_EXCP_NONE
;
1426 static RISCVException
write_mstatush(CPURISCVState
*env
, int csrno
,
1429 uint64_t valh
= (uint64_t)val
<< 32;
1430 uint64_t mask
= riscv_has_ext(env
, RVH
) ? MSTATUS_MPV
| MSTATUS_GVA
: 0;
1432 env
->mstatus
= (env
->mstatus
& ~mask
) | (valh
& mask
);
1434 return RISCV_EXCP_NONE
;
1437 static RISCVException
read_mstatus_i128(CPURISCVState
*env
, int csrno
,
1440 *val
= int128_make128(env
->mstatus
, add_status_sd(MXL_RV128
,
1442 return RISCV_EXCP_NONE
;
1445 static RISCVException
read_misa_i128(CPURISCVState
*env
, int csrno
,
1448 *val
= int128_make128(env
->misa_ext
, (uint64_t)MXL_RV128
<< 62);
1449 return RISCV_EXCP_NONE
;
1452 static RISCVException
read_misa(CPURISCVState
*env
, int csrno
,
1457 switch (env
->misa_mxl
) {
1459 misa
= (target_ulong
)MXL_RV32
<< 30;
1461 #ifdef TARGET_RISCV64
1463 misa
= (target_ulong
)MXL_RV64
<< 62;
1467 g_assert_not_reached();
1470 *val
= misa
| env
->misa_ext
;
1471 return RISCV_EXCP_NONE
;
1474 static RISCVException
write_misa(CPURISCVState
*env
, int csrno
,
1477 RISCVCPU
*cpu
= env_archcpu(env
);
1478 uint32_t orig_misa_ext
= env
->misa_ext
;
1479 Error
*local_err
= NULL
;
1481 if (!riscv_cpu_cfg(env
)->misa_w
) {
1482 /* drop write to misa */
1483 return RISCV_EXCP_NONE
;
1486 /* Mask extensions that are not supported by this hart */
1487 val
&= env
->misa_ext_mask
;
1490 * Suppress 'C' if next instruction is not aligned
1491 * TODO: this should check next_pc
1493 if ((val
& RVC
) && (GETPC() & ~3) != 0) {
1497 /* Disable RVG if any of its dependencies are disabled */
1498 if (!(val
& RVI
&& val
& RVM
&& val
& RVA
&&
1499 val
& RVF
&& val
& RVD
)) {
1503 /* If nothing changed, do nothing. */
1504 if (val
== env
->misa_ext
) {
1505 return RISCV_EXCP_NONE
;
1508 env
->misa_ext
= val
;
1509 riscv_cpu_validate_set_extensions(cpu
, &local_err
);
1510 if (local_err
!= NULL
) {
1511 /* Rollback on validation error */
1512 qemu_log_mask(LOG_GUEST_ERROR
, "Unable to write MISA ext value "
1513 "0x%x, keeping existing MISA ext 0x%x\n",
1514 env
->misa_ext
, orig_misa_ext
);
1516 env
->misa_ext
= orig_misa_ext
;
1518 return RISCV_EXCP_NONE
;
1521 if (!(env
->misa_ext
& RVF
)) {
1522 env
->mstatus
&= ~MSTATUS_FS
;
1525 /* flush translation cache */
1526 tb_flush(env_cpu(env
));
1527 env
->xl
= riscv_cpu_mxl(env
);
1528 return RISCV_EXCP_NONE
;
1531 static RISCVException
read_medeleg(CPURISCVState
*env
, int csrno
,
1534 *val
= env
->medeleg
;
1535 return RISCV_EXCP_NONE
;
1538 static RISCVException
write_medeleg(CPURISCVState
*env
, int csrno
,
1541 env
->medeleg
= (env
->medeleg
& ~DELEGABLE_EXCPS
) | (val
& DELEGABLE_EXCPS
);
1542 return RISCV_EXCP_NONE
;
1545 static RISCVException
rmw_mideleg64(CPURISCVState
*env
, int csrno
,
1547 uint64_t new_val
, uint64_t wr_mask
)
1549 uint64_t mask
= wr_mask
& delegable_ints
;
1552 *ret_val
= env
->mideleg
;
1555 env
->mideleg
= (env
->mideleg
& ~mask
) | (new_val
& mask
);
1557 if (riscv_has_ext(env
, RVH
)) {
1558 env
->mideleg
|= HS_MODE_INTERRUPTS
;
1561 return RISCV_EXCP_NONE
;
1564 static RISCVException
rmw_mideleg(CPURISCVState
*env
, int csrno
,
1565 target_ulong
*ret_val
,
1566 target_ulong new_val
, target_ulong wr_mask
)
1571 ret
= rmw_mideleg64(env
, csrno
, &rval
, new_val
, wr_mask
);
1579 static RISCVException
rmw_midelegh(CPURISCVState
*env
, int csrno
,
1580 target_ulong
*ret_val
,
1581 target_ulong new_val
,
1582 target_ulong wr_mask
)
1587 ret
= rmw_mideleg64(env
, csrno
, &rval
,
1588 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
1590 *ret_val
= rval
>> 32;
1596 static RISCVException
rmw_mie64(CPURISCVState
*env
, int csrno
,
1598 uint64_t new_val
, uint64_t wr_mask
)
1600 uint64_t mask
= wr_mask
& all_ints
;
1603 *ret_val
= env
->mie
;
1606 env
->mie
= (env
->mie
& ~mask
) | (new_val
& mask
);
1608 if (!riscv_has_ext(env
, RVH
)) {
1609 env
->mie
&= ~((uint64_t)HS_MODE_INTERRUPTS
);
1612 return RISCV_EXCP_NONE
;
1615 static RISCVException
rmw_mie(CPURISCVState
*env
, int csrno
,
1616 target_ulong
*ret_val
,
1617 target_ulong new_val
, target_ulong wr_mask
)
1622 ret
= rmw_mie64(env
, csrno
, &rval
, new_val
, wr_mask
);
1630 static RISCVException
rmw_mieh(CPURISCVState
*env
, int csrno
,
1631 target_ulong
*ret_val
,
1632 target_ulong new_val
, target_ulong wr_mask
)
1637 ret
= rmw_mie64(env
, csrno
, &rval
,
1638 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
1640 *ret_val
= rval
>> 32;
1646 static RISCVException
rmw_mvien64(CPURISCVState
*env
, int csrno
,
1648 uint64_t new_val
, uint64_t wr_mask
)
1650 uint64_t mask
= wr_mask
& mvien_writable_mask
;
1653 *ret_val
= env
->mvien
;
1656 env
->mvien
= (env
->mvien
& ~mask
) | (new_val
& mask
);
1658 return RISCV_EXCP_NONE
;
1661 static RISCVException
rmw_mvien(CPURISCVState
*env
, int csrno
,
1662 target_ulong
*ret_val
,
1663 target_ulong new_val
, target_ulong wr_mask
)
1668 ret
= rmw_mvien64(env
, csrno
, &rval
, new_val
, wr_mask
);
1676 static RISCVException
rmw_mvienh(CPURISCVState
*env
, int csrno
,
1677 target_ulong
*ret_val
,
1678 target_ulong new_val
, target_ulong wr_mask
)
1683 ret
= rmw_mvien64(env
, csrno
, &rval
,
1684 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
1686 *ret_val
= rval
>> 32;
1692 static RISCVException
read_mtopi(CPURISCVState
*env
, int csrno
,
1698 irq
= riscv_cpu_mirq_pending(env
);
1699 if (irq
<= 0 || irq
> 63) {
1702 iprio
= env
->miprio
[irq
];
1704 if (riscv_cpu_default_priority(irq
) > IPRIO_DEFAULT_M
) {
1705 iprio
= IPRIO_MMAXIPRIO
;
1708 *val
= (irq
& TOPI_IID_MASK
) << TOPI_IID_SHIFT
;
1712 return RISCV_EXCP_NONE
;
1715 static int aia_xlate_vs_csrno(CPURISCVState
*env
, int csrno
)
1717 if (!env
->virt_enabled
) {
1723 return CSR_VSISELECT
;
1733 static RISCVException
rmw_xiselect(CPURISCVState
*env
, int csrno
,
1734 target_ulong
*val
, target_ulong new_val
,
1735 target_ulong wr_mask
)
1737 target_ulong
*iselect
;
1739 /* Translate CSR number for VS-mode */
1740 csrno
= aia_xlate_vs_csrno(env
, csrno
);
1742 /* Find the iselect CSR based on CSR number */
1745 iselect
= &env
->miselect
;
1748 iselect
= &env
->siselect
;
1751 iselect
= &env
->vsiselect
;
1754 return RISCV_EXCP_ILLEGAL_INST
;
1761 wr_mask
&= ISELECT_MASK
;
1763 *iselect
= (*iselect
& ~wr_mask
) | (new_val
& wr_mask
);
1766 return RISCV_EXCP_NONE
;
1769 static int rmw_iprio(target_ulong xlen
,
1770 target_ulong iselect
, uint8_t *iprio
,
1771 target_ulong
*val
, target_ulong new_val
,
1772 target_ulong wr_mask
, int ext_irq_no
)
1775 target_ulong old_val
;
1777 if (iselect
< ISELECT_IPRIO0
|| ISELECT_IPRIO15
< iselect
) {
1780 if (xlen
!= 32 && iselect
& 0x1) {
1784 nirqs
= 4 * (xlen
/ 32);
1785 firq
= ((iselect
- ISELECT_IPRIO0
) / (xlen
/ 32)) * (nirqs
);
1788 for (i
= 0; i
< nirqs
; i
++) {
1789 old_val
|= ((target_ulong
)iprio
[firq
+ i
]) << (IPRIO_IRQ_BITS
* i
);
1797 new_val
= (old_val
& ~wr_mask
) | (new_val
& wr_mask
);
1798 for (i
= 0; i
< nirqs
; i
++) {
1800 * M-level and S-level external IRQ priority always read-only
1801 * zero. This means default priority order is always preferred
1802 * for M-level and S-level external IRQs.
1804 if ((firq
+ i
) == ext_irq_no
) {
1807 iprio
[firq
+ i
] = (new_val
>> (IPRIO_IRQ_BITS
* i
)) & 0xff;
1814 static RISCVException
rmw_xireg(CPURISCVState
*env
, int csrno
,
1815 target_ulong
*val
, target_ulong new_val
,
1816 target_ulong wr_mask
)
1818 bool virt
, isel_reserved
;
1821 target_ulong priv
, isel
, vgein
;
1823 /* Translate CSR number for VS-mode */
1824 csrno
= aia_xlate_vs_csrno(env
, csrno
);
1826 /* Decode register details from CSR number */
1828 isel_reserved
= false;
1831 iprio
= env
->miprio
;
1832 isel
= env
->miselect
;
1836 if (env
->priv
== PRV_S
&& env
->mvien
& MIP_SEIP
&&
1837 env
->siselect
>= ISELECT_IMSIC_EIDELIVERY
&&
1838 env
->siselect
<= ISELECT_IMSIC_EIE63
) {
1841 iprio
= env
->siprio
;
1842 isel
= env
->siselect
;
1846 iprio
= env
->hviprio
;
1847 isel
= env
->vsiselect
;
1855 /* Find the selected guest interrupt file */
1856 vgein
= (virt
) ? get_field(env
->hstatus
, HSTATUS_VGEIN
) : 0;
1858 if (ISELECT_IPRIO0
<= isel
&& isel
<= ISELECT_IPRIO15
) {
1859 /* Local interrupt priority registers not available for VS-mode */
1861 ret
= rmw_iprio(riscv_cpu_mxl_bits(env
),
1862 isel
, iprio
, val
, new_val
, wr_mask
,
1863 (priv
== PRV_M
) ? IRQ_M_EXT
: IRQ_S_EXT
);
1865 } else if (ISELECT_IMSIC_FIRST
<= isel
&& isel
<= ISELECT_IMSIC_LAST
) {
1866 /* IMSIC registers only available when machine implements it. */
1867 if (env
->aia_ireg_rmw_fn
[priv
]) {
1868 /* Selected guest interrupt file should not be zero */
1869 if (virt
&& (!vgein
|| env
->geilen
< vgein
)) {
1872 /* Call machine specific IMSIC register emulation */
1873 ret
= env
->aia_ireg_rmw_fn
[priv
](env
->aia_ireg_rmw_fn_arg
[priv
],
1874 AIA_MAKE_IREG(isel
, priv
, virt
, vgein
,
1875 riscv_cpu_mxl_bits(env
)),
1876 val
, new_val
, wr_mask
);
1879 isel_reserved
= true;
1884 return (env
->virt_enabled
&& virt
&& !isel_reserved
) ?
1885 RISCV_EXCP_VIRT_INSTRUCTION_FAULT
: RISCV_EXCP_ILLEGAL_INST
;
1887 return RISCV_EXCP_NONE
;
1890 static RISCVException
rmw_xtopei(CPURISCVState
*env
, int csrno
,
1891 target_ulong
*val
, target_ulong new_val
,
1892 target_ulong wr_mask
)
1896 target_ulong priv
, vgein
;
1898 /* Translate CSR number for VS-mode */
1899 csrno
= aia_xlate_vs_csrno(env
, csrno
);
1901 /* Decode register details from CSR number */
1908 if (env
->mvien
& MIP_SEIP
&& env
->priv
== PRV_S
) {
1921 /* IMSIC CSRs only available when machine implements IMSIC. */
1922 if (!env
->aia_ireg_rmw_fn
[priv
]) {
1926 /* Find the selected guest interrupt file */
1927 vgein
= (virt
) ? get_field(env
->hstatus
, HSTATUS_VGEIN
) : 0;
1929 /* Selected guest interrupt file should be valid */
1930 if (virt
&& (!vgein
|| env
->geilen
< vgein
)) {
1934 /* Call machine specific IMSIC register emulation for TOPEI */
1935 ret
= env
->aia_ireg_rmw_fn
[priv
](env
->aia_ireg_rmw_fn_arg
[priv
],
1936 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI
, priv
, virt
, vgein
,
1937 riscv_cpu_mxl_bits(env
)),
1938 val
, new_val
, wr_mask
);
1942 return (env
->virt_enabled
&& virt
) ?
1943 RISCV_EXCP_VIRT_INSTRUCTION_FAULT
: RISCV_EXCP_ILLEGAL_INST
;
1945 return RISCV_EXCP_NONE
;
1948 static RISCVException
read_mtvec(CPURISCVState
*env
, int csrno
,
1952 return RISCV_EXCP_NONE
;
1955 static RISCVException
write_mtvec(CPURISCVState
*env
, int csrno
,
1958 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
1959 if ((val
& 3) < 2) {
1962 qemu_log_mask(LOG_UNIMP
, "CSR_MTVEC: reserved mode not supported\n");
1964 return RISCV_EXCP_NONE
;
1967 static RISCVException
read_mcountinhibit(CPURISCVState
*env
, int csrno
,
1970 *val
= env
->mcountinhibit
;
1971 return RISCV_EXCP_NONE
;
1974 static RISCVException
write_mcountinhibit(CPURISCVState
*env
, int csrno
,
1978 PMUCTRState
*counter
;
1979 RISCVCPU
*cpu
= env_archcpu(env
);
1981 /* WARL register - disable unavailable counters; TM bit is always 0 */
1982 env
->mcountinhibit
=
1983 val
& (cpu
->pmu_avail_ctrs
| COUNTEREN_CY
| COUNTEREN_IR
);
1985 /* Check if any other counter is also monitoring cycles/instructions */
1986 for (cidx
= 0; cidx
< RV_MAX_MHPMCOUNTERS
; cidx
++) {
1987 if (!get_field(env
->mcountinhibit
, BIT(cidx
))) {
1988 counter
= &env
->pmu_ctrs
[cidx
];
1989 counter
->started
= true;
1993 return RISCV_EXCP_NONE
;
1996 static RISCVException
read_mcounteren(CPURISCVState
*env
, int csrno
,
1999 *val
= env
->mcounteren
;
2000 return RISCV_EXCP_NONE
;
2003 static RISCVException
write_mcounteren(CPURISCVState
*env
, int csrno
,
2006 RISCVCPU
*cpu
= env_archcpu(env
);
2008 /* WARL register - disable unavailable counters */
2009 env
->mcounteren
= val
& (cpu
->pmu_avail_ctrs
| COUNTEREN_CY
| COUNTEREN_TM
|
2011 return RISCV_EXCP_NONE
;
2014 /* Machine Trap Handling */
2015 static RISCVException
read_mscratch_i128(CPURISCVState
*env
, int csrno
,
2018 *val
= int128_make128(env
->mscratch
, env
->mscratchh
);
2019 return RISCV_EXCP_NONE
;
2022 static RISCVException
write_mscratch_i128(CPURISCVState
*env
, int csrno
,
2025 env
->mscratch
= int128_getlo(val
);
2026 env
->mscratchh
= int128_gethi(val
);
2027 return RISCV_EXCP_NONE
;
2030 static RISCVException
read_mscratch(CPURISCVState
*env
, int csrno
,
2033 *val
= env
->mscratch
;
2034 return RISCV_EXCP_NONE
;
2037 static RISCVException
write_mscratch(CPURISCVState
*env
, int csrno
,
2040 env
->mscratch
= val
;
2041 return RISCV_EXCP_NONE
;
2044 static RISCVException
read_mepc(CPURISCVState
*env
, int csrno
,
2048 return RISCV_EXCP_NONE
;
2051 static RISCVException
write_mepc(CPURISCVState
*env
, int csrno
,
2055 return RISCV_EXCP_NONE
;
2058 static RISCVException
read_mcause(CPURISCVState
*env
, int csrno
,
2062 return RISCV_EXCP_NONE
;
2065 static RISCVException
write_mcause(CPURISCVState
*env
, int csrno
,
2069 return RISCV_EXCP_NONE
;
2072 static RISCVException
read_mtval(CPURISCVState
*env
, int csrno
,
2076 return RISCV_EXCP_NONE
;
2079 static RISCVException
write_mtval(CPURISCVState
*env
, int csrno
,
2083 return RISCV_EXCP_NONE
;
2086 /* Execution environment configuration setup */
2087 static RISCVException
read_menvcfg(CPURISCVState
*env
, int csrno
,
2090 *val
= env
->menvcfg
;
2091 return RISCV_EXCP_NONE
;
2094 static RISCVException
write_menvcfg(CPURISCVState
*env
, int csrno
,
2097 const RISCVCPUConfig
*cfg
= riscv_cpu_cfg(env
);
2098 uint64_t mask
= MENVCFG_FIOM
| MENVCFG_CBIE
| MENVCFG_CBCFE
| MENVCFG_CBZE
;
2100 if (riscv_cpu_mxl(env
) == MXL_RV64
) {
2101 mask
|= (cfg
->ext_svpbmt
? MENVCFG_PBMTE
: 0) |
2102 (cfg
->ext_sstc
? MENVCFG_STCE
: 0) |
2103 (cfg
->ext_svadu
? MENVCFG_ADUE
: 0);
2105 env
->menvcfg
= (env
->menvcfg
& ~mask
) | (val
& mask
);
2107 return RISCV_EXCP_NONE
;
2110 static RISCVException
read_menvcfgh(CPURISCVState
*env
, int csrno
,
2113 *val
= env
->menvcfg
>> 32;
2114 return RISCV_EXCP_NONE
;
2117 static RISCVException
write_menvcfgh(CPURISCVState
*env
, int csrno
,
2120 const RISCVCPUConfig
*cfg
= riscv_cpu_cfg(env
);
2121 uint64_t mask
= (cfg
->ext_svpbmt
? MENVCFG_PBMTE
: 0) |
2122 (cfg
->ext_sstc
? MENVCFG_STCE
: 0) |
2123 (cfg
->ext_svadu
? MENVCFG_ADUE
: 0);
2124 uint64_t valh
= (uint64_t)val
<< 32;
2126 env
->menvcfg
= (env
->menvcfg
& ~mask
) | (valh
& mask
);
2128 return RISCV_EXCP_NONE
;
2131 static RISCVException
read_senvcfg(CPURISCVState
*env
, int csrno
,
2136 ret
= smstateen_acc_ok(env
, 0, SMSTATEEN0_HSENVCFG
);
2137 if (ret
!= RISCV_EXCP_NONE
) {
2141 *val
= env
->senvcfg
;
2142 return RISCV_EXCP_NONE
;
2145 static RISCVException
write_senvcfg(CPURISCVState
*env
, int csrno
,
2148 uint64_t mask
= SENVCFG_FIOM
| SENVCFG_CBIE
| SENVCFG_CBCFE
| SENVCFG_CBZE
;
2151 ret
= smstateen_acc_ok(env
, 0, SMSTATEEN0_HSENVCFG
);
2152 if (ret
!= RISCV_EXCP_NONE
) {
2156 env
->senvcfg
= (env
->senvcfg
& ~mask
) | (val
& mask
);
2157 return RISCV_EXCP_NONE
;
2160 static RISCVException
read_henvcfg(CPURISCVState
*env
, int csrno
,
2165 ret
= smstateen_acc_ok(env
, 0, SMSTATEEN0_HSENVCFG
);
2166 if (ret
!= RISCV_EXCP_NONE
) {
2171 * henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0
2172 * henvcfg.stce is read_only 0 when menvcfg.stce = 0
2173 * henvcfg.adue is read_only 0 when menvcfg.adue = 0
2175 *val
= env
->henvcfg
& (~(HENVCFG_PBMTE
| HENVCFG_STCE
| HENVCFG_ADUE
) |
2177 return RISCV_EXCP_NONE
;
2180 static RISCVException
write_henvcfg(CPURISCVState
*env
, int csrno
,
2183 uint64_t mask
= HENVCFG_FIOM
| HENVCFG_CBIE
| HENVCFG_CBCFE
| HENVCFG_CBZE
;
2186 ret
= smstateen_acc_ok(env
, 0, SMSTATEEN0_HSENVCFG
);
2187 if (ret
!= RISCV_EXCP_NONE
) {
2191 if (riscv_cpu_mxl(env
) == MXL_RV64
) {
2192 mask
|= env
->menvcfg
& (HENVCFG_PBMTE
| HENVCFG_STCE
| HENVCFG_ADUE
);
2195 env
->henvcfg
= (env
->henvcfg
& ~mask
) | (val
& mask
);
2197 return RISCV_EXCP_NONE
;
2200 static RISCVException
read_henvcfgh(CPURISCVState
*env
, int csrno
,
2205 ret
= smstateen_acc_ok(env
, 0, SMSTATEEN0_HSENVCFG
);
2206 if (ret
!= RISCV_EXCP_NONE
) {
2210 *val
= (env
->henvcfg
& (~(HENVCFG_PBMTE
| HENVCFG_STCE
| HENVCFG_ADUE
) |
2211 env
->menvcfg
)) >> 32;
2212 return RISCV_EXCP_NONE
;
2215 static RISCVException
write_henvcfgh(CPURISCVState
*env
, int csrno
,
2218 uint64_t mask
= env
->menvcfg
& (HENVCFG_PBMTE
| HENVCFG_STCE
|
2220 uint64_t valh
= (uint64_t)val
<< 32;
2223 ret
= smstateen_acc_ok(env
, 0, SMSTATEEN0_HSENVCFG
);
2224 if (ret
!= RISCV_EXCP_NONE
) {
2228 env
->henvcfg
= (env
->henvcfg
& ~mask
) | (valh
& mask
);
2229 return RISCV_EXCP_NONE
;
2232 static RISCVException
read_mstateen(CPURISCVState
*env
, int csrno
,
2235 *val
= env
->mstateen
[csrno
- CSR_MSTATEEN0
];
2237 return RISCV_EXCP_NONE
;
2240 static RISCVException
write_mstateen(CPURISCVState
*env
, int csrno
,
2241 uint64_t wr_mask
, target_ulong new_val
)
2245 reg
= &env
->mstateen
[csrno
- CSR_MSTATEEN0
];
2246 *reg
= (*reg
& ~wr_mask
) | (new_val
& wr_mask
);
2248 return RISCV_EXCP_NONE
;
2251 static RISCVException
write_mstateen0(CPURISCVState
*env
, int csrno
,
2252 target_ulong new_val
)
2254 uint64_t wr_mask
= SMSTATEEN_STATEEN
| SMSTATEEN0_HSENVCFG
;
2255 if (!riscv_has_ext(env
, RVF
)) {
2256 wr_mask
|= SMSTATEEN0_FCSR
;
2259 if (env
->priv_ver
>= PRIV_VERSION_1_13_0
) {
2260 wr_mask
|= SMSTATEEN0_P1P13
;
2263 return write_mstateen(env
, csrno
, wr_mask
, new_val
);
2266 static RISCVException
write_mstateen_1_3(CPURISCVState
*env
, int csrno
,
2267 target_ulong new_val
)
2269 return write_mstateen(env
, csrno
, SMSTATEEN_STATEEN
, new_val
);
2272 static RISCVException
read_mstateenh(CPURISCVState
*env
, int csrno
,
2275 *val
= env
->mstateen
[csrno
- CSR_MSTATEEN0H
] >> 32;
2277 return RISCV_EXCP_NONE
;
2280 static RISCVException
write_mstateenh(CPURISCVState
*env
, int csrno
,
2281 uint64_t wr_mask
, target_ulong new_val
)
2285 reg
= &env
->mstateen
[csrno
- CSR_MSTATEEN0H
];
2286 val
= (uint64_t)new_val
<< 32;
2287 val
|= *reg
& 0xFFFFFFFF;
2288 *reg
= (*reg
& ~wr_mask
) | (val
& wr_mask
);
2290 return RISCV_EXCP_NONE
;
2293 static RISCVException
write_mstateen0h(CPURISCVState
*env
, int csrno
,
2294 target_ulong new_val
)
2296 uint64_t wr_mask
= SMSTATEEN_STATEEN
| SMSTATEEN0_HSENVCFG
;
2298 if (env
->priv_ver
>= PRIV_VERSION_1_13_0
) {
2299 wr_mask
|= SMSTATEEN0_P1P13
;
2302 return write_mstateenh(env
, csrno
, wr_mask
, new_val
);
2305 static RISCVException
write_mstateenh_1_3(CPURISCVState
*env
, int csrno
,
2306 target_ulong new_val
)
2308 return write_mstateenh(env
, csrno
, SMSTATEEN_STATEEN
, new_val
);
2311 static RISCVException
read_hstateen(CPURISCVState
*env
, int csrno
,
2314 int index
= csrno
- CSR_HSTATEEN0
;
2316 *val
= env
->hstateen
[index
] & env
->mstateen
[index
];
2318 return RISCV_EXCP_NONE
;
2321 static RISCVException
write_hstateen(CPURISCVState
*env
, int csrno
,
2322 uint64_t mask
, target_ulong new_val
)
2324 int index
= csrno
- CSR_HSTATEEN0
;
2325 uint64_t *reg
, wr_mask
;
2327 reg
= &env
->hstateen
[index
];
2328 wr_mask
= env
->mstateen
[index
] & mask
;
2329 *reg
= (*reg
& ~wr_mask
) | (new_val
& wr_mask
);
2331 return RISCV_EXCP_NONE
;
2334 static RISCVException
write_hstateen0(CPURISCVState
*env
, int csrno
,
2335 target_ulong new_val
)
2337 uint64_t wr_mask
= SMSTATEEN_STATEEN
| SMSTATEEN0_HSENVCFG
;
2339 if (!riscv_has_ext(env
, RVF
)) {
2340 wr_mask
|= SMSTATEEN0_FCSR
;
2343 return write_hstateen(env
, csrno
, wr_mask
, new_val
);
2346 static RISCVException
write_hstateen_1_3(CPURISCVState
*env
, int csrno
,
2347 target_ulong new_val
)
2349 return write_hstateen(env
, csrno
, SMSTATEEN_STATEEN
, new_val
);
2352 static RISCVException
read_hstateenh(CPURISCVState
*env
, int csrno
,
2355 int index
= csrno
- CSR_HSTATEEN0H
;
2357 *val
= (env
->hstateen
[index
] >> 32) & (env
->mstateen
[index
] >> 32);
2359 return RISCV_EXCP_NONE
;
2362 static RISCVException
write_hstateenh(CPURISCVState
*env
, int csrno
,
2363 uint64_t mask
, target_ulong new_val
)
2365 int index
= csrno
- CSR_HSTATEEN0H
;
2366 uint64_t *reg
, wr_mask
, val
;
2368 reg
= &env
->hstateen
[index
];
2369 val
= (uint64_t)new_val
<< 32;
2370 val
|= *reg
& 0xFFFFFFFF;
2371 wr_mask
= env
->mstateen
[index
] & mask
;
2372 *reg
= (*reg
& ~wr_mask
) | (val
& wr_mask
);
2374 return RISCV_EXCP_NONE
;
2377 static RISCVException
write_hstateen0h(CPURISCVState
*env
, int csrno
,
2378 target_ulong new_val
)
2380 uint64_t wr_mask
= SMSTATEEN_STATEEN
| SMSTATEEN0_HSENVCFG
;
2382 return write_hstateenh(env
, csrno
, wr_mask
, new_val
);
2385 static RISCVException
write_hstateenh_1_3(CPURISCVState
*env
, int csrno
,
2386 target_ulong new_val
)
2388 return write_hstateenh(env
, csrno
, SMSTATEEN_STATEEN
, new_val
);
2391 static RISCVException
read_sstateen(CPURISCVState
*env
, int csrno
,
2394 bool virt
= env
->virt_enabled
;
2395 int index
= csrno
- CSR_SSTATEEN0
;
2397 *val
= env
->sstateen
[index
] & env
->mstateen
[index
];
2399 *val
&= env
->hstateen
[index
];
2402 return RISCV_EXCP_NONE
;
2405 static RISCVException
write_sstateen(CPURISCVState
*env
, int csrno
,
2406 uint64_t mask
, target_ulong new_val
)
2408 bool virt
= env
->virt_enabled
;
2409 int index
= csrno
- CSR_SSTATEEN0
;
2413 wr_mask
= env
->mstateen
[index
] & mask
;
2415 wr_mask
&= env
->hstateen
[index
];
2418 reg
= &env
->sstateen
[index
];
2419 *reg
= (*reg
& ~wr_mask
) | (new_val
& wr_mask
);
2421 return RISCV_EXCP_NONE
;
2424 static RISCVException
write_sstateen0(CPURISCVState
*env
, int csrno
,
2425 target_ulong new_val
)
2427 uint64_t wr_mask
= SMSTATEEN_STATEEN
| SMSTATEEN0_HSENVCFG
;
2429 if (!riscv_has_ext(env
, RVF
)) {
2430 wr_mask
|= SMSTATEEN0_FCSR
;
2433 return write_sstateen(env
, csrno
, wr_mask
, new_val
);
2436 static RISCVException
write_sstateen_1_3(CPURISCVState
*env
, int csrno
,
2437 target_ulong new_val
)
2439 return write_sstateen(env
, csrno
, SMSTATEEN_STATEEN
, new_val
);
2442 static RISCVException
rmw_mip64(CPURISCVState
*env
, int csrno
,
2444 uint64_t new_val
, uint64_t wr_mask
)
2446 uint64_t old_mip
, mask
= wr_mask
& delegable_ints
;
2449 if (mask
& MIP_SEIP
) {
2450 env
->software_seip
= new_val
& MIP_SEIP
;
2451 new_val
|= env
->external_seip
* MIP_SEIP
;
2454 if (riscv_cpu_cfg(env
)->ext_sstc
&& (env
->priv
== PRV_M
) &&
2455 get_field(env
->menvcfg
, MENVCFG_STCE
)) {
2456 /* sstc extension forbids STIP & VSTIP to be writeable in mip */
2457 mask
= mask
& ~(MIP_STIP
| MIP_VSTIP
);
2461 old_mip
= riscv_cpu_update_mip(env
, mask
, (new_val
& mask
));
2466 if (csrno
!= CSR_HVIP
) {
2467 gin
= get_field(env
->hstatus
, HSTATUS_VGEIN
);
2468 old_mip
|= (env
->hgeip
& ((target_ulong
)1 << gin
)) ? MIP_VSEIP
: 0;
2469 old_mip
|= env
->vstime_irq
? MIP_VSTIP
: 0;
2476 return RISCV_EXCP_NONE
;
2479 static RISCVException
rmw_mip(CPURISCVState
*env
, int csrno
,
2480 target_ulong
*ret_val
,
2481 target_ulong new_val
, target_ulong wr_mask
)
2486 ret
= rmw_mip64(env
, csrno
, &rval
, new_val
, wr_mask
);
2494 static RISCVException
rmw_miph(CPURISCVState
*env
, int csrno
,
2495 target_ulong
*ret_val
,
2496 target_ulong new_val
, target_ulong wr_mask
)
2501 ret
= rmw_mip64(env
, csrno
, &rval
,
2502 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
2504 *ret_val
= rval
>> 32;
2511 * The function is written for two use-cases:
2512 * 1- To access mvip csr as is for m-mode access.
2513 * 2- To access sip as a combination of mip and mvip for s-mode.
2515 * Both report bits 1, 5, 9 and 13:63 but with the exception of
2516 * STIP being read-only zero in case of mvip when sstc extension
2518 * Also, sip needs to be read-only zero when both mideleg[i] and
2519 * mvien[i] are zero but mvip needs to be an alias of mip.
2521 static RISCVException
rmw_mvip64(CPURISCVState
*env
, int csrno
,
2523 uint64_t new_val
, uint64_t wr_mask
)
2525 RISCVCPU
*cpu
= env_archcpu(env
);
2526 target_ulong ret_mip
= 0;
2531 * mideleg[i] mvien[i]
2532 * 0 0 No delegation. mvip[i] is alias of mip[i].
2533 * 0 1 mvip[i] becomes source of interrupt, mip bypassed.
2534 * 1 X mip[i] is source of interrupt and mvip[i] aliases
2537 * So alias condition would be for bits:
2538 * ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (mideleg | ~mvien)) |
2539 * (!sstc & MIP_STIP)
2541 * Non-alias condition will be for bits:
2542 * (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (~mideleg & mvien)
2544 * alias_mask denotes the bits that come from mip nalias_mask denotes bits
2545 * that come from hvip.
2547 uint64_t alias_mask
= ((S_MODE_INTERRUPTS
| LOCAL_INTERRUPTS
) &
2548 (env
->mideleg
| ~env
->mvien
)) | MIP_STIP
;
2549 uint64_t nalias_mask
= (S_MODE_INTERRUPTS
| LOCAL_INTERRUPTS
) &
2550 (~env
->mideleg
& env
->mvien
);
2551 uint64_t wr_mask_mvip
;
2552 uint64_t wr_mask_mip
;
2555 * mideleg[i] mvien[i]
2556 * 0 0 sip[i] read-only zero.
2557 * 0 1 sip[i] alias of mvip[i].
2558 * 1 X sip[i] alias of mip[i].
2560 * Both alias and non-alias mask remain same for sip except for bits
2561 * which are zero in both mideleg and mvien.
2563 if (csrno
== CSR_SIP
) {
2564 /* Remove bits that are zero in both mideleg and mvien. */
2565 alias_mask
&= (env
->mideleg
| env
->mvien
);
2566 nalias_mask
&= (env
->mideleg
| env
->mvien
);
2570 * If sstc is present, mvip.STIP is not an alias of mip.STIP so clear
2571 * that our in mip returned value.
2573 if (cpu
->cfg
.ext_sstc
&& (env
->priv
== PRV_M
) &&
2574 get_field(env
->menvcfg
, MENVCFG_STCE
)) {
2575 alias_mask
&= ~MIP_STIP
;
2578 wr_mask_mip
= wr_mask
& alias_mask
& mvip_writable_mask
;
2579 wr_mask_mvip
= wr_mask
& nalias_mask
& mvip_writable_mask
;
2582 * For bits set in alias_mask, mvip needs to be alias of mip, so forward
2585 ret
= rmw_mip(env
, CSR_MIP
, &ret_mip
, new_val
, wr_mask_mip
);
2586 if (ret
!= RISCV_EXCP_NONE
) {
2590 old_mvip
= env
->mvip
;
2593 * Write to mvip. Update only non-alias bits. Alias bits were updated
2594 * in mip in rmw_mip above.
2597 env
->mvip
= (env
->mvip
& ~wr_mask_mvip
) | (new_val
& wr_mask_mvip
);
2600 * Given mvip is separate source from mip, we need to trigger interrupt
2601 * from here separately. Normally this happen from riscv_cpu_update_mip.
2603 riscv_cpu_interrupt(env
);
2607 ret_mip
&= alias_mask
;
2608 old_mvip
&= nalias_mask
;
2610 *ret_val
= old_mvip
| ret_mip
;
2613 return RISCV_EXCP_NONE
;
2616 static RISCVException
rmw_mvip(CPURISCVState
*env
, int csrno
,
2617 target_ulong
*ret_val
,
2618 target_ulong new_val
, target_ulong wr_mask
)
2623 ret
= rmw_mvip64(env
, csrno
, &rval
, new_val
, wr_mask
);
2631 static RISCVException
rmw_mviph(CPURISCVState
*env
, int csrno
,
2632 target_ulong
*ret_val
,
2633 target_ulong new_val
, target_ulong wr_mask
)
2638 ret
= rmw_mvip64(env
, csrno
, &rval
,
2639 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
2641 *ret_val
= rval
>> 32;
2647 /* Supervisor Trap Setup */
2648 static RISCVException
read_sstatus_i128(CPURISCVState
*env
, int csrno
,
2651 uint64_t mask
= sstatus_v1_10_mask
;
2652 uint64_t sstatus
= env
->mstatus
& mask
;
2653 if (env
->xl
!= MXL_RV32
|| env
->debugger
) {
2654 mask
|= SSTATUS64_UXL
;
2657 *val
= int128_make128(sstatus
, add_status_sd(MXL_RV128
, sstatus
));
2658 return RISCV_EXCP_NONE
;
2661 static RISCVException
read_sstatus(CPURISCVState
*env
, int csrno
,
2664 target_ulong mask
= (sstatus_v1_10_mask
);
2665 if (env
->xl
!= MXL_RV32
|| env
->debugger
) {
2666 mask
|= SSTATUS64_UXL
;
2668 /* TODO: Use SXL not MXL. */
2669 *val
= add_status_sd(riscv_cpu_mxl(env
), env
->mstatus
& mask
);
2670 return RISCV_EXCP_NONE
;
2673 static RISCVException
write_sstatus(CPURISCVState
*env
, int csrno
,
2676 target_ulong mask
= (sstatus_v1_10_mask
);
2678 if (env
->xl
!= MXL_RV32
|| env
->debugger
) {
2679 if ((val
& SSTATUS64_UXL
) != 0) {
2680 mask
|= SSTATUS64_UXL
;
2683 target_ulong newval
= (env
->mstatus
& ~mask
) | (val
& mask
);
2684 return write_mstatus(env
, CSR_MSTATUS
, newval
);
2687 static RISCVException
rmw_vsie64(CPURISCVState
*env
, int csrno
,
2689 uint64_t new_val
, uint64_t wr_mask
)
2691 uint64_t alias_mask
= (LOCAL_INTERRUPTS
| VS_MODE_INTERRUPTS
) &
2693 uint64_t nalias_mask
= LOCAL_INTERRUPTS
& (~env
->hideleg
& env
->hvien
);
2694 uint64_t rval
, rval_vs
, vsbits
;
2695 uint64_t wr_mask_vsie
;
2696 uint64_t wr_mask_mie
;
2699 /* Bring VS-level bits to correct position */
2700 vsbits
= new_val
& (VS_MODE_INTERRUPTS
>> 1);
2701 new_val
&= ~(VS_MODE_INTERRUPTS
>> 1);
2702 new_val
|= vsbits
<< 1;
2704 vsbits
= wr_mask
& (VS_MODE_INTERRUPTS
>> 1);
2705 wr_mask
&= ~(VS_MODE_INTERRUPTS
>> 1);
2706 wr_mask
|= vsbits
<< 1;
2708 wr_mask_mie
= wr_mask
& alias_mask
;
2709 wr_mask_vsie
= wr_mask
& nalias_mask
;
2711 ret
= rmw_mie64(env
, csrno
, &rval
, new_val
, wr_mask_mie
);
2713 rval_vs
= env
->vsie
& nalias_mask
;
2714 env
->vsie
= (env
->vsie
& ~wr_mask_vsie
) | (new_val
& wr_mask_vsie
);
2718 vsbits
= rval
& VS_MODE_INTERRUPTS
;
2719 rval
&= ~VS_MODE_INTERRUPTS
;
2720 *ret_val
= rval
| (vsbits
>> 1) | rval_vs
;
2726 static RISCVException
rmw_vsie(CPURISCVState
*env
, int csrno
,
2727 target_ulong
*ret_val
,
2728 target_ulong new_val
, target_ulong wr_mask
)
2733 ret
= rmw_vsie64(env
, csrno
, &rval
, new_val
, wr_mask
);
2741 static RISCVException
rmw_vsieh(CPURISCVState
*env
, int csrno
,
2742 target_ulong
*ret_val
,
2743 target_ulong new_val
, target_ulong wr_mask
)
2748 ret
= rmw_vsie64(env
, csrno
, &rval
,
2749 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
2751 *ret_val
= rval
>> 32;
2757 static RISCVException
rmw_sie64(CPURISCVState
*env
, int csrno
,
2759 uint64_t new_val
, uint64_t wr_mask
)
2761 uint64_t nalias_mask
= (S_MODE_INTERRUPTS
| LOCAL_INTERRUPTS
) &
2762 (~env
->mideleg
& env
->mvien
);
2763 uint64_t alias_mask
= (S_MODE_INTERRUPTS
| LOCAL_INTERRUPTS
) & env
->mideleg
;
2764 uint64_t sie_mask
= wr_mask
& nalias_mask
;
2768 * mideleg[i] mvien[i]
2769 * 0 0 sie[i] read-only zero.
2770 * 0 1 sie[i] is a separate writable bit.
2771 * 1 X sie[i] alias of mie[i].
2773 * Both alias and non-alias mask remain same for sip except for bits
2774 * which are zero in both mideleg and mvien.
2776 if (env
->virt_enabled
) {
2777 if (env
->hvictl
& HVICTL_VTI
) {
2778 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
2780 ret
= rmw_vsie64(env
, CSR_VSIE
, ret_val
, new_val
, wr_mask
);
2782 *ret_val
&= alias_mask
;
2785 ret
= rmw_mie64(env
, csrno
, ret_val
, new_val
, wr_mask
& alias_mask
);
2787 *ret_val
&= alias_mask
;
2788 *ret_val
|= env
->sie
& nalias_mask
;
2791 env
->sie
= (env
->sie
& ~sie_mask
) | (new_val
& sie_mask
);
2797 static RISCVException
rmw_sie(CPURISCVState
*env
, int csrno
,
2798 target_ulong
*ret_val
,
2799 target_ulong new_val
, target_ulong wr_mask
)
2804 ret
= rmw_sie64(env
, csrno
, &rval
, new_val
, wr_mask
);
2805 if (ret
== RISCV_EXCP_NONE
&& ret_val
) {
2812 static RISCVException
rmw_sieh(CPURISCVState
*env
, int csrno
,
2813 target_ulong
*ret_val
,
2814 target_ulong new_val
, target_ulong wr_mask
)
2819 ret
= rmw_sie64(env
, csrno
, &rval
,
2820 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
2822 *ret_val
= rval
>> 32;
2828 static RISCVException
read_stvec(CPURISCVState
*env
, int csrno
,
2832 return RISCV_EXCP_NONE
;
2835 static RISCVException
write_stvec(CPURISCVState
*env
, int csrno
,
2838 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
2839 if ((val
& 3) < 2) {
2842 qemu_log_mask(LOG_UNIMP
, "CSR_STVEC: reserved mode not supported\n");
2844 return RISCV_EXCP_NONE
;
2847 static RISCVException
read_scounteren(CPURISCVState
*env
, int csrno
,
2850 *val
= env
->scounteren
;
2851 return RISCV_EXCP_NONE
;
2854 static RISCVException
write_scounteren(CPURISCVState
*env
, int csrno
,
2857 env
->scounteren
= val
;
2858 return RISCV_EXCP_NONE
;
2861 /* Supervisor Trap Handling */
2862 static RISCVException
read_sscratch_i128(CPURISCVState
*env
, int csrno
,
2865 *val
= int128_make128(env
->sscratch
, env
->sscratchh
);
2866 return RISCV_EXCP_NONE
;
2869 static RISCVException
write_sscratch_i128(CPURISCVState
*env
, int csrno
,
2872 env
->sscratch
= int128_getlo(val
);
2873 env
->sscratchh
= int128_gethi(val
);
2874 return RISCV_EXCP_NONE
;
2877 static RISCVException
read_sscratch(CPURISCVState
*env
, int csrno
,
2880 *val
= env
->sscratch
;
2881 return RISCV_EXCP_NONE
;
2884 static RISCVException
write_sscratch(CPURISCVState
*env
, int csrno
,
2887 env
->sscratch
= val
;
2888 return RISCV_EXCP_NONE
;
2891 static RISCVException
read_sepc(CPURISCVState
*env
, int csrno
,
2895 return RISCV_EXCP_NONE
;
2898 static RISCVException
write_sepc(CPURISCVState
*env
, int csrno
,
2902 return RISCV_EXCP_NONE
;
2905 static RISCVException
read_scause(CPURISCVState
*env
, int csrno
,
2909 return RISCV_EXCP_NONE
;
2912 static RISCVException
write_scause(CPURISCVState
*env
, int csrno
,
2916 return RISCV_EXCP_NONE
;
2919 static RISCVException
read_stval(CPURISCVState
*env
, int csrno
,
2923 return RISCV_EXCP_NONE
;
2926 static RISCVException
write_stval(CPURISCVState
*env
, int csrno
,
2930 return RISCV_EXCP_NONE
;
2933 static RISCVException
rmw_hvip64(CPURISCVState
*env
, int csrno
,
2935 uint64_t new_val
, uint64_t wr_mask
);
2937 static RISCVException
rmw_vsip64(CPURISCVState
*env
, int csrno
,
2939 uint64_t new_val
, uint64_t wr_mask
)
2942 uint64_t rval
, mask
= env
->hideleg
& VS_MODE_INTERRUPTS
;
2945 /* Add virtualized bits into vsip mask. */
2946 mask
|= env
->hvien
& ~env
->hideleg
;
2948 /* Bring VS-level bits to correct position */
2949 vsbits
= new_val
& (VS_MODE_INTERRUPTS
>> 1);
2950 new_val
&= ~(VS_MODE_INTERRUPTS
>> 1);
2951 new_val
|= vsbits
<< 1;
2952 vsbits
= wr_mask
& (VS_MODE_INTERRUPTS
>> 1);
2953 wr_mask
&= ~(VS_MODE_INTERRUPTS
>> 1);
2954 wr_mask
|= vsbits
<< 1;
2956 ret
= rmw_hvip64(env
, csrno
, &rval
, new_val
,
2957 wr_mask
& mask
& vsip_writable_mask
);
2960 vsbits
= rval
& VS_MODE_INTERRUPTS
;
2961 rval
&= ~VS_MODE_INTERRUPTS
;
2962 *ret_val
= rval
| (vsbits
>> 1);
2968 static RISCVException
rmw_vsip(CPURISCVState
*env
, int csrno
,
2969 target_ulong
*ret_val
,
2970 target_ulong new_val
, target_ulong wr_mask
)
2975 ret
= rmw_vsip64(env
, csrno
, &rval
, new_val
, wr_mask
);
2983 static RISCVException
rmw_vsiph(CPURISCVState
*env
, int csrno
,
2984 target_ulong
*ret_val
,
2985 target_ulong new_val
, target_ulong wr_mask
)
2990 ret
= rmw_vsip64(env
, csrno
, &rval
,
2991 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
2993 *ret_val
= rval
>> 32;
2999 static RISCVException
rmw_sip64(CPURISCVState
*env
, int csrno
,
3001 uint64_t new_val
, uint64_t wr_mask
)
3004 uint64_t mask
= (env
->mideleg
| env
->mvien
) & sip_writable_mask
;
3006 if (env
->virt_enabled
) {
3007 if (env
->hvictl
& HVICTL_VTI
) {
3008 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
3010 ret
= rmw_vsip64(env
, CSR_VSIP
, ret_val
, new_val
, wr_mask
);
3012 ret
= rmw_mvip64(env
, csrno
, ret_val
, new_val
, wr_mask
& mask
);
3016 *ret_val
&= (env
->mideleg
| env
->mvien
) &
3017 (S_MODE_INTERRUPTS
| LOCAL_INTERRUPTS
);
3023 static RISCVException
rmw_sip(CPURISCVState
*env
, int csrno
,
3024 target_ulong
*ret_val
,
3025 target_ulong new_val
, target_ulong wr_mask
)
3030 ret
= rmw_sip64(env
, csrno
, &rval
, new_val
, wr_mask
);
3038 static RISCVException
rmw_siph(CPURISCVState
*env
, int csrno
,
3039 target_ulong
*ret_val
,
3040 target_ulong new_val
, target_ulong wr_mask
)
3045 ret
= rmw_sip64(env
, csrno
, &rval
,
3046 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
3048 *ret_val
= rval
>> 32;
3054 /* Supervisor Protection and Translation */
3055 static RISCVException
read_satp(CPURISCVState
*env
, int csrno
,
3058 if (!riscv_cpu_cfg(env
)->mmu
) {
3060 return RISCV_EXCP_NONE
;
3063 return RISCV_EXCP_NONE
;
3066 static RISCVException
write_satp(CPURISCVState
*env
, int csrno
,
3069 if (!riscv_cpu_cfg(env
)->mmu
) {
3070 return RISCV_EXCP_NONE
;
3073 env
->satp
= legalize_xatp(env
, env
->satp
, val
);
3074 return RISCV_EXCP_NONE
;
3077 static RISCVException
read_vstopi(CPURISCVState
*env
, int csrno
,
3082 uint64_t vseip
, vsgein
;
3083 uint32_t iid
, iprio
, hviid
, hviprio
, gein
;
3084 uint32_t s
, scount
= 0, siid
[VSTOPI_NUM_SRCS
], siprio
[VSTOPI_NUM_SRCS
];
3086 gein
= get_field(env
->hstatus
, HSTATUS_VGEIN
);
3087 hviid
= get_field(env
->hvictl
, HVICTL_IID
);
3088 hviprio
= get_field(env
->hvictl
, HVICTL_IPRIO
);
3091 vsgein
= (env
->hgeip
& (1ULL << gein
)) ? MIP_VSEIP
: 0;
3092 vseip
= env
->mie
& (env
->mip
| vsgein
) & MIP_VSEIP
;
3093 if (gein
<= env
->geilen
&& vseip
) {
3094 siid
[scount
] = IRQ_S_EXT
;
3095 siprio
[scount
] = IPRIO_MMAXIPRIO
+ 1;
3096 if (env
->aia_ireg_rmw_fn
[PRV_S
]) {
3098 * Call machine specific IMSIC register emulation for
3101 ret
= env
->aia_ireg_rmw_fn
[PRV_S
](
3102 env
->aia_ireg_rmw_fn_arg
[PRV_S
],
3103 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI
, PRV_S
, true, gein
,
3104 riscv_cpu_mxl_bits(env
)),
3106 if (!ret
&& topei
) {
3107 siprio
[scount
] = topei
& IMSIC_TOPEI_IPRIO_MASK
;
3113 if (hviid
== IRQ_S_EXT
&& hviprio
) {
3114 siid
[scount
] = IRQ_S_EXT
;
3115 siprio
[scount
] = hviprio
;
3120 if (env
->hvictl
& HVICTL_VTI
) {
3121 if (hviid
!= IRQ_S_EXT
) {
3122 siid
[scount
] = hviid
;
3123 siprio
[scount
] = hviprio
;
3127 irq
= riscv_cpu_vsirq_pending(env
);
3128 if (irq
!= IRQ_S_EXT
&& 0 < irq
&& irq
<= 63) {
3130 siprio
[scount
] = env
->hviprio
[irq
];
3137 for (s
= 0; s
< scount
; s
++) {
3138 if (siprio
[s
] < iprio
) {
3145 if (env
->hvictl
& HVICTL_IPRIOM
) {
3146 if (iprio
> IPRIO_MMAXIPRIO
) {
3147 iprio
= IPRIO_MMAXIPRIO
;
3150 if (riscv_cpu_default_priority(iid
) > IPRIO_DEFAULT_S
) {
3151 iprio
= IPRIO_MMAXIPRIO
;
3161 *val
= (iid
& TOPI_IID_MASK
) << TOPI_IID_SHIFT
;
3164 return RISCV_EXCP_NONE
;
3167 static RISCVException
read_stopi(CPURISCVState
*env
, int csrno
,
3173 if (env
->virt_enabled
) {
3174 return read_vstopi(env
, CSR_VSTOPI
, val
);
3177 irq
= riscv_cpu_sirq_pending(env
);
3178 if (irq
<= 0 || irq
> 63) {
3181 iprio
= env
->siprio
[irq
];
3183 if (riscv_cpu_default_priority(irq
) > IPRIO_DEFAULT_S
) {
3184 iprio
= IPRIO_MMAXIPRIO
;
3187 *val
= (irq
& TOPI_IID_MASK
) << TOPI_IID_SHIFT
;
3191 return RISCV_EXCP_NONE
;
3194 /* Hypervisor Extensions */
3195 static RISCVException
read_hstatus(CPURISCVState
*env
, int csrno
,
3198 *val
= env
->hstatus
;
3199 if (riscv_cpu_mxl(env
) != MXL_RV32
) {
3200 /* We only support 64-bit VSXL */
3201 *val
= set_field(*val
, HSTATUS_VSXL
, 2);
3203 /* We only support little endian */
3204 *val
= set_field(*val
, HSTATUS_VSBE
, 0);
3205 return RISCV_EXCP_NONE
;
3208 static RISCVException
write_hstatus(CPURISCVState
*env
, int csrno
,
3212 if (riscv_cpu_mxl(env
) != MXL_RV32
&& get_field(val
, HSTATUS_VSXL
) != 2) {
3213 qemu_log_mask(LOG_UNIMP
,
3214 "QEMU does not support mixed HSXLEN options.");
3216 if (get_field(val
, HSTATUS_VSBE
) != 0) {
3217 qemu_log_mask(LOG_UNIMP
, "QEMU does not support big endian guests.");
3219 return RISCV_EXCP_NONE
;
3222 static RISCVException
read_hedeleg(CPURISCVState
*env
, int csrno
,
3225 *val
= env
->hedeleg
;
3226 return RISCV_EXCP_NONE
;
3229 static RISCVException
write_hedeleg(CPURISCVState
*env
, int csrno
,
3232 env
->hedeleg
= val
& vs_delegable_excps
;
3233 return RISCV_EXCP_NONE
;
3236 static RISCVException
read_hedelegh(CPURISCVState
*env
, int csrno
,
3240 ret
= smstateen_acc_ok(env
, 0, SMSTATEEN0_P1P13
);
3241 if (ret
!= RISCV_EXCP_NONE
) {
3245 /* Reserved, now read zero */
3247 return RISCV_EXCP_NONE
;
3250 static RISCVException
write_hedelegh(CPURISCVState
*env
, int csrno
,
3254 ret
= smstateen_acc_ok(env
, 0, SMSTATEEN0_P1P13
);
3255 if (ret
!= RISCV_EXCP_NONE
) {
3259 /* Reserved, now write ignore */
3260 return RISCV_EXCP_NONE
;
3263 static RISCVException
rmw_hvien64(CPURISCVState
*env
, int csrno
,
3265 uint64_t new_val
, uint64_t wr_mask
)
3267 uint64_t mask
= wr_mask
& hvien_writable_mask
;
3270 *ret_val
= env
->hvien
;
3273 env
->hvien
= (env
->hvien
& ~mask
) | (new_val
& mask
);
3275 return RISCV_EXCP_NONE
;
3278 static RISCVException
rmw_hvien(CPURISCVState
*env
, int csrno
,
3279 target_ulong
*ret_val
,
3280 target_ulong new_val
, target_ulong wr_mask
)
3285 ret
= rmw_hvien64(env
, csrno
, &rval
, new_val
, wr_mask
);
3293 static RISCVException
rmw_hvienh(CPURISCVState
*env
, int csrno
,
3294 target_ulong
*ret_val
,
3295 target_ulong new_val
, target_ulong wr_mask
)
3300 ret
= rmw_hvien64(env
, csrno
, &rval
,
3301 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
3303 *ret_val
= rval
>> 32;
3309 static RISCVException
rmw_hideleg64(CPURISCVState
*env
, int csrno
,
3311 uint64_t new_val
, uint64_t wr_mask
)
3313 uint64_t mask
= wr_mask
& vs_delegable_ints
;
3316 *ret_val
= env
->hideleg
& vs_delegable_ints
;
3319 env
->hideleg
= (env
->hideleg
& ~mask
) | (new_val
& mask
);
3320 return RISCV_EXCP_NONE
;
3323 static RISCVException
rmw_hideleg(CPURISCVState
*env
, int csrno
,
3324 target_ulong
*ret_val
,
3325 target_ulong new_val
, target_ulong wr_mask
)
3330 ret
= rmw_hideleg64(env
, csrno
, &rval
, new_val
, wr_mask
);
3338 static RISCVException
rmw_hidelegh(CPURISCVState
*env
, int csrno
,
3339 target_ulong
*ret_val
,
3340 target_ulong new_val
, target_ulong wr_mask
)
3345 ret
= rmw_hideleg64(env
, csrno
, &rval
,
3346 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
3348 *ret_val
= rval
>> 32;
3355 * The function is written for two use-cases:
3356 * 1- To access hvip csr as is for HS-mode access.
3357 * 2- To access vsip as a combination of hvip, and mip for vs-mode.
3359 * Both report bits 2, 6, 10 and 13:63.
3360 * vsip needs to be read-only zero when both hideleg[i] and
3361 * hvien[i] are zero.
3363 static RISCVException
rmw_hvip64(CPURISCVState
*env
, int csrno
,
3365 uint64_t new_val
, uint64_t wr_mask
)
3372 * For bits 10, 6 and 2, vsip[i] is an alias of hip[i]. These bits are
3373 * present in hip, hvip and mip. Where mip[i] is alias of hip[i] and hvip[i]
3374 * is OR'ed in hip[i] to inject virtual interrupts from hypervisor. These
3375 * bits are actually being maintained in mip so we read them from there.
3376 * This way we have a single source of truth and allows for easier
3379 * For bits 13:63 we have:
3381 * hideleg[i] hvien[i]
3382 * 0 0 No delegation. vsip[i] readonly zero.
3383 * 0 1 vsip[i] is alias of hvip[i], sip bypassed.
3384 * 1 X vsip[i] is alias of sip[i], hvip bypassed.
3386 * alias_mask denotes the bits that come from sip (mip here given we
3387 * maintain all bits there). nalias_mask denotes bits that come from
3390 uint64_t alias_mask
= (env
->hideleg
| ~env
->hvien
) | VS_MODE_INTERRUPTS
;
3391 uint64_t nalias_mask
= (~env
->hideleg
& env
->hvien
);
3392 uint64_t wr_mask_hvip
;
3393 uint64_t wr_mask_mip
;
3396 * Both alias and non-alias mask remain same for vsip except:
3397 * 1- For VS* bits if they are zero in hideleg.
3398 * 2- For 13:63 bits if they are zero in both hideleg and hvien.
3400 if (csrno
== CSR_VSIP
) {
3401 /* zero-out VS* bits that are not delegated to VS mode. */
3402 alias_mask
&= (env
->hideleg
| ~VS_MODE_INTERRUPTS
);
3405 * zero-out 13:63 bits that are zero in both hideleg and hvien.
3406 * nalias_mask mask can not contain any VS* bits so only second
3407 * condition applies on it.
3409 nalias_mask
&= (env
->hideleg
| env
->hvien
);
3410 alias_mask
&= (env
->hideleg
| env
->hvien
);
3413 wr_mask_hvip
= wr_mask
& nalias_mask
& hvip_writable_mask
;
3414 wr_mask_mip
= wr_mask
& alias_mask
& hvip_writable_mask
;
3416 /* Aliased bits, bits 10, 6, 2 need to come from mip. */
3417 ret
= rmw_mip64(env
, csrno
, &ret_mip
, new_val
, wr_mask_mip
);
3418 if (ret
!= RISCV_EXCP_NONE
) {
3422 old_hvip
= env
->hvip
;
3425 env
->hvip
= (env
->hvip
& ~wr_mask_hvip
) | (new_val
& wr_mask_hvip
);
3428 * Given hvip is separate source from mip, we need to trigger interrupt
3429 * from here separately. Normally this happen from riscv_cpu_update_mip.
3431 riscv_cpu_interrupt(env
);
3435 /* Only take VS* bits from mip. */
3436 ret_mip
&= alias_mask
;
3438 /* Take in non-delegated 13:63 bits from hvip. */
3439 old_hvip
&= nalias_mask
;
3441 *ret_val
= ret_mip
| old_hvip
;
3447 static RISCVException
rmw_hvip(CPURISCVState
*env
, int csrno
,
3448 target_ulong
*ret_val
,
3449 target_ulong new_val
, target_ulong wr_mask
)
3454 ret
= rmw_hvip64(env
, csrno
, &rval
, new_val
, wr_mask
);
3462 static RISCVException
rmw_hviph(CPURISCVState
*env
, int csrno
,
3463 target_ulong
*ret_val
,
3464 target_ulong new_val
, target_ulong wr_mask
)
3469 ret
= rmw_hvip64(env
, csrno
, &rval
,
3470 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
3472 *ret_val
= rval
>> 32;
3478 static RISCVException
rmw_hip(CPURISCVState
*env
, int csrno
,
3479 target_ulong
*ret_value
,
3480 target_ulong new_value
, target_ulong write_mask
)
3482 int ret
= rmw_mip(env
, csrno
, ret_value
, new_value
,
3483 write_mask
& hip_writable_mask
);
3486 *ret_value
&= HS_MODE_INTERRUPTS
;
3491 static RISCVException
rmw_hie(CPURISCVState
*env
, int csrno
,
3492 target_ulong
*ret_val
,
3493 target_ulong new_val
, target_ulong wr_mask
)
3498 ret
= rmw_mie64(env
, csrno
, &rval
, new_val
, wr_mask
& HS_MODE_INTERRUPTS
);
3500 *ret_val
= rval
& HS_MODE_INTERRUPTS
;
3506 static RISCVException
read_hcounteren(CPURISCVState
*env
, int csrno
,
3509 *val
= env
->hcounteren
;
3510 return RISCV_EXCP_NONE
;
3513 static RISCVException
write_hcounteren(CPURISCVState
*env
, int csrno
,
3516 env
->hcounteren
= val
;
3517 return RISCV_EXCP_NONE
;
3520 static RISCVException
read_hgeie(CPURISCVState
*env
, int csrno
,
3526 return RISCV_EXCP_NONE
;
3529 static RISCVException
write_hgeie(CPURISCVState
*env
, int csrno
,
3532 /* Only GEILEN:1 bits implemented and BIT0 is never implemented */
3533 val
&= ((((target_ulong
)1) << env
->geilen
) - 1) << 1;
3535 /* Update mip.SGEIP bit */
3536 riscv_cpu_update_mip(env
, MIP_SGEIP
,
3537 BOOL_TO_MASK(!!(env
->hgeie
& env
->hgeip
)));
3538 return RISCV_EXCP_NONE
;
3541 static RISCVException
read_htval(CPURISCVState
*env
, int csrno
,
3545 return RISCV_EXCP_NONE
;
3548 static RISCVException
write_htval(CPURISCVState
*env
, int csrno
,
3552 return RISCV_EXCP_NONE
;
3555 static RISCVException
read_htinst(CPURISCVState
*env
, int csrno
,
3559 return RISCV_EXCP_NONE
;
3562 static RISCVException
write_htinst(CPURISCVState
*env
, int csrno
,
3565 return RISCV_EXCP_NONE
;
3568 static RISCVException
read_hgeip(CPURISCVState
*env
, int csrno
,
3574 return RISCV_EXCP_NONE
;
3577 static RISCVException
read_hgatp(CPURISCVState
*env
, int csrno
,
3581 return RISCV_EXCP_NONE
;
3584 static RISCVException
write_hgatp(CPURISCVState
*env
, int csrno
,
3587 env
->hgatp
= legalize_xatp(env
, env
->hgatp
, val
);
3588 return RISCV_EXCP_NONE
;
3591 static RISCVException
read_htimedelta(CPURISCVState
*env
, int csrno
,
3594 if (!env
->rdtime_fn
) {
3595 return RISCV_EXCP_ILLEGAL_INST
;
3598 *val
= env
->htimedelta
;
3599 return RISCV_EXCP_NONE
;
3602 static RISCVException
write_htimedelta(CPURISCVState
*env
, int csrno
,
3605 if (!env
->rdtime_fn
) {
3606 return RISCV_EXCP_ILLEGAL_INST
;
3609 if (riscv_cpu_mxl(env
) == MXL_RV32
) {
3610 env
->htimedelta
= deposit64(env
->htimedelta
, 0, 32, (uint64_t)val
);
3612 env
->htimedelta
= val
;
3615 if (riscv_cpu_cfg(env
)->ext_sstc
&& env
->rdtime_fn
) {
3616 riscv_timer_write_timecmp(env
, env
->vstimer
, env
->vstimecmp
,
3617 env
->htimedelta
, MIP_VSTIP
);
3620 return RISCV_EXCP_NONE
;
3623 static RISCVException
read_htimedeltah(CPURISCVState
*env
, int csrno
,
3626 if (!env
->rdtime_fn
) {
3627 return RISCV_EXCP_ILLEGAL_INST
;
3630 *val
= env
->htimedelta
>> 32;
3631 return RISCV_EXCP_NONE
;
3634 static RISCVException
write_htimedeltah(CPURISCVState
*env
, int csrno
,
3637 if (!env
->rdtime_fn
) {
3638 return RISCV_EXCP_ILLEGAL_INST
;
3641 env
->htimedelta
= deposit64(env
->htimedelta
, 32, 32, (uint64_t)val
);
3643 if (riscv_cpu_cfg(env
)->ext_sstc
&& env
->rdtime_fn
) {
3644 riscv_timer_write_timecmp(env
, env
->vstimer
, env
->vstimecmp
,
3645 env
->htimedelta
, MIP_VSTIP
);
3648 return RISCV_EXCP_NONE
;
3651 static RISCVException
read_hvictl(CPURISCVState
*env
, int csrno
,
3655 return RISCV_EXCP_NONE
;
3658 static RISCVException
write_hvictl(CPURISCVState
*env
, int csrno
,
3661 env
->hvictl
= val
& HVICTL_VALID_MASK
;
3662 return RISCV_EXCP_NONE
;
3665 static RISCVException
read_hvipriox(CPURISCVState
*env
, int first_index
,
3666 uint8_t *iprio
, target_ulong
*val
)
3668 int i
, irq
, rdzero
, num_irqs
= 4 * (riscv_cpu_mxl_bits(env
) / 32);
3670 /* First index has to be a multiple of number of irqs per register */
3671 if (first_index
% num_irqs
) {
3672 return (env
->virt_enabled
) ?
3673 RISCV_EXCP_VIRT_INSTRUCTION_FAULT
: RISCV_EXCP_ILLEGAL_INST
;
3676 /* Fill-up return value */
3678 for (i
= 0; i
< num_irqs
; i
++) {
3679 if (riscv_cpu_hviprio_index2irq(first_index
+ i
, &irq
, &rdzero
)) {
3685 *val
|= ((target_ulong
)iprio
[irq
]) << (i
* 8);
3688 return RISCV_EXCP_NONE
;
3691 static RISCVException
write_hvipriox(CPURISCVState
*env
, int first_index
,
3692 uint8_t *iprio
, target_ulong val
)
3694 int i
, irq
, rdzero
, num_irqs
= 4 * (riscv_cpu_mxl_bits(env
) / 32);
3696 /* First index has to be a multiple of number of irqs per register */
3697 if (first_index
% num_irqs
) {
3698 return (env
->virt_enabled
) ?
3699 RISCV_EXCP_VIRT_INSTRUCTION_FAULT
: RISCV_EXCP_ILLEGAL_INST
;
3702 /* Fill-up priority array */
3703 for (i
= 0; i
< num_irqs
; i
++) {
3704 if (riscv_cpu_hviprio_index2irq(first_index
+ i
, &irq
, &rdzero
)) {
3710 iprio
[irq
] = (val
>> (i
* 8)) & 0xff;
3714 return RISCV_EXCP_NONE
;
3717 static RISCVException
read_hviprio1(CPURISCVState
*env
, int csrno
,
3720 return read_hvipriox(env
, 0, env
->hviprio
, val
);
3723 static RISCVException
write_hviprio1(CPURISCVState
*env
, int csrno
,
3726 return write_hvipriox(env
, 0, env
->hviprio
, val
);
3729 static RISCVException
read_hviprio1h(CPURISCVState
*env
, int csrno
,
3732 return read_hvipriox(env
, 4, env
->hviprio
, val
);
3735 static RISCVException
write_hviprio1h(CPURISCVState
*env
, int csrno
,
3738 return write_hvipriox(env
, 4, env
->hviprio
, val
);
3741 static RISCVException
read_hviprio2(CPURISCVState
*env
, int csrno
,
3744 return read_hvipriox(env
, 8, env
->hviprio
, val
);
3747 static RISCVException
write_hviprio2(CPURISCVState
*env
, int csrno
,
3750 return write_hvipriox(env
, 8, env
->hviprio
, val
);
3753 static RISCVException
read_hviprio2h(CPURISCVState
*env
, int csrno
,
3756 return read_hvipriox(env
, 12, env
->hviprio
, val
);
3759 static RISCVException
write_hviprio2h(CPURISCVState
*env
, int csrno
,
3762 return write_hvipriox(env
, 12, env
->hviprio
, val
);
3765 /* Virtual CSR Registers */
3766 static RISCVException
read_vsstatus(CPURISCVState
*env
, int csrno
,
3769 *val
= env
->vsstatus
;
3770 return RISCV_EXCP_NONE
;
3773 static RISCVException
write_vsstatus(CPURISCVState
*env
, int csrno
,
3776 uint64_t mask
= (target_ulong
)-1;
3777 if ((val
& VSSTATUS64_UXL
) == 0) {
3778 mask
&= ~VSSTATUS64_UXL
;
3780 env
->vsstatus
= (env
->vsstatus
& ~mask
) | (uint64_t)val
;
3781 return RISCV_EXCP_NONE
;
3784 static RISCVException
read_vstvec(CPURISCVState
*env
, int csrno
,
3788 return RISCV_EXCP_NONE
;
3791 static RISCVException
write_vstvec(CPURISCVState
*env
, int csrno
,
3795 return RISCV_EXCP_NONE
;
3798 static RISCVException
read_vsscratch(CPURISCVState
*env
, int csrno
,
3801 *val
= env
->vsscratch
;
3802 return RISCV_EXCP_NONE
;
3805 static RISCVException
write_vsscratch(CPURISCVState
*env
, int csrno
,
3808 env
->vsscratch
= val
;
3809 return RISCV_EXCP_NONE
;
3812 static RISCVException
read_vsepc(CPURISCVState
*env
, int csrno
,
3816 return RISCV_EXCP_NONE
;
3819 static RISCVException
write_vsepc(CPURISCVState
*env
, int csrno
,
3823 return RISCV_EXCP_NONE
;
3826 static RISCVException
read_vscause(CPURISCVState
*env
, int csrno
,
3829 *val
= env
->vscause
;
3830 return RISCV_EXCP_NONE
;
3833 static RISCVException
write_vscause(CPURISCVState
*env
, int csrno
,
3837 return RISCV_EXCP_NONE
;
3840 static RISCVException
read_vstval(CPURISCVState
*env
, int csrno
,
3844 return RISCV_EXCP_NONE
;
3847 static RISCVException
write_vstval(CPURISCVState
*env
, int csrno
,
3851 return RISCV_EXCP_NONE
;
3854 static RISCVException
read_vsatp(CPURISCVState
*env
, int csrno
,
3858 return RISCV_EXCP_NONE
;
3861 static RISCVException
write_vsatp(CPURISCVState
*env
, int csrno
,
3864 env
->vsatp
= legalize_xatp(env
, env
->vsatp
, val
);
3865 return RISCV_EXCP_NONE
;
3868 static RISCVException
read_mtval2(CPURISCVState
*env
, int csrno
,
3872 return RISCV_EXCP_NONE
;
3875 static RISCVException
write_mtval2(CPURISCVState
*env
, int csrno
,
3879 return RISCV_EXCP_NONE
;
3882 static RISCVException
read_mtinst(CPURISCVState
*env
, int csrno
,
3886 return RISCV_EXCP_NONE
;
3889 static RISCVException
write_mtinst(CPURISCVState
*env
, int csrno
,
3893 return RISCV_EXCP_NONE
;
3896 /* Physical Memory Protection */
3897 static RISCVException
read_mseccfg(CPURISCVState
*env
, int csrno
,
3900 *val
= mseccfg_csr_read(env
);
3901 return RISCV_EXCP_NONE
;
3904 static RISCVException
write_mseccfg(CPURISCVState
*env
, int csrno
,
3907 mseccfg_csr_write(env
, val
);
3908 return RISCV_EXCP_NONE
;
3911 static RISCVException
read_pmpcfg(CPURISCVState
*env
, int csrno
,
3914 uint32_t reg_index
= csrno
- CSR_PMPCFG0
;
3916 *val
= pmpcfg_csr_read(env
, reg_index
);
3917 return RISCV_EXCP_NONE
;
3920 static RISCVException
write_pmpcfg(CPURISCVState
*env
, int csrno
,
3923 uint32_t reg_index
= csrno
- CSR_PMPCFG0
;
3925 pmpcfg_csr_write(env
, reg_index
, val
);
3926 return RISCV_EXCP_NONE
;
3929 static RISCVException
read_pmpaddr(CPURISCVState
*env
, int csrno
,
3932 *val
= pmpaddr_csr_read(env
, csrno
- CSR_PMPADDR0
);
3933 return RISCV_EXCP_NONE
;
3936 static RISCVException
write_pmpaddr(CPURISCVState
*env
, int csrno
,
3939 pmpaddr_csr_write(env
, csrno
- CSR_PMPADDR0
, val
);
3940 return RISCV_EXCP_NONE
;
3943 static RISCVException
read_tselect(CPURISCVState
*env
, int csrno
,
3946 *val
= tselect_csr_read(env
);
3947 return RISCV_EXCP_NONE
;
3950 static RISCVException
write_tselect(CPURISCVState
*env
, int csrno
,
3953 tselect_csr_write(env
, val
);
3954 return RISCV_EXCP_NONE
;
3957 static RISCVException
read_tdata(CPURISCVState
*env
, int csrno
,
3960 /* return 0 in tdata1 to end the trigger enumeration */
3961 if (env
->trigger_cur
>= RV_MAX_TRIGGERS
&& csrno
== CSR_TDATA1
) {
3963 return RISCV_EXCP_NONE
;
3966 if (!tdata_available(env
, csrno
- CSR_TDATA1
)) {
3967 return RISCV_EXCP_ILLEGAL_INST
;
3970 *val
= tdata_csr_read(env
, csrno
- CSR_TDATA1
);
3971 return RISCV_EXCP_NONE
;
3974 static RISCVException
write_tdata(CPURISCVState
*env
, int csrno
,
3977 if (!tdata_available(env
, csrno
- CSR_TDATA1
)) {
3978 return RISCV_EXCP_ILLEGAL_INST
;
3981 tdata_csr_write(env
, csrno
- CSR_TDATA1
, val
);
3982 return RISCV_EXCP_NONE
;
3985 static RISCVException
read_tinfo(CPURISCVState
*env
, int csrno
,
3988 *val
= tinfo_csr_read(env
);
3989 return RISCV_EXCP_NONE
;
3992 static RISCVException
read_mcontext(CPURISCVState
*env
, int csrno
,
3995 *val
= env
->mcontext
;
3996 return RISCV_EXCP_NONE
;
3999 static RISCVException
write_mcontext(CPURISCVState
*env
, int csrno
,
4002 bool rv32
= riscv_cpu_mxl(env
) == MXL_RV32
? true : false;
4005 if (riscv_has_ext(env
, RVH
)) {
4006 /* Spec suggest 7-bit for RV32 and 14-bit for RV64 w/ H extension */
4007 mask
= rv32
? MCONTEXT32_HCONTEXT
: MCONTEXT64_HCONTEXT
;
4009 /* Spec suggest 6-bit for RV32 and 13-bit for RV64 w/o H extension */
4010 mask
= rv32
? MCONTEXT32
: MCONTEXT64
;
4013 env
->mcontext
= val
& mask
;
4014 return RISCV_EXCP_NONE
;
4018 * Functions to access Pointer Masking feature registers
4019 * We have to check if current priv lvl could modify
4022 static bool check_pm_current_disabled(CPURISCVState
*env
, int csrno
)
4024 int csr_priv
= get_field(csrno
, 0x300);
4027 if (env
->debugger
) {
4031 * If priv lvls differ that means we're accessing csr from higher priv lvl,
4032 * so allow the access
4034 if (env
->priv
!= csr_priv
) {
4037 switch (env
->priv
) {
4039 pm_current
= get_field(env
->mmte
, M_PM_CURRENT
);
4042 pm_current
= get_field(env
->mmte
, S_PM_CURRENT
);
4045 pm_current
= get_field(env
->mmte
, U_PM_CURRENT
);
4048 g_assert_not_reached();
4050 /* It's same priv lvl, so we allow to modify csr only if pm.current==1 */
4054 static RISCVException
read_mmte(CPURISCVState
*env
, int csrno
,
4057 *val
= env
->mmte
& MMTE_MASK
;
4058 return RISCV_EXCP_NONE
;
4061 static RISCVException
write_mmte(CPURISCVState
*env
, int csrno
,
4065 target_ulong wpri_val
= val
& MMTE_MASK
;
4067 if (val
!= wpri_val
) {
4068 qemu_log_mask(LOG_GUEST_ERROR
, "%s" TARGET_FMT_lx
" %s"
4069 TARGET_FMT_lx
"\n", "MMTE: WPRI violation written 0x",
4070 val
, "vs expected 0x", wpri_val
);
4072 /* for machine mode pm.current is hardwired to 1 */
4073 wpri_val
|= MMTE_M_PM_CURRENT
;
4075 /* hardwiring pm.instruction bit to 0, since it's not supported yet */
4076 wpri_val
&= ~(MMTE_M_PM_INSN
| MMTE_S_PM_INSN
| MMTE_U_PM_INSN
);
4077 env
->mmte
= wpri_val
| EXT_STATUS_DIRTY
;
4078 riscv_cpu_update_mask(env
);
4080 /* Set XS and SD bits, since PM CSRs are dirty */
4081 mstatus
= env
->mstatus
| MSTATUS_XS
;
4082 write_mstatus(env
, csrno
, mstatus
);
4083 return RISCV_EXCP_NONE
;
4086 static RISCVException
read_smte(CPURISCVState
*env
, int csrno
,
4089 *val
= env
->mmte
& SMTE_MASK
;
4090 return RISCV_EXCP_NONE
;
4093 static RISCVException
write_smte(CPURISCVState
*env
, int csrno
,
4096 target_ulong wpri_val
= val
& SMTE_MASK
;
4098 if (val
!= wpri_val
) {
4099 qemu_log_mask(LOG_GUEST_ERROR
, "%s" TARGET_FMT_lx
" %s"
4100 TARGET_FMT_lx
"\n", "SMTE: WPRI violation written 0x",
4101 val
, "vs expected 0x", wpri_val
);
4104 /* if pm.current==0 we can't modify current PM CSRs */
4105 if (check_pm_current_disabled(env
, csrno
)) {
4106 return RISCV_EXCP_NONE
;
4109 wpri_val
|= (env
->mmte
& ~SMTE_MASK
);
4110 write_mmte(env
, csrno
, wpri_val
);
4111 return RISCV_EXCP_NONE
;
4114 static RISCVException
read_umte(CPURISCVState
*env
, int csrno
,
4117 *val
= env
->mmte
& UMTE_MASK
;
4118 return RISCV_EXCP_NONE
;
4121 static RISCVException
write_umte(CPURISCVState
*env
, int csrno
,
4124 target_ulong wpri_val
= val
& UMTE_MASK
;
4126 if (val
!= wpri_val
) {
4127 qemu_log_mask(LOG_GUEST_ERROR
, "%s" TARGET_FMT_lx
" %s"
4128 TARGET_FMT_lx
"\n", "UMTE: WPRI violation written 0x",
4129 val
, "vs expected 0x", wpri_val
);
4132 if (check_pm_current_disabled(env
, csrno
)) {
4133 return RISCV_EXCP_NONE
;
4136 wpri_val
|= (env
->mmte
& ~UMTE_MASK
);
4137 write_mmte(env
, csrno
, wpri_val
);
4138 return RISCV_EXCP_NONE
;
4141 static RISCVException
read_mpmmask(CPURISCVState
*env
, int csrno
,
4144 *val
= env
->mpmmask
;
4145 return RISCV_EXCP_NONE
;
4148 static RISCVException
write_mpmmask(CPURISCVState
*env
, int csrno
,
4154 if ((cpu_address_mode(env
) == PRV_M
) && (env
->mmte
& M_PM_ENABLE
)) {
4155 env
->cur_pmmask
= val
;
4157 env
->mmte
|= EXT_STATUS_DIRTY
;
4159 /* Set XS and SD bits, since PM CSRs are dirty */
4160 mstatus
= env
->mstatus
| MSTATUS_XS
;
4161 write_mstatus(env
, csrno
, mstatus
);
4162 return RISCV_EXCP_NONE
;
4165 static RISCVException
read_spmmask(CPURISCVState
*env
, int csrno
,
4168 *val
= env
->spmmask
;
4169 return RISCV_EXCP_NONE
;
4172 static RISCVException
write_spmmask(CPURISCVState
*env
, int csrno
,
4177 /* if pm.current==0 we can't modify current PM CSRs */
4178 if (check_pm_current_disabled(env
, csrno
)) {
4179 return RISCV_EXCP_NONE
;
4182 if ((cpu_address_mode(env
) == PRV_S
) && (env
->mmte
& S_PM_ENABLE
)) {
4183 env
->cur_pmmask
= val
;
4184 if (cpu_get_xl(env
, PRV_S
) == MXL_RV32
) {
4185 env
->cur_pmmask
&= UINT32_MAX
;
4188 env
->mmte
|= EXT_STATUS_DIRTY
;
4190 /* Set XS and SD bits, since PM CSRs are dirty */
4191 mstatus
= env
->mstatus
| MSTATUS_XS
;
4192 write_mstatus(env
, csrno
, mstatus
);
4193 return RISCV_EXCP_NONE
;
4196 static RISCVException
read_upmmask(CPURISCVState
*env
, int csrno
,
4199 *val
= env
->upmmask
;
4200 return RISCV_EXCP_NONE
;
4203 static RISCVException
write_upmmask(CPURISCVState
*env
, int csrno
,
4208 /* if pm.current==0 we can't modify current PM CSRs */
4209 if (check_pm_current_disabled(env
, csrno
)) {
4210 return RISCV_EXCP_NONE
;
4213 if ((cpu_address_mode(env
) == PRV_U
) && (env
->mmte
& U_PM_ENABLE
)) {
4214 env
->cur_pmmask
= val
;
4215 if (cpu_get_xl(env
, PRV_U
) == MXL_RV32
) {
4216 env
->cur_pmmask
&= UINT32_MAX
;
4219 env
->mmte
|= EXT_STATUS_DIRTY
;
4221 /* Set XS and SD bits, since PM CSRs are dirty */
4222 mstatus
= env
->mstatus
| MSTATUS_XS
;
4223 write_mstatus(env
, csrno
, mstatus
);
4224 return RISCV_EXCP_NONE
;
4227 static RISCVException
read_mpmbase(CPURISCVState
*env
, int csrno
,
4230 *val
= env
->mpmbase
;
4231 return RISCV_EXCP_NONE
;
4234 static RISCVException
write_mpmbase(CPURISCVState
*env
, int csrno
,
4240 if ((cpu_address_mode(env
) == PRV_M
) && (env
->mmte
& M_PM_ENABLE
)) {
4241 env
->cur_pmbase
= val
;
4243 env
->mmte
|= EXT_STATUS_DIRTY
;
4245 /* Set XS and SD bits, since PM CSRs are dirty */
4246 mstatus
= env
->mstatus
| MSTATUS_XS
;
4247 write_mstatus(env
, csrno
, mstatus
);
4248 return RISCV_EXCP_NONE
;
4251 static RISCVException
read_spmbase(CPURISCVState
*env
, int csrno
,
4254 *val
= env
->spmbase
;
4255 return RISCV_EXCP_NONE
;
4258 static RISCVException
write_spmbase(CPURISCVState
*env
, int csrno
,
4263 /* if pm.current==0 we can't modify current PM CSRs */
4264 if (check_pm_current_disabled(env
, csrno
)) {
4265 return RISCV_EXCP_NONE
;
4268 if ((cpu_address_mode(env
) == PRV_S
) && (env
->mmte
& S_PM_ENABLE
)) {
4269 env
->cur_pmbase
= val
;
4270 if (cpu_get_xl(env
, PRV_S
) == MXL_RV32
) {
4271 env
->cur_pmbase
&= UINT32_MAX
;
4274 env
->mmte
|= EXT_STATUS_DIRTY
;
4276 /* Set XS and SD bits, since PM CSRs are dirty */
4277 mstatus
= env
->mstatus
| MSTATUS_XS
;
4278 write_mstatus(env
, csrno
, mstatus
);
4279 return RISCV_EXCP_NONE
;
4282 static RISCVException
read_upmbase(CPURISCVState
*env
, int csrno
,
4285 *val
= env
->upmbase
;
4286 return RISCV_EXCP_NONE
;
4289 static RISCVException
write_upmbase(CPURISCVState
*env
, int csrno
,
4294 /* if pm.current==0 we can't modify current PM CSRs */
4295 if (check_pm_current_disabled(env
, csrno
)) {
4296 return RISCV_EXCP_NONE
;
4299 if ((cpu_address_mode(env
) == PRV_U
) && (env
->mmte
& U_PM_ENABLE
)) {
4300 env
->cur_pmbase
= val
;
4301 if (cpu_get_xl(env
, PRV_U
) == MXL_RV32
) {
4302 env
->cur_pmbase
&= UINT32_MAX
;
4305 env
->mmte
|= EXT_STATUS_DIRTY
;
4307 /* Set XS and SD bits, since PM CSRs are dirty */
4308 mstatus
= env
->mstatus
| MSTATUS_XS
;
4309 write_mstatus(env
, csrno
, mstatus
);
4310 return RISCV_EXCP_NONE
;
/* Crypto Extension */
/*
 * Produce a new value for the Zkr 'seed' CSR.
 *
 * new_value/write_mask are not used by the current implementation: every
 * access simply draws 16 bits from the host entropy source.  On success
 * the random bits are returned with the ES16 status; on failure the DEAD
 * status is returned (see the comment on the error path below).
 */
target_ulong riscv_new_csr_seed(target_ulong new_value,
                                target_ulong write_mask)
{
    uint16_t random_v;
    Error *random_e = NULL;
    int random_r;
    target_ulong rval;

    /* Draw 2 bytes of guest-visible randomness from the host RNG. */
    random_r = qemu_guest_getrandom(&random_v, 2, &random_e);
    if (unlikely(random_r < 0)) {
        /*
         * Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return a
         * failure indication to the guest. There is no reason
         * we know to expect the failure to be transitory, so
         * indicate DEAD to avoid having the guest spin on WAIT.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
                      __func__, error_get_pretty(random_e));
        error_free(random_e);
        rval = SEED_OPST_DEAD;
    } else {
        rval = random_v | SEED_OPST_ES16;
    }
    return rval;
}
4344 static RISCVException
rmw_seed(CPURISCVState
*env
, int csrno
,
4345 target_ulong
*ret_value
,
4346 target_ulong new_value
,
4347 target_ulong write_mask
)
4351 rval
= riscv_new_csr_seed(new_value
, write_mask
);
4357 return RISCV_EXCP_NONE
;
/*
 * riscv_csrrw - read and/or update control and status register
 *
 * csrr   <->  riscv_csrrw(env, csrno, ret_value, 0, 0);
 * csrrw  <->  riscv_csrrw(env, csrno, ret_value, value, -1);
 * csrrs  <->  riscv_csrrw(env, csrno, ret_value, -1, value);
 * csrrc  <->  riscv_csrrw(env, csrno, ret_value, 0, value);
 */

/*
 * Common access checks shared by all CSR entry points: Zicsr availability,
 * CSR existence, privileged-spec version, writes to read-only CSRs, the
 * CSR's own predicate, and (system emulation only) privilege level.
 * Returns RISCV_EXCP_NONE when the access may proceed.
 */
static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
                                               int csrno,
                                               target_ulong write_mask)
{
    /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
    /* csr[11:10] == 0b11 marks the CSR address as read-only */
    bool read_only = get_field(csrno, 0xC00) == 3;
    int csr_min_priv = csr_ops[csrno].min_priv_ver;

    /* ensure the CSR extension is enabled */
    if (!riscv_cpu_cfg(env)->ext_zicsr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* ensure CSR is implemented by checking predicate */
    if (!csr_ops[csrno].predicate) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* privileged spec version check */
    if (env->priv_ver < csr_min_priv) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* read / write check */
    if (write_mask && read_only) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /*
     * The predicate() not only does existence check but also does some
     * access control check which triggers for example virtual instruction
     * exception in some cases. When writing read-only CSRs in those cases
     * illegal instruction exception should be triggered instead of virtual
     * instruction exception. Hence this comes after the read / write check.
     */
    RISCVException ret = csr_ops[csrno].predicate(env, csrno);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

#if !defined(CONFIG_USER_ONLY)
    int csr_priv, effective_priv = env->priv;

    if (riscv_has_ext(env, RVH) && env->priv == PRV_S &&
        !env->virt_enabled) {
        /*
         * We are in HS mode. Add 1 to the effective privilege level to
         * allow us to access the Hypervisor CSRs.
         */
        effective_priv++;
    }

    /* csr[9:8] encodes the lowest privilege level that may access the CSR */
    csr_priv = get_field(csrno, 0x300);
    if (!env->debugger && (effective_priv < csr_priv)) {
        if (csr_priv == (PRV_S + 1) && env->virt_enabled) {
            /* Hypervisor CSR touched from a virtualized guest */
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        return RISCV_EXCP_ILLEGAL_INST;
    }
#endif
    return RISCV_EXCP_NONE;
}
/*
 * target_ulong-wide CSR read/modify/write engine.
 *
 * Dispatches to the CSR's combined .op hook when one is registered;
 * otherwise performs a separate read (via .read) and masked write
 * (via .write).  A NULL ret_value suppresses the read and any of its
 * side effects (rd == x0 case, see comment below).
 */
static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
                                       target_ulong *ret_value,
                                       target_ulong new_value,
                                       target_ulong write_mask)
{
    RISCVException ret;
    target_ulong old_value = 0;

    /* execute combined read/write operation if it exists */
    if (csr_ops[csrno].op) {
        return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
    }

    /*
     * ret_value == NULL means that rd=x0 and we're coming from helper_csrw()
     * and we can't throw side effects caused by CSR reads.
     */
    if (ret_value) {
        /* if no accessor exists then return failure */
        if (!csr_ops[csrno].read) {
            return RISCV_EXCP_ILLEGAL_INST;
        }
        /* read old value */
        ret = csr_ops[csrno].read(env, csrno, &old_value);
        if (ret != RISCV_EXCP_NONE) {
            return ret;
        }
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (write_mask) {
        /* merge the bits selected by write_mask into the old value */
        new_value = (old_value & ~write_mask) | (new_value & write_mask);
        if (csr_ops[csrno].write) {
            ret = csr_ops[csrno].write(env, csrno, new_value);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}
4480 RISCVException
riscv_csrrw(CPURISCVState
*env
, int csrno
,
4481 target_ulong
*ret_value
,
4482 target_ulong new_value
, target_ulong write_mask
)
4484 RISCVException ret
= riscv_csrrw_check(env
, csrno
, write_mask
);
4485 if (ret
!= RISCV_EXCP_NONE
) {
4489 return riscv_csrrw_do64(env
, csrno
, ret_value
, new_value
, write_mask
);
/*
 * 128-bit CSR read/modify/write engine.  Requires the CSR to provide a
 * .read128 hook; the write side uses .write128 when present and otherwise
 * falls back to the 64-bit .write hook on the low half.
 */
static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
                                        Int128 *ret_value,
                                        Int128 new_value,
                                        Int128 write_mask)
{
    RISCVException ret;
    Int128 old_value;

    /* read old value */
    ret = csr_ops[csrno].read128(env, csrno, &old_value);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (int128_nz(write_mask)) {
        /* merge the bits selected by write_mask into the old value */
        new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
                              int128_and(new_value, write_mask));
        if (csr_ops[csrno].write128) {
            ret = csr_ops[csrno].write128(env, csrno, new_value);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        } else if (csr_ops[csrno].write) {
            /* avoids having to write wrappers for all registers */
            ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}
/*
 * 128-bit CSR read/modify/write entry point (RV128).  Uses the CSR's
 * 128-bit accessors when registered, otherwise falls back to the 64-bit
 * path on the low halves of the operands.
 */
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value,
                                Int128 new_value, Int128 write_mask)
{
    RISCVException ret;

    ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask));
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (csr_ops[csrno].read128) {
        return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
    }

    /*
     * Fall back to 64-bit version for now, if the 128-bit alternative isn't
     * implemented yet.
     * Note, some CSRs don't need to extend to MXLEN (64 upper bits non
     * significant), for those, this fallback is correctly handling the
     * accesses
     */
    target_ulong old_value;
    ret = riscv_csrrw_do64(env, csrno, &old_value,
                           int128_getlo(new_value),
                           int128_getlo(write_mask));
    if (ret == RISCV_EXCP_NONE && ret_value) {
        /* zero-extend the 64-bit result into the 128-bit return value */
        *ret_value = int128_make64(old_value);
    }
    return ret;
}
/*
 * Debugger support. If not in user mode, set env->debugger before the
 * riscv_csrrw call and clear it after the call.
 */
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask)
{
    RISCVException ret;
#if !defined(CONFIG_USER_ONLY)
    /* env->debugger bypasses the privilege check in riscv_csrrw_check() */
    env->debugger = true;
#endif
    ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
#if !defined(CONFIG_USER_ONLY)
    env->debugger = false;
#endif
    return ret;
}
/* Read the Zcmt jump-vector-table CSR (jvt). */
static RISCVException read_jvt(CPURISCVState *env, int csrno,
                               target_ulong *val)
{
    *val = env->jvt;
    return RISCV_EXCP_NONE;
}
/* Write the Zcmt jump-vector-table CSR (jvt). */
static RISCVException write_jvt(CPURISCVState *env, int csrno,
                                target_ulong val)
{
    env->jvt = val;
    return RISCV_EXCP_NONE;
}
4599 * Control and Status Register function table
4600 * riscv_csr_operations::predicate() must be provided for an implemented CSR
4602 riscv_csr_operations csr_ops
[CSR_TABLE_SIZE
] = {
4603 /* User Floating-Point CSRs */
4604 [CSR_FFLAGS
] = { "fflags", fs
, read_fflags
, write_fflags
},
4605 [CSR_FRM
] = { "frm", fs
, read_frm
, write_frm
},
4606 [CSR_FCSR
] = { "fcsr", fs
, read_fcsr
, write_fcsr
},
4608 [CSR_VSTART
] = { "vstart", vs
, read_vstart
, write_vstart
},
4609 [CSR_VXSAT
] = { "vxsat", vs
, read_vxsat
, write_vxsat
},
4610 [CSR_VXRM
] = { "vxrm", vs
, read_vxrm
, write_vxrm
},
4611 [CSR_VCSR
] = { "vcsr", vs
, read_vcsr
, write_vcsr
},
4612 [CSR_VL
] = { "vl", vs
, read_vl
},
4613 [CSR_VTYPE
] = { "vtype", vs
, read_vtype
},
4614 [CSR_VLENB
] = { "vlenb", vs
, read_vlenb
},
4615 /* User Timers and Counters */
4616 [CSR_CYCLE
] = { "cycle", ctr
, read_hpmcounter
},
4617 [CSR_INSTRET
] = { "instret", ctr
, read_hpmcounter
},
4618 [CSR_CYCLEH
] = { "cycleh", ctr32
, read_hpmcounterh
},
4619 [CSR_INSTRETH
] = { "instreth", ctr32
, read_hpmcounterh
},
4622 * In privileged mode, the monitor will have to emulate TIME CSRs only if
4623 * rdtime callback is not provided by machine/platform emulation.
4625 [CSR_TIME
] = { "time", ctr
, read_time
},
4626 [CSR_TIMEH
] = { "timeh", ctr32
, read_timeh
},
4628 /* Crypto Extension */
4629 [CSR_SEED
] = { "seed", seed
, NULL
, NULL
, rmw_seed
},
4631 /* Zcmt Extension */
4632 [CSR_JVT
] = {"jvt", zcmt
, read_jvt
, write_jvt
},
4634 #if !defined(CONFIG_USER_ONLY)
4635 /* Machine Timers and Counters */
4636 [CSR_MCYCLE
] = { "mcycle", any
, read_hpmcounter
,
4637 write_mhpmcounter
},
4638 [CSR_MINSTRET
] = { "minstret", any
, read_hpmcounter
,
4639 write_mhpmcounter
},
4640 [CSR_MCYCLEH
] = { "mcycleh", any32
, read_hpmcounterh
,
4641 write_mhpmcounterh
},
4642 [CSR_MINSTRETH
] = { "minstreth", any32
, read_hpmcounterh
,
4643 write_mhpmcounterh
},
4645 /* Machine Information Registers */
4646 [CSR_MVENDORID
] = { "mvendorid", any
, read_mvendorid
},
4647 [CSR_MARCHID
] = { "marchid", any
, read_marchid
},
4648 [CSR_MIMPID
] = { "mimpid", any
, read_mimpid
},
4649 [CSR_MHARTID
] = { "mhartid", any
, read_mhartid
},
4651 [CSR_MCONFIGPTR
] = { "mconfigptr", any
, read_zero
,
4652 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4653 /* Machine Trap Setup */
4654 [CSR_MSTATUS
] = { "mstatus", any
, read_mstatus
, write_mstatus
,
4655 NULL
, read_mstatus_i128
},
4656 [CSR_MISA
] = { "misa", any
, read_misa
, write_misa
,
4657 NULL
, read_misa_i128
},
4658 [CSR_MIDELEG
] = { "mideleg", any
, NULL
, NULL
, rmw_mideleg
},
4659 [CSR_MEDELEG
] = { "medeleg", any
, read_medeleg
, write_medeleg
},
4660 [CSR_MIE
] = { "mie", any
, NULL
, NULL
, rmw_mie
},
4661 [CSR_MTVEC
] = { "mtvec", any
, read_mtvec
, write_mtvec
},
4662 [CSR_MCOUNTEREN
] = { "mcounteren", umode
, read_mcounteren
,
4665 [CSR_MSTATUSH
] = { "mstatush", any32
, read_mstatush
,
4667 [CSR_MEDELEGH
] = { "medelegh", any32
, read_zero
, write_ignore
,
4668 .min_priv_ver
= PRIV_VERSION_1_13_0
},
4669 [CSR_HEDELEGH
] = { "hedelegh", hmode32
, read_hedelegh
, write_hedelegh
,
4670 .min_priv_ver
= PRIV_VERSION_1_13_0
},
4672 /* Machine Trap Handling */
4673 [CSR_MSCRATCH
] = { "mscratch", any
, read_mscratch
, write_mscratch
,
4674 NULL
, read_mscratch_i128
, write_mscratch_i128
},
4675 [CSR_MEPC
] = { "mepc", any
, read_mepc
, write_mepc
},
4676 [CSR_MCAUSE
] = { "mcause", any
, read_mcause
, write_mcause
},
4677 [CSR_MTVAL
] = { "mtval", any
, read_mtval
, write_mtval
},
4678 [CSR_MIP
] = { "mip", any
, NULL
, NULL
, rmw_mip
},
4680 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */
4681 [CSR_MISELECT
] = { "miselect", aia_any
, NULL
, NULL
, rmw_xiselect
},
4682 [CSR_MIREG
] = { "mireg", aia_any
, NULL
, NULL
, rmw_xireg
},
4684 /* Machine-Level Interrupts (AIA) */
4685 [CSR_MTOPEI
] = { "mtopei", aia_any
, NULL
, NULL
, rmw_xtopei
},
4686 [CSR_MTOPI
] = { "mtopi", aia_any
, read_mtopi
},
4688 /* Virtual Interrupts for Supervisor Level (AIA) */
4689 [CSR_MVIEN
] = { "mvien", aia_any
, NULL
, NULL
, rmw_mvien
},
4690 [CSR_MVIP
] = { "mvip", aia_any
, NULL
, NULL
, rmw_mvip
},
4692 /* Machine-Level High-Half CSRs (AIA) */
4693 [CSR_MIDELEGH
] = { "midelegh", aia_any32
, NULL
, NULL
, rmw_midelegh
},
4694 [CSR_MIEH
] = { "mieh", aia_any32
, NULL
, NULL
, rmw_mieh
},
4695 [CSR_MVIENH
] = { "mvienh", aia_any32
, NULL
, NULL
, rmw_mvienh
},
4696 [CSR_MVIPH
] = { "mviph", aia_any32
, NULL
, NULL
, rmw_mviph
},
4697 [CSR_MIPH
] = { "miph", aia_any32
, NULL
, NULL
, rmw_miph
},
4699 /* Execution environment configuration */
4700 [CSR_MENVCFG
] = { "menvcfg", umode
, read_menvcfg
, write_menvcfg
,
4701 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4702 [CSR_MENVCFGH
] = { "menvcfgh", umode32
, read_menvcfgh
, write_menvcfgh
,
4703 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4704 [CSR_SENVCFG
] = { "senvcfg", smode
, read_senvcfg
, write_senvcfg
,
4705 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4706 [CSR_HENVCFG
] = { "henvcfg", hmode
, read_henvcfg
, write_henvcfg
,
4707 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4708 [CSR_HENVCFGH
] = { "henvcfgh", hmode32
, read_henvcfgh
, write_henvcfgh
,
4709 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4711 /* Smstateen extension CSRs */
4712 [CSR_MSTATEEN0
] = { "mstateen0", mstateen
, read_mstateen
, write_mstateen0
,
4713 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4714 [CSR_MSTATEEN0H
] = { "mstateen0h", mstateen
, read_mstateenh
,
4716 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4717 [CSR_MSTATEEN1
] = { "mstateen1", mstateen
, read_mstateen
,
4719 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4720 [CSR_MSTATEEN1H
] = { "mstateen1h", mstateen
, read_mstateenh
,
4721 write_mstateenh_1_3
,
4722 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4723 [CSR_MSTATEEN2
] = { "mstateen2", mstateen
, read_mstateen
,
4725 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4726 [CSR_MSTATEEN2H
] = { "mstateen2h", mstateen
, read_mstateenh
,
4727 write_mstateenh_1_3
,
4728 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4729 [CSR_MSTATEEN3
] = { "mstateen3", mstateen
, read_mstateen
,
4731 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4732 [CSR_MSTATEEN3H
] = { "mstateen3h", mstateen
, read_mstateenh
,
4733 write_mstateenh_1_3
,
4734 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4735 [CSR_HSTATEEN0
] = { "hstateen0", hstateen
, read_hstateen
, write_hstateen0
,
4736 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4737 [CSR_HSTATEEN0H
] = { "hstateen0h", hstateenh
, read_hstateenh
,
4739 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4740 [CSR_HSTATEEN1
] = { "hstateen1", hstateen
, read_hstateen
,
4742 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4743 [CSR_HSTATEEN1H
] = { "hstateen1h", hstateenh
, read_hstateenh
,
4744 write_hstateenh_1_3
,
4745 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4746 [CSR_HSTATEEN2
] = { "hstateen2", hstateen
, read_hstateen
,
4748 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4749 [CSR_HSTATEEN2H
] = { "hstateen2h", hstateenh
, read_hstateenh
,
4750 write_hstateenh_1_3
,
4751 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4752 [CSR_HSTATEEN3
] = { "hstateen3", hstateen
, read_hstateen
,
4754 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4755 [CSR_HSTATEEN3H
] = { "hstateen3h", hstateenh
, read_hstateenh
,
4756 write_hstateenh_1_3
,
4757 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4758 [CSR_SSTATEEN0
] = { "sstateen0", sstateen
, read_sstateen
, write_sstateen0
,
4759 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4760 [CSR_SSTATEEN1
] = { "sstateen1", sstateen
, read_sstateen
,
4762 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4763 [CSR_SSTATEEN2
] = { "sstateen2", sstateen
, read_sstateen
,
4765 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4766 [CSR_SSTATEEN3
] = { "sstateen3", sstateen
, read_sstateen
,
4768 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4770 /* Supervisor Trap Setup */
4771 [CSR_SSTATUS
] = { "sstatus", smode
, read_sstatus
, write_sstatus
,
4772 NULL
, read_sstatus_i128
},
4773 [CSR_SIE
] = { "sie", smode
, NULL
, NULL
, rmw_sie
},
4774 [CSR_STVEC
] = { "stvec", smode
, read_stvec
, write_stvec
},
4775 [CSR_SCOUNTEREN
] = { "scounteren", smode
, read_scounteren
,
4778 /* Supervisor Trap Handling */
4779 [CSR_SSCRATCH
] = { "sscratch", smode
, read_sscratch
, write_sscratch
,
4780 NULL
, read_sscratch_i128
, write_sscratch_i128
},
4781 [CSR_SEPC
] = { "sepc", smode
, read_sepc
, write_sepc
},
4782 [CSR_SCAUSE
] = { "scause", smode
, read_scause
, write_scause
},
4783 [CSR_STVAL
] = { "stval", smode
, read_stval
, write_stval
},
4784 [CSR_SIP
] = { "sip", smode
, NULL
, NULL
, rmw_sip
},
4785 [CSR_STIMECMP
] = { "stimecmp", sstc
, read_stimecmp
, write_stimecmp
,
4786 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4787 [CSR_STIMECMPH
] = { "stimecmph", sstc_32
, read_stimecmph
, write_stimecmph
,
4788 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4789 [CSR_VSTIMECMP
] = { "vstimecmp", sstc
, read_vstimecmp
,
4791 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4792 [CSR_VSTIMECMPH
] = { "vstimecmph", sstc_32
, read_vstimecmph
,
4794 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4796 /* Supervisor Protection and Translation */
4797 [CSR_SATP
] = { "satp", satp
, read_satp
, write_satp
},
4799 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
4800 [CSR_SISELECT
] = { "siselect", aia_smode
, NULL
, NULL
, rmw_xiselect
},
4801 [CSR_SIREG
] = { "sireg", aia_smode
, NULL
, NULL
, rmw_xireg
},
4803 /* Supervisor-Level Interrupts (AIA) */
4804 [CSR_STOPEI
] = { "stopei", aia_smode
, NULL
, NULL
, rmw_xtopei
},
4805 [CSR_STOPI
] = { "stopi", aia_smode
, read_stopi
},
4807 /* Supervisor-Level High-Half CSRs (AIA) */
4808 [CSR_SIEH
] = { "sieh", aia_smode32
, NULL
, NULL
, rmw_sieh
},
4809 [CSR_SIPH
] = { "siph", aia_smode32
, NULL
, NULL
, rmw_siph
},
4811 [CSR_HSTATUS
] = { "hstatus", hmode
, read_hstatus
, write_hstatus
,
4812 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4813 [CSR_HEDELEG
] = { "hedeleg", hmode
, read_hedeleg
, write_hedeleg
,
4814 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4815 [CSR_HIDELEG
] = { "hideleg", hmode
, NULL
, NULL
, rmw_hideleg
,
4816 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4817 [CSR_HVIP
] = { "hvip", hmode
, NULL
, NULL
, rmw_hvip
,
4818 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4819 [CSR_HIP
] = { "hip", hmode
, NULL
, NULL
, rmw_hip
,
4820 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4821 [CSR_HIE
] = { "hie", hmode
, NULL
, NULL
, rmw_hie
,
4822 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4823 [CSR_HCOUNTEREN
] = { "hcounteren", hmode
, read_hcounteren
,
4825 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4826 [CSR_HGEIE
] = { "hgeie", hmode
, read_hgeie
, write_hgeie
,
4827 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4828 [CSR_HTVAL
] = { "htval", hmode
, read_htval
, write_htval
,
4829 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4830 [CSR_HTINST
] = { "htinst", hmode
, read_htinst
, write_htinst
,
4831 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4832 [CSR_HGEIP
] = { "hgeip", hmode
, read_hgeip
,
4833 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4834 [CSR_HGATP
] = { "hgatp", hgatp
, read_hgatp
, write_hgatp
,
4835 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4836 [CSR_HTIMEDELTA
] = { "htimedelta", hmode
, read_htimedelta
,
4838 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4839 [CSR_HTIMEDELTAH
] = { "htimedeltah", hmode32
, read_htimedeltah
,
4841 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4843 [CSR_VSSTATUS
] = { "vsstatus", hmode
, read_vsstatus
,
4845 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4846 [CSR_VSIP
] = { "vsip", hmode
, NULL
, NULL
, rmw_vsip
,
4847 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4848 [CSR_VSIE
] = { "vsie", hmode
, NULL
, NULL
, rmw_vsie
,
4849 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4850 [CSR_VSTVEC
] = { "vstvec", hmode
, read_vstvec
, write_vstvec
,
4851 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4852 [CSR_VSSCRATCH
] = { "vsscratch", hmode
, read_vsscratch
,
4854 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4855 [CSR_VSEPC
] = { "vsepc", hmode
, read_vsepc
, write_vsepc
,
4856 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4857 [CSR_VSCAUSE
] = { "vscause", hmode
, read_vscause
, write_vscause
,
4858 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4859 [CSR_VSTVAL
] = { "vstval", hmode
, read_vstval
, write_vstval
,
4860 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4861 [CSR_VSATP
] = { "vsatp", hmode
, read_vsatp
, write_vsatp
,
4862 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4864 [CSR_MTVAL2
] = { "mtval2", hmode
, read_mtval2
, write_mtval2
,
4865 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4866 [CSR_MTINST
] = { "mtinst", hmode
, read_mtinst
, write_mtinst
,
4867 .min_priv_ver
= PRIV_VERSION_1_12_0
},
4869 /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
4870 [CSR_HVIEN
] = { "hvien", aia_hmode
, NULL
, NULL
, rmw_hvien
},
4871 [CSR_HVICTL
] = { "hvictl", aia_hmode
, read_hvictl
,
4873 [CSR_HVIPRIO1
] = { "hviprio1", aia_hmode
, read_hviprio1
,
4875 [CSR_HVIPRIO2
] = { "hviprio2", aia_hmode
, read_hviprio2
,
4878 * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
4880 [CSR_VSISELECT
] = { "vsiselect", aia_hmode
, NULL
, NULL
,
4882 [CSR_VSIREG
] = { "vsireg", aia_hmode
, NULL
, NULL
, rmw_xireg
},
4884 /* VS-Level Interrupts (H-extension with AIA) */
4885 [CSR_VSTOPEI
] = { "vstopei", aia_hmode
, NULL
, NULL
, rmw_xtopei
},
4886 [CSR_VSTOPI
] = { "vstopi", aia_hmode
, read_vstopi
},
4888 /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
4889 [CSR_HIDELEGH
] = { "hidelegh", aia_hmode32
, NULL
, NULL
,
4891 [CSR_HVIENH
] = { "hvienh", aia_hmode32
, NULL
, NULL
, rmw_hvienh
},
4892 [CSR_HVIPH
] = { "hviph", aia_hmode32
, NULL
, NULL
, rmw_hviph
},
4893 [CSR_HVIPRIO1H
] = { "hviprio1h", aia_hmode32
, read_hviprio1h
,
4895 [CSR_HVIPRIO2H
] = { "hviprio2h", aia_hmode32
, read_hviprio2h
,
4897 [CSR_VSIEH
] = { "vsieh", aia_hmode32
, NULL
, NULL
, rmw_vsieh
},
4898 [CSR_VSIPH
] = { "vsiph", aia_hmode32
, NULL
, NULL
, rmw_vsiph
},
4900 /* Physical Memory Protection */
4901 [CSR_MSECCFG
] = { "mseccfg", have_mseccfg
, read_mseccfg
, write_mseccfg
,
4902 .min_priv_ver
= PRIV_VERSION_1_11_0
},
4903 [CSR_PMPCFG0
] = { "pmpcfg0", pmp
, read_pmpcfg
, write_pmpcfg
},
4904 [CSR_PMPCFG1
] = { "pmpcfg1", pmp
, read_pmpcfg
, write_pmpcfg
},
4905 [CSR_PMPCFG2
] = { "pmpcfg2", pmp
, read_pmpcfg
, write_pmpcfg
},
4906 [CSR_PMPCFG3
] = { "pmpcfg3", pmp
, read_pmpcfg
, write_pmpcfg
},
4907 [CSR_PMPADDR0
] = { "pmpaddr0", pmp
, read_pmpaddr
, write_pmpaddr
},
4908 [CSR_PMPADDR1
] = { "pmpaddr1", pmp
, read_pmpaddr
, write_pmpaddr
},
4909 [CSR_PMPADDR2
] = { "pmpaddr2", pmp
, read_pmpaddr
, write_pmpaddr
},
4910 [CSR_PMPADDR3
] = { "pmpaddr3", pmp
, read_pmpaddr
, write_pmpaddr
},
4911 [CSR_PMPADDR4
] = { "pmpaddr4", pmp
, read_pmpaddr
, write_pmpaddr
},
4912 [CSR_PMPADDR5
] = { "pmpaddr5", pmp
, read_pmpaddr
, write_pmpaddr
},
4913 [CSR_PMPADDR6
] = { "pmpaddr6", pmp
, read_pmpaddr
, write_pmpaddr
},
4914 [CSR_PMPADDR7
] = { "pmpaddr7", pmp
, read_pmpaddr
, write_pmpaddr
},
4915 [CSR_PMPADDR8
] = { "pmpaddr8", pmp
, read_pmpaddr
, write_pmpaddr
},
4916 [CSR_PMPADDR9
] = { "pmpaddr9", pmp
, read_pmpaddr
, write_pmpaddr
},
4917 [CSR_PMPADDR10
] = { "pmpaddr10", pmp
, read_pmpaddr
, write_pmpaddr
},
4918 [CSR_PMPADDR11
] = { "pmpaddr11", pmp
, read_pmpaddr
, write_pmpaddr
},
4919 [CSR_PMPADDR12
] = { "pmpaddr12", pmp
, read_pmpaddr
, write_pmpaddr
},
4920 [CSR_PMPADDR13
] = { "pmpaddr13", pmp
, read_pmpaddr
, write_pmpaddr
},
4921 [CSR_PMPADDR14
] = { "pmpaddr14", pmp
, read_pmpaddr
, write_pmpaddr
},
4922 [CSR_PMPADDR15
] = { "pmpaddr15", pmp
, read_pmpaddr
, write_pmpaddr
},
4925 [CSR_TSELECT
] = { "tselect", debug
, read_tselect
, write_tselect
},
4926 [CSR_TDATA1
] = { "tdata1", debug
, read_tdata
, write_tdata
},
4927 [CSR_TDATA2
] = { "tdata2", debug
, read_tdata
, write_tdata
},
4928 [CSR_TDATA3
] = { "tdata3", debug
, read_tdata
, write_tdata
},
4929 [CSR_TINFO
] = { "tinfo", debug
, read_tinfo
, write_ignore
},
4930 [CSR_MCONTEXT
] = { "mcontext", debug
, read_mcontext
, write_mcontext
},
4932 /* User Pointer Masking */
4933 [CSR_UMTE
] = { "umte", pointer_masking
, read_umte
, write_umte
},
4934 [CSR_UPMMASK
] = { "upmmask", pointer_masking
, read_upmmask
,
4936 [CSR_UPMBASE
] = { "upmbase", pointer_masking
, read_upmbase
,
4938 /* Machine Pointer Masking */
4939 [CSR_MMTE
] = { "mmte", pointer_masking
, read_mmte
, write_mmte
},
4940 [CSR_MPMMASK
] = { "mpmmask", pointer_masking
, read_mpmmask
,
4942 [CSR_MPMBASE
] = { "mpmbase", pointer_masking
, read_mpmbase
,
4944 /* Supervisor Pointer Masking */
4945 [CSR_SMTE
] = { "smte", pointer_masking
, read_smte
, write_smte
},
4946 [CSR_SPMMASK
] = { "spmmask", pointer_masking
, read_spmmask
,
4948 [CSR_SPMBASE
] = { "spmbase", pointer_masking
, read_spmbase
,
4951 /* Performance Counters */
4952 [CSR_HPMCOUNTER3
] = { "hpmcounter3", ctr
, read_hpmcounter
},
4953 [CSR_HPMCOUNTER4
] = { "hpmcounter4", ctr
, read_hpmcounter
},
4954 [CSR_HPMCOUNTER5
] = { "hpmcounter5", ctr
, read_hpmcounter
},
4955 [CSR_HPMCOUNTER6
] = { "hpmcounter6", ctr
, read_hpmcounter
},
4956 [CSR_HPMCOUNTER7
] = { "hpmcounter7", ctr
, read_hpmcounter
},
4957 [CSR_HPMCOUNTER8
] = { "hpmcounter8", ctr
, read_hpmcounter
},
4958 [CSR_HPMCOUNTER9
] = { "hpmcounter9", ctr
, read_hpmcounter
},
4959 [CSR_HPMCOUNTER10
] = { "hpmcounter10", ctr
, read_hpmcounter
},
4960 [CSR_HPMCOUNTER11
] = { "hpmcounter11", ctr
, read_hpmcounter
},
4961 [CSR_HPMCOUNTER12
] = { "hpmcounter12", ctr
, read_hpmcounter
},
4962 [CSR_HPMCOUNTER13
] = { "hpmcounter13", ctr
, read_hpmcounter
},
4963 [CSR_HPMCOUNTER14
] = { "hpmcounter14", ctr
, read_hpmcounter
},
4964 [CSR_HPMCOUNTER15
] = { "hpmcounter15", ctr
, read_hpmcounter
},
4965 [CSR_HPMCOUNTER16
] = { "hpmcounter16", ctr
, read_hpmcounter
},
4966 [CSR_HPMCOUNTER17
] = { "hpmcounter17", ctr
, read_hpmcounter
},
4967 [CSR_HPMCOUNTER18
] = { "hpmcounter18", ctr
, read_hpmcounter
},
4968 [CSR_HPMCOUNTER19
] = { "hpmcounter19", ctr
, read_hpmcounter
},
4969 [CSR_HPMCOUNTER20
] = { "hpmcounter20", ctr
, read_hpmcounter
},
4970 [CSR_HPMCOUNTER21
] = { "hpmcounter21", ctr
, read_hpmcounter
},
4971 [CSR_HPMCOUNTER22
] = { "hpmcounter22", ctr
, read_hpmcounter
},
4972 [CSR_HPMCOUNTER23
] = { "hpmcounter23", ctr
, read_hpmcounter
},
4973 [CSR_HPMCOUNTER24
] = { "hpmcounter24", ctr
, read_hpmcounter
},
4974 [CSR_HPMCOUNTER25
] = { "hpmcounter25", ctr
, read_hpmcounter
},
4975 [CSR_HPMCOUNTER26
] = { "hpmcounter26", ctr
, read_hpmcounter
},
4976 [CSR_HPMCOUNTER27
] = { "hpmcounter27", ctr
, read_hpmcounter
},
4977 [CSR_HPMCOUNTER28
] = { "hpmcounter28", ctr
, read_hpmcounter
},
4978 [CSR_HPMCOUNTER29
] = { "hpmcounter29", ctr
, read_hpmcounter
},
4979 [CSR_HPMCOUNTER30
] = { "hpmcounter30", ctr
, read_hpmcounter
},
4980 [CSR_HPMCOUNTER31
] = { "hpmcounter31", ctr
, read_hpmcounter
},
4982 [CSR_MHPMCOUNTER3
] = { "mhpmcounter3", mctr
, read_hpmcounter
,
4983 write_mhpmcounter
},
4984 [CSR_MHPMCOUNTER4
] = { "mhpmcounter4", mctr
, read_hpmcounter
,
4985 write_mhpmcounter
},
4986 [CSR_MHPMCOUNTER5
] = { "mhpmcounter5", mctr
, read_hpmcounter
,
4987 write_mhpmcounter
},
4988 [CSR_MHPMCOUNTER6
] = { "mhpmcounter6", mctr
, read_hpmcounter
,
4989 write_mhpmcounter
},
4990 [CSR_MHPMCOUNTER7
] = { "mhpmcounter7", mctr
, read_hpmcounter
,
4991 write_mhpmcounter
},
4992 [CSR_MHPMCOUNTER8
] = { "mhpmcounter8", mctr
, read_hpmcounter
,
4993 write_mhpmcounter
},
4994 [CSR_MHPMCOUNTER9
] = { "mhpmcounter9", mctr
, read_hpmcounter
,
4995 write_mhpmcounter
},
4996 [CSR_MHPMCOUNTER10
] = { "mhpmcounter10", mctr
, read_hpmcounter
,
4997 write_mhpmcounter
},
4998 [CSR_MHPMCOUNTER11
] = { "mhpmcounter11", mctr
, read_hpmcounter
,
4999 write_mhpmcounter
},
5000 [CSR_MHPMCOUNTER12
] = { "mhpmcounter12", mctr
, read_hpmcounter
,
5001 write_mhpmcounter
},
5002 [CSR_MHPMCOUNTER13
] = { "mhpmcounter13", mctr
, read_hpmcounter
,
5003 write_mhpmcounter
},
5004 [CSR_MHPMCOUNTER14
] = { "mhpmcounter14", mctr
, read_hpmcounter
,
5005 write_mhpmcounter
},
5006 [CSR_MHPMCOUNTER15
] = { "mhpmcounter15", mctr
, read_hpmcounter
,
5007 write_mhpmcounter
},
5008 [CSR_MHPMCOUNTER16
] = { "mhpmcounter16", mctr
, read_hpmcounter
,
5009 write_mhpmcounter
},
5010 [CSR_MHPMCOUNTER17
] = { "mhpmcounter17", mctr
, read_hpmcounter
,
5011 write_mhpmcounter
},
5012 [CSR_MHPMCOUNTER18
] = { "mhpmcounter18", mctr
, read_hpmcounter
,
5013 write_mhpmcounter
},
5014 [CSR_MHPMCOUNTER19
] = { "mhpmcounter19", mctr
, read_hpmcounter
,
5015 write_mhpmcounter
},
5016 [CSR_MHPMCOUNTER20
] = { "mhpmcounter20", mctr
, read_hpmcounter
,
5017 write_mhpmcounter
},
5018 [CSR_MHPMCOUNTER21
] = { "mhpmcounter21", mctr
, read_hpmcounter
,
5019 write_mhpmcounter
},
5020 [CSR_MHPMCOUNTER22
] = { "mhpmcounter22", mctr
, read_hpmcounter
,
5021 write_mhpmcounter
},
5022 [CSR_MHPMCOUNTER23
] = { "mhpmcounter23", mctr
, read_hpmcounter
,
5023 write_mhpmcounter
},
5024 [CSR_MHPMCOUNTER24
] = { "mhpmcounter24", mctr
, read_hpmcounter
,
5025 write_mhpmcounter
},
5026 [CSR_MHPMCOUNTER25
] = { "mhpmcounter25", mctr
, read_hpmcounter
,
5027 write_mhpmcounter
},
5028 [CSR_MHPMCOUNTER26
] = { "mhpmcounter26", mctr
, read_hpmcounter
,
5029 write_mhpmcounter
},
5030 [CSR_MHPMCOUNTER27
] = { "mhpmcounter27", mctr
, read_hpmcounter
,
5031 write_mhpmcounter
},
5032 [CSR_MHPMCOUNTER28
] = { "mhpmcounter28", mctr
, read_hpmcounter
,
5033 write_mhpmcounter
},
5034 [CSR_MHPMCOUNTER29
] = { "mhpmcounter29", mctr
, read_hpmcounter
,
5035 write_mhpmcounter
},
5036 [CSR_MHPMCOUNTER30
] = { "mhpmcounter30", mctr
, read_hpmcounter
,
5037 write_mhpmcounter
},
5038 [CSR_MHPMCOUNTER31
] = { "mhpmcounter31", mctr
, read_hpmcounter
,
5039 write_mhpmcounter
},
5041 [CSR_MCOUNTINHIBIT
] = { "mcountinhibit", any
, read_mcountinhibit
,
5042 write_mcountinhibit
,
5043 .min_priv_ver
= PRIV_VERSION_1_11_0
},
5045 [CSR_MHPMEVENT3
] = { "mhpmevent3", any
, read_mhpmevent
,
5047 [CSR_MHPMEVENT4
] = { "mhpmevent4", any
, read_mhpmevent
,
5049 [CSR_MHPMEVENT5
] = { "mhpmevent5", any
, read_mhpmevent
,
5051 [CSR_MHPMEVENT6
] = { "mhpmevent6", any
, read_mhpmevent
,
5053 [CSR_MHPMEVENT7
] = { "mhpmevent7", any
, read_mhpmevent
,
5055 [CSR_MHPMEVENT8
] = { "mhpmevent8", any
, read_mhpmevent
,
5057 [CSR_MHPMEVENT9
] = { "mhpmevent9", any
, read_mhpmevent
,
5059 [CSR_MHPMEVENT10
] = { "mhpmevent10", any
, read_mhpmevent
,
5061 [CSR_MHPMEVENT11
] = { "mhpmevent11", any
, read_mhpmevent
,
5063 [CSR_MHPMEVENT12
] = { "mhpmevent12", any
, read_mhpmevent
,
5065 [CSR_MHPMEVENT13
] = { "mhpmevent13", any
, read_mhpmevent
,
5067 [CSR_MHPMEVENT14
] = { "mhpmevent14", any
, read_mhpmevent
,
5069 [CSR_MHPMEVENT15
] = { "mhpmevent15", any
, read_mhpmevent
,
5071 [CSR_MHPMEVENT16
] = { "mhpmevent16", any
, read_mhpmevent
,
5073 [CSR_MHPMEVENT17
] = { "mhpmevent17", any
, read_mhpmevent
,
5075 [CSR_MHPMEVENT18
] = { "mhpmevent18", any
, read_mhpmevent
,
5077 [CSR_MHPMEVENT19
] = { "mhpmevent19", any
, read_mhpmevent
,
5079 [CSR_MHPMEVENT20
] = { "mhpmevent20", any
, read_mhpmevent
,
5081 [CSR_MHPMEVENT21
] = { "mhpmevent21", any
, read_mhpmevent
,
5083 [CSR_MHPMEVENT22
] = { "mhpmevent22", any
, read_mhpmevent
,
5085 [CSR_MHPMEVENT23
] = { "mhpmevent23", any
, read_mhpmevent
,
5087 [CSR_MHPMEVENT24
] = { "mhpmevent24", any
, read_mhpmevent
,
5089 [CSR_MHPMEVENT25
] = { "mhpmevent25", any
, read_mhpmevent
,
5091 [CSR_MHPMEVENT26
] = { "mhpmevent26", any
, read_mhpmevent
,
5093 [CSR_MHPMEVENT27
] = { "mhpmevent27", any
, read_mhpmevent
,
5095 [CSR_MHPMEVENT28
] = { "mhpmevent28", any
, read_mhpmevent
,
5097 [CSR_MHPMEVENT29
] = { "mhpmevent29", any
, read_mhpmevent
,
5099 [CSR_MHPMEVENT30
] = { "mhpmevent30", any
, read_mhpmevent
,
5101 [CSR_MHPMEVENT31
] = { "mhpmevent31", any
, read_mhpmevent
,
5104 [CSR_MHPMEVENT3H
] = { "mhpmevent3h", sscofpmf
, read_mhpmeventh
,
5106 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5107 [CSR_MHPMEVENT4H
] = { "mhpmevent4h", sscofpmf
, read_mhpmeventh
,
5109 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5110 [CSR_MHPMEVENT5H
] = { "mhpmevent5h", sscofpmf
, read_mhpmeventh
,
5112 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5113 [CSR_MHPMEVENT6H
] = { "mhpmevent6h", sscofpmf
, read_mhpmeventh
,
5115 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5116 [CSR_MHPMEVENT7H
] = { "mhpmevent7h", sscofpmf
, read_mhpmeventh
,
5118 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5119 [CSR_MHPMEVENT8H
] = { "mhpmevent8h", sscofpmf
, read_mhpmeventh
,
5121 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5122 [CSR_MHPMEVENT9H
] = { "mhpmevent9h", sscofpmf
, read_mhpmeventh
,
5124 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5125 [CSR_MHPMEVENT10H
] = { "mhpmevent10h", sscofpmf
, read_mhpmeventh
,
5127 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5128 [CSR_MHPMEVENT11H
] = { "mhpmevent11h", sscofpmf
, read_mhpmeventh
,
5130 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5131 [CSR_MHPMEVENT12H
] = { "mhpmevent12h", sscofpmf
, read_mhpmeventh
,
5133 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5134 [CSR_MHPMEVENT13H
] = { "mhpmevent13h", sscofpmf
, read_mhpmeventh
,
5136 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5137 [CSR_MHPMEVENT14H
] = { "mhpmevent14h", sscofpmf
, read_mhpmeventh
,
5139 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5140 [CSR_MHPMEVENT15H
] = { "mhpmevent15h", sscofpmf
, read_mhpmeventh
,
5142 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5143 [CSR_MHPMEVENT16H
] = { "mhpmevent16h", sscofpmf
, read_mhpmeventh
,
5145 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5146 [CSR_MHPMEVENT17H
] = { "mhpmevent17h", sscofpmf
, read_mhpmeventh
,
5148 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5149 [CSR_MHPMEVENT18H
] = { "mhpmevent18h", sscofpmf
, read_mhpmeventh
,
5151 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5152 [CSR_MHPMEVENT19H
] = { "mhpmevent19h", sscofpmf
, read_mhpmeventh
,
5154 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5155 [CSR_MHPMEVENT20H
] = { "mhpmevent20h", sscofpmf
, read_mhpmeventh
,
5157 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5158 [CSR_MHPMEVENT21H
] = { "mhpmevent21h", sscofpmf
, read_mhpmeventh
,
5160 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5161 [CSR_MHPMEVENT22H
] = { "mhpmevent22h", sscofpmf
, read_mhpmeventh
,
5163 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5164 [CSR_MHPMEVENT23H
] = { "mhpmevent23h", sscofpmf
, read_mhpmeventh
,
5166 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5167 [CSR_MHPMEVENT24H
] = { "mhpmevent24h", sscofpmf
, read_mhpmeventh
,
5169 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5170 [CSR_MHPMEVENT25H
] = { "mhpmevent25h", sscofpmf
, read_mhpmeventh
,
5172 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5173 [CSR_MHPMEVENT26H
] = { "mhpmevent26h", sscofpmf
, read_mhpmeventh
,
5175 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5176 [CSR_MHPMEVENT27H
] = { "mhpmevent27h", sscofpmf
, read_mhpmeventh
,
5178 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5179 [CSR_MHPMEVENT28H
] = { "mhpmevent28h", sscofpmf
, read_mhpmeventh
,
5181 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5182 [CSR_MHPMEVENT29H
] = { "mhpmevent29h", sscofpmf
, read_mhpmeventh
,
5184 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5185 [CSR_MHPMEVENT30H
] = { "mhpmevent30h", sscofpmf
, read_mhpmeventh
,
5187 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5188 [CSR_MHPMEVENT31H
] = { "mhpmevent31h", sscofpmf
, read_mhpmeventh
,
5190 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5192 [CSR_HPMCOUNTER3H
] = { "hpmcounter3h", ctr32
, read_hpmcounterh
},
5193 [CSR_HPMCOUNTER4H
] = { "hpmcounter4h", ctr32
, read_hpmcounterh
},
5194 [CSR_HPMCOUNTER5H
] = { "hpmcounter5h", ctr32
, read_hpmcounterh
},
5195 [CSR_HPMCOUNTER6H
] = { "hpmcounter6h", ctr32
, read_hpmcounterh
},
5196 [CSR_HPMCOUNTER7H
] = { "hpmcounter7h", ctr32
, read_hpmcounterh
},
5197 [CSR_HPMCOUNTER8H
] = { "hpmcounter8h", ctr32
, read_hpmcounterh
},
5198 [CSR_HPMCOUNTER9H
] = { "hpmcounter9h", ctr32
, read_hpmcounterh
},
5199 [CSR_HPMCOUNTER10H
] = { "hpmcounter10h", ctr32
, read_hpmcounterh
},
5200 [CSR_HPMCOUNTER11H
] = { "hpmcounter11h", ctr32
, read_hpmcounterh
},
5201 [CSR_HPMCOUNTER12H
] = { "hpmcounter12h", ctr32
, read_hpmcounterh
},
5202 [CSR_HPMCOUNTER13H
] = { "hpmcounter13h", ctr32
, read_hpmcounterh
},
5203 [CSR_HPMCOUNTER14H
] = { "hpmcounter14h", ctr32
, read_hpmcounterh
},
5204 [CSR_HPMCOUNTER15H
] = { "hpmcounter15h", ctr32
, read_hpmcounterh
},
5205 [CSR_HPMCOUNTER16H
] = { "hpmcounter16h", ctr32
, read_hpmcounterh
},
5206 [CSR_HPMCOUNTER17H
] = { "hpmcounter17h", ctr32
, read_hpmcounterh
},
5207 [CSR_HPMCOUNTER18H
] = { "hpmcounter18h", ctr32
, read_hpmcounterh
},
5208 [CSR_HPMCOUNTER19H
] = { "hpmcounter19h", ctr32
, read_hpmcounterh
},
5209 [CSR_HPMCOUNTER20H
] = { "hpmcounter20h", ctr32
, read_hpmcounterh
},
5210 [CSR_HPMCOUNTER21H
] = { "hpmcounter21h", ctr32
, read_hpmcounterh
},
5211 [CSR_HPMCOUNTER22H
] = { "hpmcounter22h", ctr32
, read_hpmcounterh
},
5212 [CSR_HPMCOUNTER23H
] = { "hpmcounter23h", ctr32
, read_hpmcounterh
},
5213 [CSR_HPMCOUNTER24H
] = { "hpmcounter24h", ctr32
, read_hpmcounterh
},
5214 [CSR_HPMCOUNTER25H
] = { "hpmcounter25h", ctr32
, read_hpmcounterh
},
5215 [CSR_HPMCOUNTER26H
] = { "hpmcounter26h", ctr32
, read_hpmcounterh
},
5216 [CSR_HPMCOUNTER27H
] = { "hpmcounter27h", ctr32
, read_hpmcounterh
},
5217 [CSR_HPMCOUNTER28H
] = { "hpmcounter28h", ctr32
, read_hpmcounterh
},
5218 [CSR_HPMCOUNTER29H
] = { "hpmcounter29h", ctr32
, read_hpmcounterh
},
5219 [CSR_HPMCOUNTER30H
] = { "hpmcounter30h", ctr32
, read_hpmcounterh
},
5220 [CSR_HPMCOUNTER31H
] = { "hpmcounter31h", ctr32
, read_hpmcounterh
},
5222 [CSR_MHPMCOUNTER3H
] = { "mhpmcounter3h", mctr32
, read_hpmcounterh
,
5223 write_mhpmcounterh
},
5224 [CSR_MHPMCOUNTER4H
] = { "mhpmcounter4h", mctr32
, read_hpmcounterh
,
5225 write_mhpmcounterh
},
5226 [CSR_MHPMCOUNTER5H
] = { "mhpmcounter5h", mctr32
, read_hpmcounterh
,
5227 write_mhpmcounterh
},
5228 [CSR_MHPMCOUNTER6H
] = { "mhpmcounter6h", mctr32
, read_hpmcounterh
,
5229 write_mhpmcounterh
},
5230 [CSR_MHPMCOUNTER7H
] = { "mhpmcounter7h", mctr32
, read_hpmcounterh
,
5231 write_mhpmcounterh
},
5232 [CSR_MHPMCOUNTER8H
] = { "mhpmcounter8h", mctr32
, read_hpmcounterh
,
5233 write_mhpmcounterh
},
5234 [CSR_MHPMCOUNTER9H
] = { "mhpmcounter9h", mctr32
, read_hpmcounterh
,
5235 write_mhpmcounterh
},
5236 [CSR_MHPMCOUNTER10H
] = { "mhpmcounter10h", mctr32
, read_hpmcounterh
,
5237 write_mhpmcounterh
},
5238 [CSR_MHPMCOUNTER11H
] = { "mhpmcounter11h", mctr32
, read_hpmcounterh
,
5239 write_mhpmcounterh
},
5240 [CSR_MHPMCOUNTER12H
] = { "mhpmcounter12h", mctr32
, read_hpmcounterh
,
5241 write_mhpmcounterh
},
5242 [CSR_MHPMCOUNTER13H
] = { "mhpmcounter13h", mctr32
, read_hpmcounterh
,
5243 write_mhpmcounterh
},
5244 [CSR_MHPMCOUNTER14H
] = { "mhpmcounter14h", mctr32
, read_hpmcounterh
,
5245 write_mhpmcounterh
},
5246 [CSR_MHPMCOUNTER15H
] = { "mhpmcounter15h", mctr32
, read_hpmcounterh
,
5247 write_mhpmcounterh
},
5248 [CSR_MHPMCOUNTER16H
] = { "mhpmcounter16h", mctr32
, read_hpmcounterh
,
5249 write_mhpmcounterh
},
5250 [CSR_MHPMCOUNTER17H
] = { "mhpmcounter17h", mctr32
, read_hpmcounterh
,
5251 write_mhpmcounterh
},
5252 [CSR_MHPMCOUNTER18H
] = { "mhpmcounter18h", mctr32
, read_hpmcounterh
,
5253 write_mhpmcounterh
},
5254 [CSR_MHPMCOUNTER19H
] = { "mhpmcounter19h", mctr32
, read_hpmcounterh
,
5255 write_mhpmcounterh
},
5256 [CSR_MHPMCOUNTER20H
] = { "mhpmcounter20h", mctr32
, read_hpmcounterh
,
5257 write_mhpmcounterh
},
5258 [CSR_MHPMCOUNTER21H
] = { "mhpmcounter21h", mctr32
, read_hpmcounterh
,
5259 write_mhpmcounterh
},
5260 [CSR_MHPMCOUNTER22H
] = { "mhpmcounter22h", mctr32
, read_hpmcounterh
,
5261 write_mhpmcounterh
},
5262 [CSR_MHPMCOUNTER23H
] = { "mhpmcounter23h", mctr32
, read_hpmcounterh
,
5263 write_mhpmcounterh
},
5264 [CSR_MHPMCOUNTER24H
] = { "mhpmcounter24h", mctr32
, read_hpmcounterh
,
5265 write_mhpmcounterh
},
5266 [CSR_MHPMCOUNTER25H
] = { "mhpmcounter25h", mctr32
, read_hpmcounterh
,
5267 write_mhpmcounterh
},
5268 [CSR_MHPMCOUNTER26H
] = { "mhpmcounter26h", mctr32
, read_hpmcounterh
,
5269 write_mhpmcounterh
},
5270 [CSR_MHPMCOUNTER27H
] = { "mhpmcounter27h", mctr32
, read_hpmcounterh
,
5271 write_mhpmcounterh
},
5272 [CSR_MHPMCOUNTER28H
] = { "mhpmcounter28h", mctr32
, read_hpmcounterh
,
5273 write_mhpmcounterh
},
5274 [CSR_MHPMCOUNTER29H
] = { "mhpmcounter29h", mctr32
, read_hpmcounterh
,
5275 write_mhpmcounterh
},
5276 [CSR_MHPMCOUNTER30H
] = { "mhpmcounter30h", mctr32
, read_hpmcounterh
,
5277 write_mhpmcounterh
},
5278 [CSR_MHPMCOUNTER31H
] = { "mhpmcounter31h", mctr32
, read_hpmcounterh
,
5279 write_mhpmcounterh
},
5280 [CSR_SCOUNTOVF
] = { "scountovf", sscofpmf
, read_scountovf
,
5281 .min_priv_ver
= PRIV_VERSION_1_12_0
},
5283 #endif /* !CONFIG_USER_ONLY */