/*
 * RISC-V Control and Status Registers.
 *
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
20 #include "qemu/osdep.h"
22 #include "qemu/timer.h"
25 #include "qemu/main-loop.h"
26 #include "exec/exec-all.h"
27 #include "sysemu/cpu-timers.h"
28 #include "qemu/guest-random.h"
29 #include "qapi/error.h"
31 /* CSR function table public API */
32 void riscv_get_csr_ops(int csrno
, riscv_csr_operations
*ops
)
34 *ops
= csr_ops
[csrno
& (CSR_TABLE_SIZE
- 1)];
37 void riscv_set_csr_ops(int csrno
, riscv_csr_operations
*ops
)
39 csr_ops
[csrno
& (CSR_TABLE_SIZE
- 1)] = *ops
;
43 static RISCVException
fs(CPURISCVState
*env
, int csrno
)
45 #if !defined(CONFIG_USER_ONLY)
46 if (!env
->debugger
&& !riscv_cpu_fp_enabled(env
) &&
47 !RISCV_CPU(env_cpu(env
))->cfg
.ext_zfinx
) {
48 return RISCV_EXCP_ILLEGAL_INST
;
51 return RISCV_EXCP_NONE
;
54 static RISCVException
vs(CPURISCVState
*env
, int csrno
)
56 CPUState
*cs
= env_cpu(env
);
57 RISCVCPU
*cpu
= RISCV_CPU(cs
);
59 if (env
->misa_ext
& RVV
||
60 cpu
->cfg
.ext_zve32f
|| cpu
->cfg
.ext_zve64f
) {
61 #if !defined(CONFIG_USER_ONLY)
62 if (!env
->debugger
&& !riscv_cpu_vector_enabled(env
)) {
63 return RISCV_EXCP_ILLEGAL_INST
;
66 return RISCV_EXCP_NONE
;
68 return RISCV_EXCP_ILLEGAL_INST
;
71 static RISCVException
ctr(CPURISCVState
*env
, int csrno
)
73 #if !defined(CONFIG_USER_ONLY)
74 CPUState
*cs
= env_cpu(env
);
75 RISCVCPU
*cpu
= RISCV_CPU(cs
);
77 int base_csrno
= CSR_HPMCOUNTER3
;
78 bool rv32
= riscv_cpu_mxl(env
) == MXL_RV32
? true : false;
80 if (rv32
&& csrno
>= CSR_CYCLEH
) {
81 /* Offset for RV32 hpmcounternh counters */
84 ctr_index
= csrno
- base_csrno
;
86 if (!cpu
->cfg
.pmu_num
|| ctr_index
>= (cpu
->cfg
.pmu_num
)) {
87 /* No counter is enabled in PMU or the counter is out of range */
88 return RISCV_EXCP_ILLEGAL_INST
;
91 if (env
->priv
== PRV_S
) {
94 if (!get_field(env
->mcounteren
, COUNTEREN_CY
)) {
95 return RISCV_EXCP_ILLEGAL_INST
;
99 if (!get_field(env
->mcounteren
, COUNTEREN_TM
)) {
100 return RISCV_EXCP_ILLEGAL_INST
;
104 if (!get_field(env
->mcounteren
, COUNTEREN_IR
)) {
105 return RISCV_EXCP_ILLEGAL_INST
;
108 case CSR_HPMCOUNTER3
...CSR_HPMCOUNTER31
:
109 ctr_index
= csrno
- CSR_CYCLE
;
110 if (!get_field(env
->mcounteren
, 1 << ctr_index
)) {
111 return RISCV_EXCP_ILLEGAL_INST
;
118 if (!get_field(env
->mcounteren
, COUNTEREN_CY
)) {
119 return RISCV_EXCP_ILLEGAL_INST
;
123 if (!get_field(env
->mcounteren
, COUNTEREN_TM
)) {
124 return RISCV_EXCP_ILLEGAL_INST
;
128 if (!get_field(env
->mcounteren
, COUNTEREN_IR
)) {
129 return RISCV_EXCP_ILLEGAL_INST
;
132 case CSR_HPMCOUNTER3H
...CSR_HPMCOUNTER31H
:
133 ctr_index
= csrno
- CSR_CYCLEH
;
134 if (!get_field(env
->mcounteren
, 1 << ctr_index
)) {
135 return RISCV_EXCP_ILLEGAL_INST
;
142 if (riscv_cpu_virt_enabled(env
)) {
145 if (!get_field(env
->hcounteren
, COUNTEREN_CY
) &&
146 get_field(env
->mcounteren
, COUNTEREN_CY
)) {
147 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
151 if (!get_field(env
->hcounteren
, COUNTEREN_TM
) &&
152 get_field(env
->mcounteren
, COUNTEREN_TM
)) {
153 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
157 if (!get_field(env
->hcounteren
, COUNTEREN_IR
) &&
158 get_field(env
->mcounteren
, COUNTEREN_IR
)) {
159 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
162 case CSR_HPMCOUNTER3
...CSR_HPMCOUNTER31
:
163 ctr_index
= csrno
- CSR_CYCLE
;
164 if (!get_field(env
->hcounteren
, 1 << ctr_index
) &&
165 get_field(env
->mcounteren
, 1 << ctr_index
)) {
166 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
173 if (!get_field(env
->hcounteren
, COUNTEREN_CY
) &&
174 get_field(env
->mcounteren
, COUNTEREN_CY
)) {
175 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
179 if (!get_field(env
->hcounteren
, COUNTEREN_TM
) &&
180 get_field(env
->mcounteren
, COUNTEREN_TM
)) {
181 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
185 if (!get_field(env
->hcounteren
, COUNTEREN_IR
) &&
186 get_field(env
->mcounteren
, COUNTEREN_IR
)) {
187 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
190 case CSR_HPMCOUNTER3H
...CSR_HPMCOUNTER31H
:
191 ctr_index
= csrno
- CSR_CYCLEH
;
192 if (!get_field(env
->hcounteren
, 1 << ctr_index
) &&
193 get_field(env
->mcounteren
, 1 << ctr_index
)) {
194 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
201 return RISCV_EXCP_NONE
;
204 static RISCVException
ctr32(CPURISCVState
*env
, int csrno
)
206 if (riscv_cpu_mxl(env
) != MXL_RV32
) {
207 return RISCV_EXCP_ILLEGAL_INST
;
210 return ctr(env
, csrno
);
213 #if !defined(CONFIG_USER_ONLY)
214 static RISCVException
mctr(CPURISCVState
*env
, int csrno
)
216 CPUState
*cs
= env_cpu(env
);
217 RISCVCPU
*cpu
= RISCV_CPU(cs
);
219 int base_csrno
= CSR_MHPMCOUNTER3
;
221 if ((riscv_cpu_mxl(env
) == MXL_RV32
) && csrno
>= CSR_MCYCLEH
) {
222 /* Offset for RV32 mhpmcounternh counters */
225 ctr_index
= csrno
- base_csrno
;
226 if (!cpu
->cfg
.pmu_num
|| ctr_index
>= cpu
->cfg
.pmu_num
) {
227 /* The PMU is not enabled or counter is out of range*/
228 return RISCV_EXCP_ILLEGAL_INST
;
231 return RISCV_EXCP_NONE
;
234 static RISCVException
mctr32(CPURISCVState
*env
, int csrno
)
236 if (riscv_cpu_mxl(env
) != MXL_RV32
) {
237 return RISCV_EXCP_ILLEGAL_INST
;
240 return mctr(env
, csrno
);
243 static RISCVException
any(CPURISCVState
*env
, int csrno
)
245 return RISCV_EXCP_NONE
;
248 static RISCVException
any32(CPURISCVState
*env
, int csrno
)
250 if (riscv_cpu_mxl(env
) != MXL_RV32
) {
251 return RISCV_EXCP_ILLEGAL_INST
;
254 return any(env
, csrno
);
258 static int aia_any(CPURISCVState
*env
, int csrno
)
260 if (!riscv_feature(env
, RISCV_FEATURE_AIA
)) {
261 return RISCV_EXCP_ILLEGAL_INST
;
264 return any(env
, csrno
);
267 static int aia_any32(CPURISCVState
*env
, int csrno
)
269 if (!riscv_feature(env
, RISCV_FEATURE_AIA
)) {
270 return RISCV_EXCP_ILLEGAL_INST
;
273 return any32(env
, csrno
);
276 static RISCVException
smode(CPURISCVState
*env
, int csrno
)
278 if (riscv_has_ext(env
, RVS
)) {
279 return RISCV_EXCP_NONE
;
282 return RISCV_EXCP_ILLEGAL_INST
;
285 static int smode32(CPURISCVState
*env
, int csrno
)
287 if (riscv_cpu_mxl(env
) != MXL_RV32
) {
288 return RISCV_EXCP_ILLEGAL_INST
;
291 return smode(env
, csrno
);
294 static int aia_smode(CPURISCVState
*env
, int csrno
)
296 if (!riscv_feature(env
, RISCV_FEATURE_AIA
)) {
297 return RISCV_EXCP_ILLEGAL_INST
;
300 return smode(env
, csrno
);
303 static int aia_smode32(CPURISCVState
*env
, int csrno
)
305 if (!riscv_feature(env
, RISCV_FEATURE_AIA
)) {
306 return RISCV_EXCP_ILLEGAL_INST
;
309 return smode32(env
, csrno
);
312 static RISCVException
hmode(CPURISCVState
*env
, int csrno
)
314 if (riscv_has_ext(env
, RVS
) &&
315 riscv_has_ext(env
, RVH
)) {
316 /* Hypervisor extension is supported */
317 if ((env
->priv
== PRV_S
&& !riscv_cpu_virt_enabled(env
)) ||
318 env
->priv
== PRV_M
) {
319 return RISCV_EXCP_NONE
;
321 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
325 return RISCV_EXCP_ILLEGAL_INST
;
328 static RISCVException
hmode32(CPURISCVState
*env
, int csrno
)
330 if (riscv_cpu_mxl(env
) != MXL_RV32
) {
331 if (!riscv_cpu_virt_enabled(env
)) {
332 return RISCV_EXCP_ILLEGAL_INST
;
334 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
338 return hmode(env
, csrno
);
342 /* Checks if PointerMasking registers could be accessed */
343 static RISCVException
pointer_masking(CPURISCVState
*env
, int csrno
)
345 /* Check if j-ext is present */
346 if (riscv_has_ext(env
, RVJ
)) {
347 return RISCV_EXCP_NONE
;
349 return RISCV_EXCP_ILLEGAL_INST
;
352 static int aia_hmode(CPURISCVState
*env
, int csrno
)
354 if (!riscv_feature(env
, RISCV_FEATURE_AIA
)) {
355 return RISCV_EXCP_ILLEGAL_INST
;
358 return hmode(env
, csrno
);
361 static int aia_hmode32(CPURISCVState
*env
, int csrno
)
363 if (!riscv_feature(env
, RISCV_FEATURE_AIA
)) {
364 return RISCV_EXCP_ILLEGAL_INST
;
367 return hmode32(env
, csrno
);
370 static RISCVException
pmp(CPURISCVState
*env
, int csrno
)
372 if (riscv_feature(env
, RISCV_FEATURE_PMP
)) {
373 return RISCV_EXCP_NONE
;
376 return RISCV_EXCP_ILLEGAL_INST
;
379 static RISCVException
epmp(CPURISCVState
*env
, int csrno
)
381 if (env
->priv
== PRV_M
&& riscv_feature(env
, RISCV_FEATURE_EPMP
)) {
382 return RISCV_EXCP_NONE
;
385 return RISCV_EXCP_ILLEGAL_INST
;
388 static RISCVException
debug(CPURISCVState
*env
, int csrno
)
390 if (riscv_feature(env
, RISCV_FEATURE_DEBUG
)) {
391 return RISCV_EXCP_NONE
;
394 return RISCV_EXCP_ILLEGAL_INST
;
398 static RISCVException
seed(CPURISCVState
*env
, int csrno
)
400 RISCVCPU
*cpu
= env_archcpu(env
);
402 if (!cpu
->cfg
.ext_zkr
) {
403 return RISCV_EXCP_ILLEGAL_INST
;
406 #if !defined(CONFIG_USER_ONLY)
408 * With a CSR read-write instruction:
409 * 1) The seed CSR is always available in machine mode as normal.
410 * 2) Attempted access to seed from virtual modes VS and VU always raises
411 * an exception(virtual instruction exception only if mseccfg.sseed=1).
412 * 3) Without the corresponding access control bit set to 1, any attempted
413 * access to seed from U, S or HS modes will raise an illegal instruction
416 if (env
->priv
== PRV_M
) {
417 return RISCV_EXCP_NONE
;
418 } else if (riscv_cpu_virt_enabled(env
)) {
419 if (env
->mseccfg
& MSECCFG_SSEED
) {
420 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
422 return RISCV_EXCP_ILLEGAL_INST
;
425 if (env
->priv
== PRV_S
&& (env
->mseccfg
& MSECCFG_SSEED
)) {
426 return RISCV_EXCP_NONE
;
427 } else if (env
->priv
== PRV_U
&& (env
->mseccfg
& MSECCFG_USEED
)) {
428 return RISCV_EXCP_NONE
;
430 return RISCV_EXCP_ILLEGAL_INST
;
434 return RISCV_EXCP_NONE
;
438 /* User Floating-Point CSRs */
439 static RISCVException
read_fflags(CPURISCVState
*env
, int csrno
,
442 *val
= riscv_cpu_get_fflags(env
);
443 return RISCV_EXCP_NONE
;
446 static RISCVException
write_fflags(CPURISCVState
*env
, int csrno
,
449 #if !defined(CONFIG_USER_ONLY)
450 if (riscv_has_ext(env
, RVF
)) {
451 env
->mstatus
|= MSTATUS_FS
;
454 riscv_cpu_set_fflags(env
, val
& (FSR_AEXC
>> FSR_AEXC_SHIFT
));
455 return RISCV_EXCP_NONE
;
458 static RISCVException
read_frm(CPURISCVState
*env
, int csrno
,
462 return RISCV_EXCP_NONE
;
465 static RISCVException
write_frm(CPURISCVState
*env
, int csrno
,
468 #if !defined(CONFIG_USER_ONLY)
469 if (riscv_has_ext(env
, RVF
)) {
470 env
->mstatus
|= MSTATUS_FS
;
473 env
->frm
= val
& (FSR_RD
>> FSR_RD_SHIFT
);
474 return RISCV_EXCP_NONE
;
477 static RISCVException
read_fcsr(CPURISCVState
*env
, int csrno
,
480 *val
= (riscv_cpu_get_fflags(env
) << FSR_AEXC_SHIFT
)
481 | (env
->frm
<< FSR_RD_SHIFT
);
482 return RISCV_EXCP_NONE
;
485 static RISCVException
write_fcsr(CPURISCVState
*env
, int csrno
,
488 #if !defined(CONFIG_USER_ONLY)
489 if (riscv_has_ext(env
, RVF
)) {
490 env
->mstatus
|= MSTATUS_FS
;
493 env
->frm
= (val
& FSR_RD
) >> FSR_RD_SHIFT
;
494 riscv_cpu_set_fflags(env
, (val
& FSR_AEXC
) >> FSR_AEXC_SHIFT
);
495 return RISCV_EXCP_NONE
;
498 static RISCVException
read_vtype(CPURISCVState
*env
, int csrno
,
504 vill
= (uint32_t)env
->vill
<< 31;
507 vill
= (uint64_t)env
->vill
<< 63;
510 g_assert_not_reached();
512 *val
= (target_ulong
)vill
| env
->vtype
;
513 return RISCV_EXCP_NONE
;
516 static RISCVException
read_vl(CPURISCVState
*env
, int csrno
,
520 return RISCV_EXCP_NONE
;
523 static int read_vlenb(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
525 *val
= env_archcpu(env
)->cfg
.vlen
>> 3;
526 return RISCV_EXCP_NONE
;
529 static RISCVException
read_vxrm(CPURISCVState
*env
, int csrno
,
533 return RISCV_EXCP_NONE
;
536 static RISCVException
write_vxrm(CPURISCVState
*env
, int csrno
,
539 #if !defined(CONFIG_USER_ONLY)
540 env
->mstatus
|= MSTATUS_VS
;
543 return RISCV_EXCP_NONE
;
546 static RISCVException
read_vxsat(CPURISCVState
*env
, int csrno
,
550 return RISCV_EXCP_NONE
;
553 static RISCVException
write_vxsat(CPURISCVState
*env
, int csrno
,
556 #if !defined(CONFIG_USER_ONLY)
557 env
->mstatus
|= MSTATUS_VS
;
560 return RISCV_EXCP_NONE
;
563 static RISCVException
read_vstart(CPURISCVState
*env
, int csrno
,
567 return RISCV_EXCP_NONE
;
570 static RISCVException
write_vstart(CPURISCVState
*env
, int csrno
,
573 #if !defined(CONFIG_USER_ONLY)
574 env
->mstatus
|= MSTATUS_VS
;
577 * The vstart CSR is defined to have only enough writable bits
578 * to hold the largest element index, i.e. lg2(VLEN) bits.
580 env
->vstart
= val
& ~(~0ULL << ctzl(env_archcpu(env
)->cfg
.vlen
));
581 return RISCV_EXCP_NONE
;
584 static int read_vcsr(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
586 *val
= (env
->vxrm
<< VCSR_VXRM_SHIFT
) | (env
->vxsat
<< VCSR_VXSAT_SHIFT
);
587 return RISCV_EXCP_NONE
;
590 static int write_vcsr(CPURISCVState
*env
, int csrno
, target_ulong val
)
592 #if !defined(CONFIG_USER_ONLY)
593 env
->mstatus
|= MSTATUS_VS
;
595 env
->vxrm
= (val
& VCSR_VXRM
) >> VCSR_VXRM_SHIFT
;
596 env
->vxsat
= (val
& VCSR_VXSAT
) >> VCSR_VXSAT_SHIFT
;
597 return RISCV_EXCP_NONE
;
600 /* User Timers and Counters */
601 static target_ulong
get_ticks(bool shift
)
606 #if !defined(CONFIG_USER_ONLY)
607 if (icount_enabled()) {
610 val
= cpu_get_host_ticks();
613 val
= cpu_get_host_ticks();
625 #if defined(CONFIG_USER_ONLY)
626 static RISCVException
read_time(CPURISCVState
*env
, int csrno
,
629 *val
= cpu_get_host_ticks();
630 return RISCV_EXCP_NONE
;
633 static RISCVException
read_timeh(CPURISCVState
*env
, int csrno
,
636 *val
= cpu_get_host_ticks() >> 32;
637 return RISCV_EXCP_NONE
;
640 static int read_hpmcounter(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
642 *val
= get_ticks(false);
643 return RISCV_EXCP_NONE
;
646 static int read_hpmcounterh(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
648 *val
= get_ticks(true);
649 return RISCV_EXCP_NONE
;
652 #else /* CONFIG_USER_ONLY */
654 static int read_mhpmevent(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
656 int evt_index
= csrno
- CSR_MCOUNTINHIBIT
;
658 *val
= env
->mhpmevent_val
[evt_index
];
660 return RISCV_EXCP_NONE
;
663 static int write_mhpmevent(CPURISCVState
*env
, int csrno
, target_ulong val
)
665 int evt_index
= csrno
- CSR_MCOUNTINHIBIT
;
667 env
->mhpmevent_val
[evt_index
] = val
;
669 return RISCV_EXCP_NONE
;
672 static int write_mhpmcounter(CPURISCVState
*env
, int csrno
, target_ulong val
)
674 int ctr_idx
= csrno
- CSR_MCYCLE
;
675 PMUCTRState
*counter
= &env
->pmu_ctrs
[ctr_idx
];
677 counter
->mhpmcounter_val
= val
;
678 if (riscv_pmu_ctr_monitor_cycles(env
, ctr_idx
) ||
679 riscv_pmu_ctr_monitor_instructions(env
, ctr_idx
)) {
680 counter
->mhpmcounter_prev
= get_ticks(false);
682 /* Other counters can keep incrementing from the given value */
683 counter
->mhpmcounter_prev
= val
;
686 return RISCV_EXCP_NONE
;
689 static int write_mhpmcounterh(CPURISCVState
*env
, int csrno
, target_ulong val
)
691 int ctr_idx
= csrno
- CSR_MCYCLEH
;
692 PMUCTRState
*counter
= &env
->pmu_ctrs
[ctr_idx
];
694 counter
->mhpmcounterh_val
= val
;
695 if (riscv_pmu_ctr_monitor_cycles(env
, ctr_idx
) ||
696 riscv_pmu_ctr_monitor_instructions(env
, ctr_idx
)) {
697 counter
->mhpmcounterh_prev
= get_ticks(true);
699 counter
->mhpmcounterh_prev
= val
;
702 return RISCV_EXCP_NONE
;
705 static RISCVException
riscv_pmu_read_ctr(CPURISCVState
*env
, target_ulong
*val
,
706 bool upper_half
, uint32_t ctr_idx
)
708 PMUCTRState counter
= env
->pmu_ctrs
[ctr_idx
];
709 target_ulong ctr_prev
= upper_half
? counter
.mhpmcounterh_prev
:
710 counter
.mhpmcounter_prev
;
711 target_ulong ctr_val
= upper_half
? counter
.mhpmcounterh_val
:
712 counter
.mhpmcounter_val
;
714 if (get_field(env
->mcountinhibit
, BIT(ctr_idx
))) {
716 * Counter should not increment if inhibit bit is set. We can't really
717 * stop the icount counting. Just return the counter value written by
718 * the supervisor to indicate that counter was not incremented.
720 if (!counter
.started
) {
722 return RISCV_EXCP_NONE
;
724 /* Mark that the counter has been stopped */
725 counter
.started
= false;
730 * The kernel computes the perf delta by subtracting the current value from
731 * the value it initialized previously (ctr_val).
733 if (riscv_pmu_ctr_monitor_cycles(env
, ctr_idx
) ||
734 riscv_pmu_ctr_monitor_instructions(env
, ctr_idx
)) {
735 *val
= get_ticks(upper_half
) - ctr_prev
+ ctr_val
;
740 return RISCV_EXCP_NONE
;
743 static int read_hpmcounter(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
747 if (csrno
>= CSR_MCYCLE
&& csrno
<= CSR_MHPMCOUNTER31
) {
748 ctr_index
= csrno
- CSR_MCYCLE
;
749 } else if (csrno
>= CSR_CYCLE
&& csrno
<= CSR_HPMCOUNTER31
) {
750 ctr_index
= csrno
- CSR_CYCLE
;
752 return RISCV_EXCP_ILLEGAL_INST
;
755 return riscv_pmu_read_ctr(env
, val
, false, ctr_index
);
758 static int read_hpmcounterh(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
762 if (csrno
>= CSR_MCYCLEH
&& csrno
<= CSR_MHPMCOUNTER31H
) {
763 ctr_index
= csrno
- CSR_MCYCLEH
;
764 } else if (csrno
>= CSR_CYCLEH
&& csrno
<= CSR_HPMCOUNTER31H
) {
765 ctr_index
= csrno
- CSR_CYCLEH
;
767 return RISCV_EXCP_ILLEGAL_INST
;
770 return riscv_pmu_read_ctr(env
, val
, true, ctr_index
);
773 static RISCVException
read_time(CPURISCVState
*env
, int csrno
,
776 uint64_t delta
= riscv_cpu_virt_enabled(env
) ? env
->htimedelta
: 0;
778 if (!env
->rdtime_fn
) {
779 return RISCV_EXCP_ILLEGAL_INST
;
782 *val
= env
->rdtime_fn(env
->rdtime_fn_arg
) + delta
;
783 return RISCV_EXCP_NONE
;
786 static RISCVException
read_timeh(CPURISCVState
*env
, int csrno
,
789 uint64_t delta
= riscv_cpu_virt_enabled(env
) ? env
->htimedelta
: 0;
791 if (!env
->rdtime_fn
) {
792 return RISCV_EXCP_ILLEGAL_INST
;
795 *val
= (env
->rdtime_fn(env
->rdtime_fn_arg
) + delta
) >> 32;
796 return RISCV_EXCP_NONE
;
799 /* Machine constants */
801 #define M_MODE_INTERRUPTS ((uint64_t)(MIP_MSIP | MIP_MTIP | MIP_MEIP))
802 #define S_MODE_INTERRUPTS ((uint64_t)(MIP_SSIP | MIP_STIP | MIP_SEIP))
803 #define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP))
804 #define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS))
806 #define VSTOPI_NUM_SRCS 5
808 static const uint64_t delegable_ints
= S_MODE_INTERRUPTS
|
810 static const uint64_t vs_delegable_ints
= VS_MODE_INTERRUPTS
;
811 static const uint64_t all_ints
= M_MODE_INTERRUPTS
| S_MODE_INTERRUPTS
|
813 #define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
814 (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
815 (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
816 (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
817 (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
818 (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
819 (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
820 (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
821 (1ULL << (RISCV_EXCP_U_ECALL)) | \
822 (1ULL << (RISCV_EXCP_S_ECALL)) | \
823 (1ULL << (RISCV_EXCP_VS_ECALL)) | \
824 (1ULL << (RISCV_EXCP_M_ECALL)) | \
825 (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
826 (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
827 (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
828 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
829 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
830 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
831 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
832 static const target_ulong vs_delegable_excps
= DELEGABLE_EXCPS
&
833 ~((1ULL << (RISCV_EXCP_S_ECALL
)) |
834 (1ULL << (RISCV_EXCP_VS_ECALL
)) |
835 (1ULL << (RISCV_EXCP_M_ECALL
)) |
836 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT
)) |
837 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT
)) |
838 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT
)) |
839 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT
)));
840 static const target_ulong sstatus_v1_10_mask
= SSTATUS_SIE
| SSTATUS_SPIE
|
841 SSTATUS_UIE
| SSTATUS_UPIE
| SSTATUS_SPP
| SSTATUS_FS
| SSTATUS_XS
|
842 SSTATUS_SUM
| SSTATUS_MXR
| SSTATUS_VS
;
843 static const target_ulong sip_writable_mask
= SIP_SSIP
| MIP_USIP
| MIP_UEIP
;
844 static const target_ulong hip_writable_mask
= MIP_VSSIP
;
845 static const target_ulong hvip_writable_mask
= MIP_VSSIP
| MIP_VSTIP
| MIP_VSEIP
;
846 static const target_ulong vsip_writable_mask
= MIP_VSSIP
;
848 static const char valid_vm_1_10_32
[16] = {
853 static const char valid_vm_1_10_64
[16] = {
860 /* Machine Information Registers */
861 static RISCVException
read_zero(CPURISCVState
*env
, int csrno
,
865 return RISCV_EXCP_NONE
;
868 static RISCVException
write_ignore(CPURISCVState
*env
, int csrno
,
871 return RISCV_EXCP_NONE
;
874 static RISCVException
read_mvendorid(CPURISCVState
*env
, int csrno
,
877 CPUState
*cs
= env_cpu(env
);
878 RISCVCPU
*cpu
= RISCV_CPU(cs
);
880 *val
= cpu
->cfg
.mvendorid
;
881 return RISCV_EXCP_NONE
;
884 static RISCVException
read_marchid(CPURISCVState
*env
, int csrno
,
887 CPUState
*cs
= env_cpu(env
);
888 RISCVCPU
*cpu
= RISCV_CPU(cs
);
890 *val
= cpu
->cfg
.marchid
;
891 return RISCV_EXCP_NONE
;
894 static RISCVException
read_mimpid(CPURISCVState
*env
, int csrno
,
897 CPUState
*cs
= env_cpu(env
);
898 RISCVCPU
*cpu
= RISCV_CPU(cs
);
900 *val
= cpu
->cfg
.mimpid
;
901 return RISCV_EXCP_NONE
;
904 static RISCVException
read_mhartid(CPURISCVState
*env
, int csrno
,
908 return RISCV_EXCP_NONE
;
911 /* Machine Trap Setup */
913 /* We do not store SD explicitly, only compute it on demand. */
914 static uint64_t add_status_sd(RISCVMXL xl
, uint64_t status
)
916 if ((status
& MSTATUS_FS
) == MSTATUS_FS
||
917 (status
& MSTATUS_VS
) == MSTATUS_VS
||
918 (status
& MSTATUS_XS
) == MSTATUS_XS
) {
921 return status
| MSTATUS32_SD
;
923 return status
| MSTATUS64_SD
;
925 return MSTATUSH128_SD
;
927 g_assert_not_reached();
933 static RISCVException
read_mstatus(CPURISCVState
*env
, int csrno
,
936 *val
= add_status_sd(riscv_cpu_mxl(env
), env
->mstatus
);
937 return RISCV_EXCP_NONE
;
940 static int validate_vm(CPURISCVState
*env
, target_ulong vm
)
942 if (riscv_cpu_mxl(env
) == MXL_RV32
) {
943 return valid_vm_1_10_32
[vm
& 0xf];
945 return valid_vm_1_10_64
[vm
& 0xf];
949 static RISCVException
write_mstatus(CPURISCVState
*env
, int csrno
,
952 uint64_t mstatus
= env
->mstatus
;
954 RISCVMXL xl
= riscv_cpu_mxl(env
);
956 /* flush tlb on mstatus fields that affect VM */
957 if ((val
^ mstatus
) & (MSTATUS_MXR
| MSTATUS_MPP
| MSTATUS_MPV
|
958 MSTATUS_MPRV
| MSTATUS_SUM
)) {
959 tlb_flush(env_cpu(env
));
961 mask
= MSTATUS_SIE
| MSTATUS_SPIE
| MSTATUS_MIE
| MSTATUS_MPIE
|
962 MSTATUS_SPP
| MSTATUS_MPRV
| MSTATUS_SUM
|
963 MSTATUS_MPP
| MSTATUS_MXR
| MSTATUS_TVM
| MSTATUS_TSR
|
964 MSTATUS_TW
| MSTATUS_VS
;
966 if (riscv_has_ext(env
, RVF
)) {
970 if (xl
!= MXL_RV32
|| env
->debugger
) {
972 * RV32: MPV and GVA are not in mstatus. The current plan is to
973 * add them to mstatush. For now, we just don't support it.
975 mask
|= MSTATUS_MPV
| MSTATUS_GVA
;
976 if ((val
& MSTATUS64_UXL
) != 0) {
977 mask
|= MSTATUS64_UXL
;
981 mstatus
= (mstatus
& ~mask
) | (val
& mask
);
984 /* SXL field is for now read only */
985 mstatus
= set_field(mstatus
, MSTATUS64_SXL
, xl
);
987 env
->mstatus
= mstatus
;
988 env
->xl
= cpu_recompute_xl(env
);
990 return RISCV_EXCP_NONE
;
993 static RISCVException
read_mstatush(CPURISCVState
*env
, int csrno
,
996 *val
= env
->mstatus
>> 32;
997 return RISCV_EXCP_NONE
;
1000 static RISCVException
write_mstatush(CPURISCVState
*env
, int csrno
,
1003 uint64_t valh
= (uint64_t)val
<< 32;
1004 uint64_t mask
= MSTATUS_MPV
| MSTATUS_GVA
;
1006 if ((valh
^ env
->mstatus
) & (MSTATUS_MPV
)) {
1007 tlb_flush(env_cpu(env
));
1010 env
->mstatus
= (env
->mstatus
& ~mask
) | (valh
& mask
);
1012 return RISCV_EXCP_NONE
;
1015 static RISCVException
read_mstatus_i128(CPURISCVState
*env
, int csrno
,
1018 *val
= int128_make128(env
->mstatus
, add_status_sd(MXL_RV128
, env
->mstatus
));
1019 return RISCV_EXCP_NONE
;
1022 static RISCVException
read_misa_i128(CPURISCVState
*env
, int csrno
,
1025 *val
= int128_make128(env
->misa_ext
, (uint64_t)MXL_RV128
<< 62);
1026 return RISCV_EXCP_NONE
;
1029 static RISCVException
read_misa(CPURISCVState
*env
, int csrno
,
1034 switch (env
->misa_mxl
) {
1036 misa
= (target_ulong
)MXL_RV32
<< 30;
1038 #ifdef TARGET_RISCV64
1040 misa
= (target_ulong
)MXL_RV64
<< 62;
1044 g_assert_not_reached();
1047 *val
= misa
| env
->misa_ext
;
1048 return RISCV_EXCP_NONE
;
1051 static RISCVException
write_misa(CPURISCVState
*env
, int csrno
,
1054 if (!riscv_feature(env
, RISCV_FEATURE_MISA
)) {
1055 /* drop write to misa */
1056 return RISCV_EXCP_NONE
;
1059 /* 'I' or 'E' must be present */
1060 if (!(val
& (RVI
| RVE
))) {
1061 /* It is not, drop write to misa */
1062 return RISCV_EXCP_NONE
;
1065 /* 'E' excludes all other extensions */
1067 /* when we support 'E' we can do "val = RVE;" however
1068 * for now we just drop writes if 'E' is present.
1070 return RISCV_EXCP_NONE
;
1074 * misa.MXL writes are not supported by QEMU.
1075 * Drop writes to those bits.
1078 /* Mask extensions that are not supported by this hart */
1079 val
&= env
->misa_ext_mask
;
1081 /* Mask extensions that are not supported by QEMU */
1082 val
&= (RVI
| RVE
| RVM
| RVA
| RVF
| RVD
| RVC
| RVS
| RVU
| RVV
);
1084 /* 'D' depends on 'F', so clear 'D' if 'F' is not present */
1085 if ((val
& RVD
) && !(val
& RVF
)) {
1089 /* Suppress 'C' if next instruction is not aligned
1090 * TODO: this should check next_pc
1092 if ((val
& RVC
) && (GETPC() & ~3) != 0) {
1096 /* If nothing changed, do nothing. */
1097 if (val
== env
->misa_ext
) {
1098 return RISCV_EXCP_NONE
;
1102 env
->mstatus
&= ~MSTATUS_FS
;
1105 /* flush translation cache */
1106 tb_flush(env_cpu(env
));
1107 env
->misa_ext
= val
;
1108 env
->xl
= riscv_cpu_mxl(env
);
1109 return RISCV_EXCP_NONE
;
1112 static RISCVException
read_medeleg(CPURISCVState
*env
, int csrno
,
1115 *val
= env
->medeleg
;
1116 return RISCV_EXCP_NONE
;
1119 static RISCVException
write_medeleg(CPURISCVState
*env
, int csrno
,
1122 env
->medeleg
= (env
->medeleg
& ~DELEGABLE_EXCPS
) | (val
& DELEGABLE_EXCPS
);
1123 return RISCV_EXCP_NONE
;
1126 static RISCVException
rmw_mideleg64(CPURISCVState
*env
, int csrno
,
1128 uint64_t new_val
, uint64_t wr_mask
)
1130 uint64_t mask
= wr_mask
& delegable_ints
;
1133 *ret_val
= env
->mideleg
;
1136 env
->mideleg
= (env
->mideleg
& ~mask
) | (new_val
& mask
);
1138 if (riscv_has_ext(env
, RVH
)) {
1139 env
->mideleg
|= HS_MODE_INTERRUPTS
;
1142 return RISCV_EXCP_NONE
;
1145 static RISCVException
rmw_mideleg(CPURISCVState
*env
, int csrno
,
1146 target_ulong
*ret_val
,
1147 target_ulong new_val
, target_ulong wr_mask
)
1152 ret
= rmw_mideleg64(env
, csrno
, &rval
, new_val
, wr_mask
);
1160 static RISCVException
rmw_midelegh(CPURISCVState
*env
, int csrno
,
1161 target_ulong
*ret_val
,
1162 target_ulong new_val
,
1163 target_ulong wr_mask
)
1168 ret
= rmw_mideleg64(env
, csrno
, &rval
,
1169 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
1171 *ret_val
= rval
>> 32;
1177 static RISCVException
rmw_mie64(CPURISCVState
*env
, int csrno
,
1179 uint64_t new_val
, uint64_t wr_mask
)
1181 uint64_t mask
= wr_mask
& all_ints
;
1184 *ret_val
= env
->mie
;
1187 env
->mie
= (env
->mie
& ~mask
) | (new_val
& mask
);
1189 if (!riscv_has_ext(env
, RVH
)) {
1190 env
->mie
&= ~((uint64_t)MIP_SGEIP
);
1193 return RISCV_EXCP_NONE
;
1196 static RISCVException
rmw_mie(CPURISCVState
*env
, int csrno
,
1197 target_ulong
*ret_val
,
1198 target_ulong new_val
, target_ulong wr_mask
)
1203 ret
= rmw_mie64(env
, csrno
, &rval
, new_val
, wr_mask
);
1211 static RISCVException
rmw_mieh(CPURISCVState
*env
, int csrno
,
1212 target_ulong
*ret_val
,
1213 target_ulong new_val
, target_ulong wr_mask
)
1218 ret
= rmw_mie64(env
, csrno
, &rval
,
1219 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
1221 *ret_val
= rval
>> 32;
1227 static int read_mtopi(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
1232 irq
= riscv_cpu_mirq_pending(env
);
1233 if (irq
<= 0 || irq
> 63) {
1236 iprio
= env
->miprio
[irq
];
1238 if (riscv_cpu_default_priority(irq
) > IPRIO_DEFAULT_M
) {
1239 iprio
= IPRIO_MMAXIPRIO
;
1242 *val
= (irq
& TOPI_IID_MASK
) << TOPI_IID_SHIFT
;
1246 return RISCV_EXCP_NONE
;
1249 static int aia_xlate_vs_csrno(CPURISCVState
*env
, int csrno
)
1251 if (!riscv_cpu_virt_enabled(env
)) {
1257 return CSR_VSISELECT
;
1267 static int rmw_xiselect(CPURISCVState
*env
, int csrno
, target_ulong
*val
,
1268 target_ulong new_val
, target_ulong wr_mask
)
1270 target_ulong
*iselect
;
1272 /* Translate CSR number for VS-mode */
1273 csrno
= aia_xlate_vs_csrno(env
, csrno
);
1275 /* Find the iselect CSR based on CSR number */
1278 iselect
= &env
->miselect
;
1281 iselect
= &env
->siselect
;
1284 iselect
= &env
->vsiselect
;
1287 return RISCV_EXCP_ILLEGAL_INST
;
1294 wr_mask
&= ISELECT_MASK
;
1296 *iselect
= (*iselect
& ~wr_mask
) | (new_val
& wr_mask
);
1299 return RISCV_EXCP_NONE
;
1302 static int rmw_iprio(target_ulong xlen
,
1303 target_ulong iselect
, uint8_t *iprio
,
1304 target_ulong
*val
, target_ulong new_val
,
1305 target_ulong wr_mask
, int ext_irq_no
)
1308 target_ulong old_val
;
1310 if (iselect
< ISELECT_IPRIO0
|| ISELECT_IPRIO15
< iselect
) {
1313 if (xlen
!= 32 && iselect
& 0x1) {
1317 nirqs
= 4 * (xlen
/ 32);
1318 firq
= ((iselect
- ISELECT_IPRIO0
) / (xlen
/ 32)) * (nirqs
);
1321 for (i
= 0; i
< nirqs
; i
++) {
1322 old_val
|= ((target_ulong
)iprio
[firq
+ i
]) << (IPRIO_IRQ_BITS
* i
);
1330 new_val
= (old_val
& ~wr_mask
) | (new_val
& wr_mask
);
1331 for (i
= 0; i
< nirqs
; i
++) {
1333 * M-level and S-level external IRQ priority always read-only
1334 * zero. This means default priority order is always preferred
1335 * for M-level and S-level external IRQs.
1337 if ((firq
+ i
) == ext_irq_no
) {
1340 iprio
[firq
+ i
] = (new_val
>> (IPRIO_IRQ_BITS
* i
)) & 0xff;
1347 static int rmw_xireg(CPURISCVState
*env
, int csrno
, target_ulong
*val
,
1348 target_ulong new_val
, target_ulong wr_mask
)
1353 target_ulong priv
, isel
, vgein
;
1355 /* Translate CSR number for VS-mode */
1356 csrno
= aia_xlate_vs_csrno(env
, csrno
);
1358 /* Decode register details from CSR number */
1362 iprio
= env
->miprio
;
1363 isel
= env
->miselect
;
1367 iprio
= env
->siprio
;
1368 isel
= env
->siselect
;
1372 iprio
= env
->hviprio
;
1373 isel
= env
->vsiselect
;
1381 /* Find the selected guest interrupt file */
1382 vgein
= (virt
) ? get_field(env
->hstatus
, HSTATUS_VGEIN
) : 0;
1384 if (ISELECT_IPRIO0
<= isel
&& isel
<= ISELECT_IPRIO15
) {
1385 /* Local interrupt priority registers not available for VS-mode */
1387 ret
= rmw_iprio(riscv_cpu_mxl_bits(env
),
1388 isel
, iprio
, val
, new_val
, wr_mask
,
1389 (priv
== PRV_M
) ? IRQ_M_EXT
: IRQ_S_EXT
);
1391 } else if (ISELECT_IMSIC_FIRST
<= isel
&& isel
<= ISELECT_IMSIC_LAST
) {
1392 /* IMSIC registers only available when machine implements it. */
1393 if (env
->aia_ireg_rmw_fn
[priv
]) {
1394 /* Selected guest interrupt file should not be zero */
1395 if (virt
&& (!vgein
|| env
->geilen
< vgein
)) {
1398 /* Call machine specific IMSIC register emulation */
1399 ret
= env
->aia_ireg_rmw_fn
[priv
](env
->aia_ireg_rmw_fn_arg
[priv
],
1400 AIA_MAKE_IREG(isel
, priv
, virt
, vgein
,
1401 riscv_cpu_mxl_bits(env
)),
1402 val
, new_val
, wr_mask
);
1408 return (riscv_cpu_virt_enabled(env
) && virt
) ?
1409 RISCV_EXCP_VIRT_INSTRUCTION_FAULT
: RISCV_EXCP_ILLEGAL_INST
;
1411 return RISCV_EXCP_NONE
;
1414 static int rmw_xtopei(CPURISCVState
*env
, int csrno
, target_ulong
*val
,
1415 target_ulong new_val
, target_ulong wr_mask
)
1419 target_ulong priv
, vgein
;
1421 /* Translate CSR number for VS-mode */
1422 csrno
= aia_xlate_vs_csrno(env
, csrno
);
1424 /* Decode register details from CSR number */
1441 /* IMSIC CSRs only available when machine implements IMSIC. */
1442 if (!env
->aia_ireg_rmw_fn
[priv
]) {
1446 /* Find the selected guest interrupt file */
1447 vgein
= (virt
) ? get_field(env
->hstatus
, HSTATUS_VGEIN
) : 0;
1449 /* Selected guest interrupt file should be valid */
1450 if (virt
&& (!vgein
|| env
->geilen
< vgein
)) {
1454 /* Call machine specific IMSIC register emulation for TOPEI */
1455 ret
= env
->aia_ireg_rmw_fn
[priv
](env
->aia_ireg_rmw_fn_arg
[priv
],
1456 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI
, priv
, virt
, vgein
,
1457 riscv_cpu_mxl_bits(env
)),
1458 val
, new_val
, wr_mask
);
1462 return (riscv_cpu_virt_enabled(env
) && virt
) ?
1463 RISCV_EXCP_VIRT_INSTRUCTION_FAULT
: RISCV_EXCP_ILLEGAL_INST
;
1465 return RISCV_EXCP_NONE
;
1468 static RISCVException
read_mtvec(CPURISCVState
*env
, int csrno
,
1472 return RISCV_EXCP_NONE
;
1475 static RISCVException
write_mtvec(CPURISCVState
*env
, int csrno
,
1478 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
1479 if ((val
& 3) < 2) {
1482 qemu_log_mask(LOG_UNIMP
, "CSR_MTVEC: reserved mode not supported\n");
1484 return RISCV_EXCP_NONE
;
1487 static RISCVException
read_mcountinhibit(CPURISCVState
*env
, int csrno
,
1490 if (env
->priv_ver
< PRIV_VERSION_1_11_0
) {
1491 return RISCV_EXCP_ILLEGAL_INST
;
1494 *val
= env
->mcountinhibit
;
1495 return RISCV_EXCP_NONE
;
1498 static RISCVException
write_mcountinhibit(CPURISCVState
*env
, int csrno
,
1502 PMUCTRState
*counter
;
1504 if (env
->priv_ver
< PRIV_VERSION_1_11_0
) {
1505 return RISCV_EXCP_ILLEGAL_INST
;
1508 env
->mcountinhibit
= val
;
1510 /* Check if any other counter is also monitoring cycles/instructions */
1511 for (cidx
= 0; cidx
< RV_MAX_MHPMCOUNTERS
; cidx
++) {
1512 if (!get_field(env
->mcountinhibit
, BIT(cidx
))) {
1513 counter
= &env
->pmu_ctrs
[cidx
];
1514 counter
->started
= true;
1518 return RISCV_EXCP_NONE
;
1521 static RISCVException
read_mcounteren(CPURISCVState
*env
, int csrno
,
1524 *val
= env
->mcounteren
;
1525 return RISCV_EXCP_NONE
;
1528 static RISCVException
write_mcounteren(CPURISCVState
*env
, int csrno
,
1531 env
->mcounteren
= val
;
1532 return RISCV_EXCP_NONE
;
1535 /* Machine Trap Handling */
1536 static RISCVException
read_mscratch_i128(CPURISCVState
*env
, int csrno
,
1539 *val
= int128_make128(env
->mscratch
, env
->mscratchh
);
1540 return RISCV_EXCP_NONE
;
1543 static RISCVException
write_mscratch_i128(CPURISCVState
*env
, int csrno
,
1546 env
->mscratch
= int128_getlo(val
);
1547 env
->mscratchh
= int128_gethi(val
);
1548 return RISCV_EXCP_NONE
;
1551 static RISCVException
read_mscratch(CPURISCVState
*env
, int csrno
,
1554 *val
= env
->mscratch
;
1555 return RISCV_EXCP_NONE
;
1558 static RISCVException
write_mscratch(CPURISCVState
*env
, int csrno
,
1561 env
->mscratch
= val
;
1562 return RISCV_EXCP_NONE
;
1565 static RISCVException
read_mepc(CPURISCVState
*env
, int csrno
,
1569 return RISCV_EXCP_NONE
;
1572 static RISCVException
write_mepc(CPURISCVState
*env
, int csrno
,
1576 return RISCV_EXCP_NONE
;
1579 static RISCVException
read_mcause(CPURISCVState
*env
, int csrno
,
1583 return RISCV_EXCP_NONE
;
1586 static RISCVException
write_mcause(CPURISCVState
*env
, int csrno
,
1590 return RISCV_EXCP_NONE
;
1593 static RISCVException
read_mtval(CPURISCVState
*env
, int csrno
,
1597 return RISCV_EXCP_NONE
;
1600 static RISCVException
write_mtval(CPURISCVState
*env
, int csrno
,
1604 return RISCV_EXCP_NONE
;
1607 /* Execution environment configuration setup */
1608 static RISCVException
read_menvcfg(CPURISCVState
*env
, int csrno
,
1611 *val
= env
->menvcfg
;
1612 return RISCV_EXCP_NONE
;
1615 static RISCVException
write_menvcfg(CPURISCVState
*env
, int csrno
,
1618 uint64_t mask
= MENVCFG_FIOM
| MENVCFG_CBIE
| MENVCFG_CBCFE
| MENVCFG_CBZE
;
1620 if (riscv_cpu_mxl(env
) == MXL_RV64
) {
1621 mask
|= MENVCFG_PBMTE
| MENVCFG_STCE
;
1623 env
->menvcfg
= (env
->menvcfg
& ~mask
) | (val
& mask
);
1625 return RISCV_EXCP_NONE
;
1628 static RISCVException
read_menvcfgh(CPURISCVState
*env
, int csrno
,
1631 *val
= env
->menvcfg
>> 32;
1632 return RISCV_EXCP_NONE
;
1635 static RISCVException
write_menvcfgh(CPURISCVState
*env
, int csrno
,
1638 uint64_t mask
= MENVCFG_PBMTE
| MENVCFG_STCE
;
1639 uint64_t valh
= (uint64_t)val
<< 32;
1641 env
->menvcfg
= (env
->menvcfg
& ~mask
) | (valh
& mask
);
1643 return RISCV_EXCP_NONE
;
1646 static RISCVException
read_senvcfg(CPURISCVState
*env
, int csrno
,
1649 *val
= env
->senvcfg
;
1650 return RISCV_EXCP_NONE
;
1653 static RISCVException
write_senvcfg(CPURISCVState
*env
, int csrno
,
1656 uint64_t mask
= SENVCFG_FIOM
| SENVCFG_CBIE
| SENVCFG_CBCFE
| SENVCFG_CBZE
;
1658 env
->senvcfg
= (env
->senvcfg
& ~mask
) | (val
& mask
);
1660 return RISCV_EXCP_NONE
;
1663 static RISCVException
read_henvcfg(CPURISCVState
*env
, int csrno
,
1666 *val
= env
->henvcfg
;
1667 return RISCV_EXCP_NONE
;
1670 static RISCVException
write_henvcfg(CPURISCVState
*env
, int csrno
,
1673 uint64_t mask
= HENVCFG_FIOM
| HENVCFG_CBIE
| HENVCFG_CBCFE
| HENVCFG_CBZE
;
1675 if (riscv_cpu_mxl(env
) == MXL_RV64
) {
1676 mask
|= HENVCFG_PBMTE
| HENVCFG_STCE
;
1679 env
->henvcfg
= (env
->henvcfg
& ~mask
) | (val
& mask
);
1681 return RISCV_EXCP_NONE
;
1684 static RISCVException
read_henvcfgh(CPURISCVState
*env
, int csrno
,
1687 *val
= env
->henvcfg
>> 32;
1688 return RISCV_EXCP_NONE
;
1691 static RISCVException
write_henvcfgh(CPURISCVState
*env
, int csrno
,
1694 uint64_t mask
= HENVCFG_PBMTE
| HENVCFG_STCE
;
1695 uint64_t valh
= (uint64_t)val
<< 32;
1697 env
->henvcfg
= (env
->henvcfg
& ~mask
) | (valh
& mask
);
1699 return RISCV_EXCP_NONE
;
1702 static RISCVException
rmw_mip64(CPURISCVState
*env
, int csrno
,
1704 uint64_t new_val
, uint64_t wr_mask
)
1706 RISCVCPU
*cpu
= env_archcpu(env
);
1707 uint64_t old_mip
, mask
= wr_mask
& delegable_ints
;
1710 if (mask
& MIP_SEIP
) {
1711 env
->software_seip
= new_val
& MIP_SEIP
;
1712 new_val
|= env
->external_seip
* MIP_SEIP
;
1716 old_mip
= riscv_cpu_update_mip(cpu
, mask
, (new_val
& mask
));
1721 if (csrno
!= CSR_HVIP
) {
1722 gin
= get_field(env
->hstatus
, HSTATUS_VGEIN
);
1723 old_mip
|= (env
->hgeip
& ((target_ulong
)1 << gin
)) ? MIP_VSEIP
: 0;
1730 return RISCV_EXCP_NONE
;
1733 static RISCVException
rmw_mip(CPURISCVState
*env
, int csrno
,
1734 target_ulong
*ret_val
,
1735 target_ulong new_val
, target_ulong wr_mask
)
1740 ret
= rmw_mip64(env
, csrno
, &rval
, new_val
, wr_mask
);
1748 static RISCVException
rmw_miph(CPURISCVState
*env
, int csrno
,
1749 target_ulong
*ret_val
,
1750 target_ulong new_val
, target_ulong wr_mask
)
1755 ret
= rmw_mip64(env
, csrno
, &rval
,
1756 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
1758 *ret_val
= rval
>> 32;
1764 /* Supervisor Trap Setup */
1765 static RISCVException
read_sstatus_i128(CPURISCVState
*env
, int csrno
,
1768 uint64_t mask
= sstatus_v1_10_mask
;
1769 uint64_t sstatus
= env
->mstatus
& mask
;
1770 if (env
->xl
!= MXL_RV32
|| env
->debugger
) {
1771 mask
|= SSTATUS64_UXL
;
1774 *val
= int128_make128(sstatus
, add_status_sd(MXL_RV128
, sstatus
));
1775 return RISCV_EXCP_NONE
;
1778 static RISCVException
read_sstatus(CPURISCVState
*env
, int csrno
,
1781 target_ulong mask
= (sstatus_v1_10_mask
);
1782 if (env
->xl
!= MXL_RV32
|| env
->debugger
) {
1783 mask
|= SSTATUS64_UXL
;
1785 /* TODO: Use SXL not MXL. */
1786 *val
= add_status_sd(riscv_cpu_mxl(env
), env
->mstatus
& mask
);
1787 return RISCV_EXCP_NONE
;
1790 static RISCVException
write_sstatus(CPURISCVState
*env
, int csrno
,
1793 target_ulong mask
= (sstatus_v1_10_mask
);
1795 if (env
->xl
!= MXL_RV32
|| env
->debugger
) {
1796 if ((val
& SSTATUS64_UXL
) != 0) {
1797 mask
|= SSTATUS64_UXL
;
1800 target_ulong newval
= (env
->mstatus
& ~mask
) | (val
& mask
);
1801 return write_mstatus(env
, CSR_MSTATUS
, newval
);
1804 static RISCVException
rmw_vsie64(CPURISCVState
*env
, int csrno
,
1806 uint64_t new_val
, uint64_t wr_mask
)
1809 uint64_t rval
, vsbits
, mask
= env
->hideleg
& VS_MODE_INTERRUPTS
;
1811 /* Bring VS-level bits to correct position */
1812 vsbits
= new_val
& (VS_MODE_INTERRUPTS
>> 1);
1813 new_val
&= ~(VS_MODE_INTERRUPTS
>> 1);
1814 new_val
|= vsbits
<< 1;
1815 vsbits
= wr_mask
& (VS_MODE_INTERRUPTS
>> 1);
1816 wr_mask
&= ~(VS_MODE_INTERRUPTS
>> 1);
1817 wr_mask
|= vsbits
<< 1;
1819 ret
= rmw_mie64(env
, csrno
, &rval
, new_val
, wr_mask
& mask
);
1822 vsbits
= rval
& VS_MODE_INTERRUPTS
;
1823 rval
&= ~VS_MODE_INTERRUPTS
;
1824 *ret_val
= rval
| (vsbits
>> 1);
1830 static RISCVException
rmw_vsie(CPURISCVState
*env
, int csrno
,
1831 target_ulong
*ret_val
,
1832 target_ulong new_val
, target_ulong wr_mask
)
1837 ret
= rmw_vsie64(env
, csrno
, &rval
, new_val
, wr_mask
);
1845 static RISCVException
rmw_vsieh(CPURISCVState
*env
, int csrno
,
1846 target_ulong
*ret_val
,
1847 target_ulong new_val
, target_ulong wr_mask
)
1852 ret
= rmw_vsie64(env
, csrno
, &rval
,
1853 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
1855 *ret_val
= rval
>> 32;
1861 static RISCVException
rmw_sie64(CPURISCVState
*env
, int csrno
,
1863 uint64_t new_val
, uint64_t wr_mask
)
1866 uint64_t mask
= env
->mideleg
& S_MODE_INTERRUPTS
;
1868 if (riscv_cpu_virt_enabled(env
)) {
1869 if (env
->hvictl
& HVICTL_VTI
) {
1870 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
1872 ret
= rmw_vsie64(env
, CSR_VSIE
, ret_val
, new_val
, wr_mask
);
1874 ret
= rmw_mie64(env
, csrno
, ret_val
, new_val
, wr_mask
& mask
);
1884 static RISCVException
rmw_sie(CPURISCVState
*env
, int csrno
,
1885 target_ulong
*ret_val
,
1886 target_ulong new_val
, target_ulong wr_mask
)
1891 ret
= rmw_sie64(env
, csrno
, &rval
, new_val
, wr_mask
);
1892 if (ret
== RISCV_EXCP_NONE
&& ret_val
) {
1899 static RISCVException
rmw_sieh(CPURISCVState
*env
, int csrno
,
1900 target_ulong
*ret_val
,
1901 target_ulong new_val
, target_ulong wr_mask
)
1906 ret
= rmw_sie64(env
, csrno
, &rval
,
1907 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
1909 *ret_val
= rval
>> 32;
1915 static RISCVException
read_stvec(CPURISCVState
*env
, int csrno
,
1919 return RISCV_EXCP_NONE
;
1922 static RISCVException
write_stvec(CPURISCVState
*env
, int csrno
,
1925 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */
1926 if ((val
& 3) < 2) {
1929 qemu_log_mask(LOG_UNIMP
, "CSR_STVEC: reserved mode not supported\n");
1931 return RISCV_EXCP_NONE
;
1934 static RISCVException
read_scounteren(CPURISCVState
*env
, int csrno
,
1937 *val
= env
->scounteren
;
1938 return RISCV_EXCP_NONE
;
1941 static RISCVException
write_scounteren(CPURISCVState
*env
, int csrno
,
1944 env
->scounteren
= val
;
1945 return RISCV_EXCP_NONE
;
1948 /* Supervisor Trap Handling */
1949 static RISCVException
read_sscratch_i128(CPURISCVState
*env
, int csrno
,
1952 *val
= int128_make128(env
->sscratch
, env
->sscratchh
);
1953 return RISCV_EXCP_NONE
;
1956 static RISCVException
write_sscratch_i128(CPURISCVState
*env
, int csrno
,
1959 env
->sscratch
= int128_getlo(val
);
1960 env
->sscratchh
= int128_gethi(val
);
1961 return RISCV_EXCP_NONE
;
1964 static RISCVException
read_sscratch(CPURISCVState
*env
, int csrno
,
1967 *val
= env
->sscratch
;
1968 return RISCV_EXCP_NONE
;
1971 static RISCVException
write_sscratch(CPURISCVState
*env
, int csrno
,
1974 env
->sscratch
= val
;
1975 return RISCV_EXCP_NONE
;
1978 static RISCVException
read_sepc(CPURISCVState
*env
, int csrno
,
1982 return RISCV_EXCP_NONE
;
1985 static RISCVException
write_sepc(CPURISCVState
*env
, int csrno
,
1989 return RISCV_EXCP_NONE
;
1992 static RISCVException
read_scause(CPURISCVState
*env
, int csrno
,
1996 return RISCV_EXCP_NONE
;
1999 static RISCVException
write_scause(CPURISCVState
*env
, int csrno
,
2003 return RISCV_EXCP_NONE
;
2006 static RISCVException
read_stval(CPURISCVState
*env
, int csrno
,
2010 return RISCV_EXCP_NONE
;
2013 static RISCVException
write_stval(CPURISCVState
*env
, int csrno
,
2017 return RISCV_EXCP_NONE
;
2020 static RISCVException
rmw_vsip64(CPURISCVState
*env
, int csrno
,
2022 uint64_t new_val
, uint64_t wr_mask
)
2025 uint64_t rval
, vsbits
, mask
= env
->hideleg
& vsip_writable_mask
;
2027 /* Bring VS-level bits to correct position */
2028 vsbits
= new_val
& (VS_MODE_INTERRUPTS
>> 1);
2029 new_val
&= ~(VS_MODE_INTERRUPTS
>> 1);
2030 new_val
|= vsbits
<< 1;
2031 vsbits
= wr_mask
& (VS_MODE_INTERRUPTS
>> 1);
2032 wr_mask
&= ~(VS_MODE_INTERRUPTS
>> 1);
2033 wr_mask
|= vsbits
<< 1;
2035 ret
= rmw_mip64(env
, csrno
, &rval
, new_val
, wr_mask
& mask
);
2038 vsbits
= rval
& VS_MODE_INTERRUPTS
;
2039 rval
&= ~VS_MODE_INTERRUPTS
;
2040 *ret_val
= rval
| (vsbits
>> 1);
2046 static RISCVException
rmw_vsip(CPURISCVState
*env
, int csrno
,
2047 target_ulong
*ret_val
,
2048 target_ulong new_val
, target_ulong wr_mask
)
2053 ret
= rmw_vsip64(env
, csrno
, &rval
, new_val
, wr_mask
);
2061 static RISCVException
rmw_vsiph(CPURISCVState
*env
, int csrno
,
2062 target_ulong
*ret_val
,
2063 target_ulong new_val
, target_ulong wr_mask
)
2068 ret
= rmw_vsip64(env
, csrno
, &rval
,
2069 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
2071 *ret_val
= rval
>> 32;
2077 static RISCVException
rmw_sip64(CPURISCVState
*env
, int csrno
,
2079 uint64_t new_val
, uint64_t wr_mask
)
2082 uint64_t mask
= env
->mideleg
& sip_writable_mask
;
2084 if (riscv_cpu_virt_enabled(env
)) {
2085 if (env
->hvictl
& HVICTL_VTI
) {
2086 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT
;
2088 ret
= rmw_vsip64(env
, CSR_VSIP
, ret_val
, new_val
, wr_mask
);
2090 ret
= rmw_mip64(env
, csrno
, ret_val
, new_val
, wr_mask
& mask
);
2094 *ret_val
&= env
->mideleg
& S_MODE_INTERRUPTS
;
2100 static RISCVException
rmw_sip(CPURISCVState
*env
, int csrno
,
2101 target_ulong
*ret_val
,
2102 target_ulong new_val
, target_ulong wr_mask
)
2107 ret
= rmw_sip64(env
, csrno
, &rval
, new_val
, wr_mask
);
2115 static RISCVException
rmw_siph(CPURISCVState
*env
, int csrno
,
2116 target_ulong
*ret_val
,
2117 target_ulong new_val
, target_ulong wr_mask
)
2122 ret
= rmw_sip64(env
, csrno
, &rval
,
2123 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
2125 *ret_val
= rval
>> 32;
2131 /* Supervisor Protection and Translation */
2132 static RISCVException
read_satp(CPURISCVState
*env
, int csrno
,
2135 if (!riscv_feature(env
, RISCV_FEATURE_MMU
)) {
2137 return RISCV_EXCP_NONE
;
2140 if (env
->priv
== PRV_S
&& get_field(env
->mstatus
, MSTATUS_TVM
)) {
2141 return RISCV_EXCP_ILLEGAL_INST
;
2146 return RISCV_EXCP_NONE
;
2149 static RISCVException
write_satp(CPURISCVState
*env
, int csrno
,
2152 target_ulong vm
, mask
;
2154 if (!riscv_feature(env
, RISCV_FEATURE_MMU
)) {
2155 return RISCV_EXCP_NONE
;
2158 if (riscv_cpu_mxl(env
) == MXL_RV32
) {
2159 vm
= validate_vm(env
, get_field(val
, SATP32_MODE
));
2160 mask
= (val
^ env
->satp
) & (SATP32_MODE
| SATP32_ASID
| SATP32_PPN
);
2162 vm
= validate_vm(env
, get_field(val
, SATP64_MODE
));
2163 mask
= (val
^ env
->satp
) & (SATP64_MODE
| SATP64_ASID
| SATP64_PPN
);
2167 if (env
->priv
== PRV_S
&& get_field(env
->mstatus
, MSTATUS_TVM
)) {
2168 return RISCV_EXCP_ILLEGAL_INST
;
2171 * The ISA defines SATP.MODE=Bare as "no translation", but we still
2172 * pass these through QEMU's TLB emulation as it improves
2173 * performance. Flushing the TLB on SATP writes with paging
2174 * enabled avoids leaking those invalid cached mappings.
2176 tlb_flush(env_cpu(env
));
2180 return RISCV_EXCP_NONE
;
2183 static int read_vstopi(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
2187 uint64_t vseip
, vsgein
;
2188 uint32_t iid
, iprio
, hviid
, hviprio
, gein
;
2189 uint32_t s
, scount
= 0, siid
[VSTOPI_NUM_SRCS
], siprio
[VSTOPI_NUM_SRCS
];
2191 gein
= get_field(env
->hstatus
, HSTATUS_VGEIN
);
2192 hviid
= get_field(env
->hvictl
, HVICTL_IID
);
2193 hviprio
= get_field(env
->hvictl
, HVICTL_IPRIO
);
2196 vsgein
= (env
->hgeip
& (1ULL << gein
)) ? MIP_VSEIP
: 0;
2197 vseip
= env
->mie
& (env
->mip
| vsgein
) & MIP_VSEIP
;
2198 if (gein
<= env
->geilen
&& vseip
) {
2199 siid
[scount
] = IRQ_S_EXT
;
2200 siprio
[scount
] = IPRIO_MMAXIPRIO
+ 1;
2201 if (env
->aia_ireg_rmw_fn
[PRV_S
]) {
2203 * Call machine specific IMSIC register emulation for
2206 ret
= env
->aia_ireg_rmw_fn
[PRV_S
](
2207 env
->aia_ireg_rmw_fn_arg
[PRV_S
],
2208 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI
, PRV_S
, true, gein
,
2209 riscv_cpu_mxl_bits(env
)),
2211 if (!ret
&& topei
) {
2212 siprio
[scount
] = topei
& IMSIC_TOPEI_IPRIO_MASK
;
2218 if (hviid
== IRQ_S_EXT
&& hviprio
) {
2219 siid
[scount
] = IRQ_S_EXT
;
2220 siprio
[scount
] = hviprio
;
2225 if (env
->hvictl
& HVICTL_VTI
) {
2226 if (hviid
!= IRQ_S_EXT
) {
2227 siid
[scount
] = hviid
;
2228 siprio
[scount
] = hviprio
;
2232 irq
= riscv_cpu_vsirq_pending(env
);
2233 if (irq
!= IRQ_S_EXT
&& 0 < irq
&& irq
<= 63) {
2235 siprio
[scount
] = env
->hviprio
[irq
];
2242 for (s
= 0; s
< scount
; s
++) {
2243 if (siprio
[s
] < iprio
) {
2250 if (env
->hvictl
& HVICTL_IPRIOM
) {
2251 if (iprio
> IPRIO_MMAXIPRIO
) {
2252 iprio
= IPRIO_MMAXIPRIO
;
2255 if (riscv_cpu_default_priority(iid
) > IPRIO_DEFAULT_S
) {
2256 iprio
= IPRIO_MMAXIPRIO
;
2266 *val
= (iid
& TOPI_IID_MASK
) << TOPI_IID_SHIFT
;
2268 return RISCV_EXCP_NONE
;
2271 static int read_stopi(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
2276 if (riscv_cpu_virt_enabled(env
)) {
2277 return read_vstopi(env
, CSR_VSTOPI
, val
);
2280 irq
= riscv_cpu_sirq_pending(env
);
2281 if (irq
<= 0 || irq
> 63) {
2284 iprio
= env
->siprio
[irq
];
2286 if (riscv_cpu_default_priority(irq
) > IPRIO_DEFAULT_S
) {
2287 iprio
= IPRIO_MMAXIPRIO
;
2290 *val
= (irq
& TOPI_IID_MASK
) << TOPI_IID_SHIFT
;
2294 return RISCV_EXCP_NONE
;
2297 /* Hypervisor Extensions */
2298 static RISCVException
read_hstatus(CPURISCVState
*env
, int csrno
,
2301 *val
= env
->hstatus
;
2302 if (riscv_cpu_mxl(env
) != MXL_RV32
) {
2303 /* We only support 64-bit VSXL */
2304 *val
= set_field(*val
, HSTATUS_VSXL
, 2);
2306 /* We only support little endian */
2307 *val
= set_field(*val
, HSTATUS_VSBE
, 0);
2308 return RISCV_EXCP_NONE
;
2311 static RISCVException
write_hstatus(CPURISCVState
*env
, int csrno
,
2315 if (riscv_cpu_mxl(env
) != MXL_RV32
&& get_field(val
, HSTATUS_VSXL
) != 2) {
2316 qemu_log_mask(LOG_UNIMP
, "QEMU does not support mixed HSXLEN options.");
2318 if (get_field(val
, HSTATUS_VSBE
) != 0) {
2319 qemu_log_mask(LOG_UNIMP
, "QEMU does not support big endian guests.");
2321 return RISCV_EXCP_NONE
;
2324 static RISCVException
read_hedeleg(CPURISCVState
*env
, int csrno
,
2327 *val
= env
->hedeleg
;
2328 return RISCV_EXCP_NONE
;
2331 static RISCVException
write_hedeleg(CPURISCVState
*env
, int csrno
,
2334 env
->hedeleg
= val
& vs_delegable_excps
;
2335 return RISCV_EXCP_NONE
;
2338 static RISCVException
rmw_hideleg64(CPURISCVState
*env
, int csrno
,
2340 uint64_t new_val
, uint64_t wr_mask
)
2342 uint64_t mask
= wr_mask
& vs_delegable_ints
;
2345 *ret_val
= env
->hideleg
& vs_delegable_ints
;
2348 env
->hideleg
= (env
->hideleg
& ~mask
) | (new_val
& mask
);
2349 return RISCV_EXCP_NONE
;
2352 static RISCVException
rmw_hideleg(CPURISCVState
*env
, int csrno
,
2353 target_ulong
*ret_val
,
2354 target_ulong new_val
, target_ulong wr_mask
)
2359 ret
= rmw_hideleg64(env
, csrno
, &rval
, new_val
, wr_mask
);
2367 static RISCVException
rmw_hidelegh(CPURISCVState
*env
, int csrno
,
2368 target_ulong
*ret_val
,
2369 target_ulong new_val
, target_ulong wr_mask
)
2374 ret
= rmw_hideleg64(env
, csrno
, &rval
,
2375 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
2377 *ret_val
= rval
>> 32;
2383 static RISCVException
rmw_hvip64(CPURISCVState
*env
, int csrno
,
2385 uint64_t new_val
, uint64_t wr_mask
)
2389 ret
= rmw_mip64(env
, csrno
, ret_val
, new_val
,
2390 wr_mask
& hvip_writable_mask
);
2392 *ret_val
&= VS_MODE_INTERRUPTS
;
2398 static RISCVException
rmw_hvip(CPURISCVState
*env
, int csrno
,
2399 target_ulong
*ret_val
,
2400 target_ulong new_val
, target_ulong wr_mask
)
2405 ret
= rmw_hvip64(env
, csrno
, &rval
, new_val
, wr_mask
);
2413 static RISCVException
rmw_hviph(CPURISCVState
*env
, int csrno
,
2414 target_ulong
*ret_val
,
2415 target_ulong new_val
, target_ulong wr_mask
)
2420 ret
= rmw_hvip64(env
, csrno
, &rval
,
2421 ((uint64_t)new_val
) << 32, ((uint64_t)wr_mask
) << 32);
2423 *ret_val
= rval
>> 32;
2429 static RISCVException
rmw_hip(CPURISCVState
*env
, int csrno
,
2430 target_ulong
*ret_value
,
2431 target_ulong new_value
, target_ulong write_mask
)
2433 int ret
= rmw_mip(env
, csrno
, ret_value
, new_value
,
2434 write_mask
& hip_writable_mask
);
2437 *ret_value
&= HS_MODE_INTERRUPTS
;
2442 static RISCVException
rmw_hie(CPURISCVState
*env
, int csrno
,
2443 target_ulong
*ret_val
,
2444 target_ulong new_val
, target_ulong wr_mask
)
2449 ret
= rmw_mie64(env
, csrno
, &rval
, new_val
, wr_mask
& HS_MODE_INTERRUPTS
);
2451 *ret_val
= rval
& HS_MODE_INTERRUPTS
;
2457 static RISCVException
read_hcounteren(CPURISCVState
*env
, int csrno
,
2460 *val
= env
->hcounteren
;
2461 return RISCV_EXCP_NONE
;
2464 static RISCVException
write_hcounteren(CPURISCVState
*env
, int csrno
,
2467 env
->hcounteren
= val
;
2468 return RISCV_EXCP_NONE
;
2471 static RISCVException
read_hgeie(CPURISCVState
*env
, int csrno
,
2477 return RISCV_EXCP_NONE
;
2480 static RISCVException
write_hgeie(CPURISCVState
*env
, int csrno
,
2483 /* Only GEILEN:1 bits implemented and BIT0 is never implemented */
2484 val
&= ((((target_ulong
)1) << env
->geilen
) - 1) << 1;
2486 /* Update mip.SGEIP bit */
2487 riscv_cpu_update_mip(env_archcpu(env
), MIP_SGEIP
,
2488 BOOL_TO_MASK(!!(env
->hgeie
& env
->hgeip
)));
2489 return RISCV_EXCP_NONE
;
2492 static RISCVException
read_htval(CPURISCVState
*env
, int csrno
,
2496 return RISCV_EXCP_NONE
;
2499 static RISCVException
write_htval(CPURISCVState
*env
, int csrno
,
2503 return RISCV_EXCP_NONE
;
2506 static RISCVException
read_htinst(CPURISCVState
*env
, int csrno
,
2510 return RISCV_EXCP_NONE
;
2513 static RISCVException
write_htinst(CPURISCVState
*env
, int csrno
,
2516 return RISCV_EXCP_NONE
;
2519 static RISCVException
read_hgeip(CPURISCVState
*env
, int csrno
,
2525 return RISCV_EXCP_NONE
;
2528 static RISCVException
read_hgatp(CPURISCVState
*env
, int csrno
,
2532 return RISCV_EXCP_NONE
;
2535 static RISCVException
write_hgatp(CPURISCVState
*env
, int csrno
,
2539 return RISCV_EXCP_NONE
;
2542 static RISCVException
read_htimedelta(CPURISCVState
*env
, int csrno
,
2545 if (!env
->rdtime_fn
) {
2546 return RISCV_EXCP_ILLEGAL_INST
;
2549 *val
= env
->htimedelta
;
2550 return RISCV_EXCP_NONE
;
2553 static RISCVException
write_htimedelta(CPURISCVState
*env
, int csrno
,
2556 if (!env
->rdtime_fn
) {
2557 return RISCV_EXCP_ILLEGAL_INST
;
2560 if (riscv_cpu_mxl(env
) == MXL_RV32
) {
2561 env
->htimedelta
= deposit64(env
->htimedelta
, 0, 32, (uint64_t)val
);
2563 env
->htimedelta
= val
;
2565 return RISCV_EXCP_NONE
;
2568 static RISCVException
read_htimedeltah(CPURISCVState
*env
, int csrno
,
2571 if (!env
->rdtime_fn
) {
2572 return RISCV_EXCP_ILLEGAL_INST
;
2575 *val
= env
->htimedelta
>> 32;
2576 return RISCV_EXCP_NONE
;
2579 static RISCVException
write_htimedeltah(CPURISCVState
*env
, int csrno
,
2582 if (!env
->rdtime_fn
) {
2583 return RISCV_EXCP_ILLEGAL_INST
;
2586 env
->htimedelta
= deposit64(env
->htimedelta
, 32, 32, (uint64_t)val
);
2587 return RISCV_EXCP_NONE
;
2590 static int read_hvictl(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
2593 return RISCV_EXCP_NONE
;
2596 static int write_hvictl(CPURISCVState
*env
, int csrno
, target_ulong val
)
2598 env
->hvictl
= val
& HVICTL_VALID_MASK
;
2599 return RISCV_EXCP_NONE
;
2602 static int read_hvipriox(CPURISCVState
*env
, int first_index
,
2603 uint8_t *iprio
, target_ulong
*val
)
2605 int i
, irq
, rdzero
, num_irqs
= 4 * (riscv_cpu_mxl_bits(env
) / 32);
2607 /* First index has to be a multiple of number of irqs per register */
2608 if (first_index
% num_irqs
) {
2609 return (riscv_cpu_virt_enabled(env
)) ?
2610 RISCV_EXCP_VIRT_INSTRUCTION_FAULT
: RISCV_EXCP_ILLEGAL_INST
;
2613 /* Fill-up return value */
2615 for (i
= 0; i
< num_irqs
; i
++) {
2616 if (riscv_cpu_hviprio_index2irq(first_index
+ i
, &irq
, &rdzero
)) {
2622 *val
|= ((target_ulong
)iprio
[irq
]) << (i
* 8);
2625 return RISCV_EXCP_NONE
;
2628 static int write_hvipriox(CPURISCVState
*env
, int first_index
,
2629 uint8_t *iprio
, target_ulong val
)
2631 int i
, irq
, rdzero
, num_irqs
= 4 * (riscv_cpu_mxl_bits(env
) / 32);
2633 /* First index has to be a multiple of number of irqs per register */
2634 if (first_index
% num_irqs
) {
2635 return (riscv_cpu_virt_enabled(env
)) ?
2636 RISCV_EXCP_VIRT_INSTRUCTION_FAULT
: RISCV_EXCP_ILLEGAL_INST
;
2639 /* Fill-up priority arrary */
2640 for (i
= 0; i
< num_irqs
; i
++) {
2641 if (riscv_cpu_hviprio_index2irq(first_index
+ i
, &irq
, &rdzero
)) {
2647 iprio
[irq
] = (val
>> (i
* 8)) & 0xff;
2651 return RISCV_EXCP_NONE
;
2654 static int read_hviprio1(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
2656 return read_hvipriox(env
, 0, env
->hviprio
, val
);
2659 static int write_hviprio1(CPURISCVState
*env
, int csrno
, target_ulong val
)
2661 return write_hvipriox(env
, 0, env
->hviprio
, val
);
2664 static int read_hviprio1h(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
2666 return read_hvipriox(env
, 4, env
->hviprio
, val
);
2669 static int write_hviprio1h(CPURISCVState
*env
, int csrno
, target_ulong val
)
2671 return write_hvipriox(env
, 4, env
->hviprio
, val
);
2674 static int read_hviprio2(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
2676 return read_hvipriox(env
, 8, env
->hviprio
, val
);
2679 static int write_hviprio2(CPURISCVState
*env
, int csrno
, target_ulong val
)
2681 return write_hvipriox(env
, 8, env
->hviprio
, val
);
2684 static int read_hviprio2h(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
2686 return read_hvipriox(env
, 12, env
->hviprio
, val
);
2689 static int write_hviprio2h(CPURISCVState
*env
, int csrno
, target_ulong val
)
2691 return write_hvipriox(env
, 12, env
->hviprio
, val
);
2694 /* Virtual CSR Registers */
2695 static RISCVException
read_vsstatus(CPURISCVState
*env
, int csrno
,
2698 *val
= env
->vsstatus
;
2699 return RISCV_EXCP_NONE
;
2702 static RISCVException
write_vsstatus(CPURISCVState
*env
, int csrno
,
2705 uint64_t mask
= (target_ulong
)-1;
2706 if ((val
& VSSTATUS64_UXL
) == 0) {
2707 mask
&= ~VSSTATUS64_UXL
;
2709 env
->vsstatus
= (env
->vsstatus
& ~mask
) | (uint64_t)val
;
2710 return RISCV_EXCP_NONE
;
2713 static int read_vstvec(CPURISCVState
*env
, int csrno
, target_ulong
*val
)
2716 return RISCV_EXCP_NONE
;
2719 static RISCVException
write_vstvec(CPURISCVState
*env
, int csrno
,
2723 return RISCV_EXCP_NONE
;
2726 static RISCVException
read_vsscratch(CPURISCVState
*env
, int csrno
,
2729 *val
= env
->vsscratch
;
2730 return RISCV_EXCP_NONE
;
2733 static RISCVException
write_vsscratch(CPURISCVState
*env
, int csrno
,
2736 env
->vsscratch
= val
;
2737 return RISCV_EXCP_NONE
;
2740 static RISCVException
read_vsepc(CPURISCVState
*env
, int csrno
,
2744 return RISCV_EXCP_NONE
;
2747 static RISCVException
write_vsepc(CPURISCVState
*env
, int csrno
,
2751 return RISCV_EXCP_NONE
;
2754 static RISCVException
read_vscause(CPURISCVState
*env
, int csrno
,
2757 *val
= env
->vscause
;
2758 return RISCV_EXCP_NONE
;
2761 static RISCVException
write_vscause(CPURISCVState
*env
, int csrno
,
2765 return RISCV_EXCP_NONE
;
2768 static RISCVException
read_vstval(CPURISCVState
*env
, int csrno
,
2772 return RISCV_EXCP_NONE
;
2775 static RISCVException
write_vstval(CPURISCVState
*env
, int csrno
,
2779 return RISCV_EXCP_NONE
;
2782 static RISCVException
read_vsatp(CPURISCVState
*env
, int csrno
,
2786 return RISCV_EXCP_NONE
;
2789 static RISCVException
write_vsatp(CPURISCVState
*env
, int csrno
,
2793 return RISCV_EXCP_NONE
;
2796 static RISCVException
read_mtval2(CPURISCVState
*env
, int csrno
,
2800 return RISCV_EXCP_NONE
;
2803 static RISCVException
write_mtval2(CPURISCVState
*env
, int csrno
,
2807 return RISCV_EXCP_NONE
;
2810 static RISCVException
read_mtinst(CPURISCVState
*env
, int csrno
,
2814 return RISCV_EXCP_NONE
;
2817 static RISCVException
write_mtinst(CPURISCVState
*env
, int csrno
,
2821 return RISCV_EXCP_NONE
;
2824 /* Physical Memory Protection */
2825 static RISCVException
read_mseccfg(CPURISCVState
*env
, int csrno
,
2828 *val
= mseccfg_csr_read(env
);
2829 return RISCV_EXCP_NONE
;
2832 static RISCVException
write_mseccfg(CPURISCVState
*env
, int csrno
,
2835 mseccfg_csr_write(env
, val
);
2836 return RISCV_EXCP_NONE
;
2839 static bool check_pmp_reg_index(CPURISCVState
*env
, uint32_t reg_index
)
2841 /* TODO: RV128 restriction check */
2842 if ((reg_index
& 1) && (riscv_cpu_mxl(env
) == MXL_RV64
)) {
2848 static RISCVException
read_pmpcfg(CPURISCVState
*env
, int csrno
,
2851 uint32_t reg_index
= csrno
- CSR_PMPCFG0
;
2853 if (!check_pmp_reg_index(env
, reg_index
)) {
2854 return RISCV_EXCP_ILLEGAL_INST
;
2856 *val
= pmpcfg_csr_read(env
, csrno
- CSR_PMPCFG0
);
2857 return RISCV_EXCP_NONE
;
2860 static RISCVException
write_pmpcfg(CPURISCVState
*env
, int csrno
,
2863 uint32_t reg_index
= csrno
- CSR_PMPCFG0
;
2865 if (!check_pmp_reg_index(env
, reg_index
)) {
2866 return RISCV_EXCP_ILLEGAL_INST
;
2868 pmpcfg_csr_write(env
, csrno
- CSR_PMPCFG0
, val
);
2869 return RISCV_EXCP_NONE
;
2872 static RISCVException
read_pmpaddr(CPURISCVState
*env
, int csrno
,
2875 *val
= pmpaddr_csr_read(env
, csrno
- CSR_PMPADDR0
);
2876 return RISCV_EXCP_NONE
;
2879 static RISCVException
write_pmpaddr(CPURISCVState
*env
, int csrno
,
2882 pmpaddr_csr_write(env
, csrno
- CSR_PMPADDR0
, val
);
2883 return RISCV_EXCP_NONE
;
2886 static RISCVException
read_tselect(CPURISCVState
*env
, int csrno
,
2889 *val
= tselect_csr_read(env
);
2890 return RISCV_EXCP_NONE
;
2893 static RISCVException
write_tselect(CPURISCVState
*env
, int csrno
,
2896 tselect_csr_write(env
, val
);
2897 return RISCV_EXCP_NONE
;
2900 static RISCVException
read_tdata(CPURISCVState
*env
, int csrno
,
2903 /* return 0 in tdata1 to end the trigger enumeration */
2904 if (env
->trigger_cur
>= TRIGGER_NUM
&& csrno
== CSR_TDATA1
) {
2906 return RISCV_EXCP_NONE
;
2909 if (!tdata_available(env
, csrno
- CSR_TDATA1
)) {
2910 return RISCV_EXCP_ILLEGAL_INST
;
2913 *val
= tdata_csr_read(env
, csrno
- CSR_TDATA1
);
2914 return RISCV_EXCP_NONE
;
2917 static RISCVException
write_tdata(CPURISCVState
*env
, int csrno
,
2920 if (!tdata_available(env
, csrno
- CSR_TDATA1
)) {
2921 return RISCV_EXCP_ILLEGAL_INST
;
2924 tdata_csr_write(env
, csrno
- CSR_TDATA1
, val
);
2925 return RISCV_EXCP_NONE
;
2929 * Functions to access Pointer Masking feature registers
2930 * We have to check if current priv lvl could modify
2933 static bool check_pm_current_disabled(CPURISCVState
*env
, int csrno
)
2935 int csr_priv
= get_field(csrno
, 0x300);
2938 if (env
->debugger
) {
2942 * If priv lvls differ that means we're accessing csr from higher priv lvl,
2943 * so allow the access
2945 if (env
->priv
!= csr_priv
) {
2948 switch (env
->priv
) {
2950 pm_current
= get_field(env
->mmte
, M_PM_CURRENT
);
2953 pm_current
= get_field(env
->mmte
, S_PM_CURRENT
);
2956 pm_current
= get_field(env
->mmte
, U_PM_CURRENT
);
2959 g_assert_not_reached();
2961 /* It's same priv lvl, so we allow to modify csr only if pm.current==1 */
2965 static RISCVException
read_mmte(CPURISCVState
*env
, int csrno
,
2968 *val
= env
->mmte
& MMTE_MASK
;
2969 return RISCV_EXCP_NONE
;
/*
 * Write the machine-mode pointer-masking control CSR (mmte).
 * WPRI bits are dropped (with a guest-error log when the guest set any),
 * some bits are hardwired, the effective pointer-masking state is
 * recomputed, and mstatus.XS/SD are set to mark the PM state dirty.
 */
static RISCVException write_mmte(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    uint64_t mstatus;
    /* Keep only the writable (non-WPRI) bits. */
    target_ulong wpri_val = val & MMTE_MASK;

    if (val != wpri_val) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
                      "MMTE: WPRI violation written 0x", val,
                      "vs expected 0x", wpri_val);
    }
    /* for machine mode pm.current is hardwired to 1 */
    wpri_val |= MMTE_M_PM_CURRENT;

    /* hardwiring pm.instruction bit to 0, since it's not supported yet */
    wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
    env->mmte = wpri_val | PM_EXT_DIRTY;
    /* Refresh the cached pointer-masking mask/base for the current mode. */
    riscv_cpu_update_mask(env);

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}
/* Read the supervisor view of the pointer-masking control CSR (smte). */
static RISCVException read_smte(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    /* smte is an alias of the S-mode bits held in mmte. */
    *val = env->mmte & SMTE_MASK;
    return RISCV_EXCP_NONE;
}
/*
 * Write the supervisor pointer-masking control CSR (smte).  Writes to
 * WPRI bits are dropped (and logged), the write is silently ignored when
 * pm.current forbids it, and the surviving bits are merged into mmte.
 */
static RISCVException write_smte(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    target_ulong wpri_val = val & SMTE_MASK;

    if (val != wpri_val) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
                      "SMTE: WPRI violation written 0x", val,
                      "vs expected 0x", wpri_val);
    }

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }

    /* Preserve the non-S-mode bits of mmte and delegate to write_mmte. */
    wpri_val |= (env->mmte & ~SMTE_MASK);
    write_mmte(env, csrno, wpri_val);
    return RISCV_EXCP_NONE;
}
/* Read the user view of the pointer-masking control CSR (umte). */
static RISCVException read_umte(CPURISCVState *env, int csrno,
                                target_ulong *val)
{
    /* umte is an alias of the U-mode bits held in mmte. */
    *val = env->mmte & UMTE_MASK;
    return RISCV_EXCP_NONE;
}
/*
 * Write the user pointer-masking control CSR (umte).  Mirrors
 * write_smte: WPRI bits dropped and logged, write ignored when
 * pm.current forbids it, remaining bits merged into mmte.
 */
static RISCVException write_umte(CPURISCVState *env, int csrno,
                                 target_ulong val)
{
    target_ulong wpri_val = val & UMTE_MASK;

    if (val != wpri_val) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
                      "UMTE: WPRI violation written 0x", val,
                      "vs expected 0x", wpri_val);
    }

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }

    /* Preserve the non-U-mode bits of mmte and delegate to write_mmte. */
    wpri_val |= (env->mmte & ~UMTE_MASK);
    write_mmte(env, csrno, wpri_val);
    return RISCV_EXCP_NONE;
}
/* Read the M-mode pointer-masking mask register. */
static RISCVException read_mpmmask(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mpmmask;
    return RISCV_EXCP_NONE;
}
/*
 * Write the M-mode pointer-masking mask register.  Also updates the
 * cached active mask when M-mode masking is currently in effect, and
 * marks the PM state dirty via mmte and mstatus.XS/SD.
 */
static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    env->mpmmask = val;
    /* If executing in M mode with PM enabled, the new mask is live now. */
    if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
        env->cur_pmmask = val;
    }
    env->mmte |= PM_EXT_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}
/* Read the S-mode pointer-masking mask register. */
static RISCVException read_spmmask(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->spmmask;
    return RISCV_EXCP_NONE;
}
/*
 * Write the S-mode pointer-masking mask register.  The write is ignored
 * when pm.current forbids it; otherwise the cached active mask is
 * updated if S-mode masking is live, and the PM state is marked dirty.
 */
static RISCVException write_spmmask(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }
    env->spmmask = val;
    /* If executing in S mode with PM enabled, the new mask is live now. */
    if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
        env->cur_pmmask = val;
    }
    env->mmte |= PM_EXT_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}
/* Read the U-mode pointer-masking mask register. */
static RISCVException read_upmmask(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->upmmask;
    return RISCV_EXCP_NONE;
}
/*
 * Write the U-mode pointer-masking mask register.  Same structure as
 * write_spmmask, but keyed on U-mode state.
 */
static RISCVException write_upmmask(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }
    env->upmmask = val;
    /* If executing in U mode with PM enabled, the new mask is live now. */
    if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
        env->cur_pmmask = val;
    }
    env->mmte |= PM_EXT_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}
/* Read the M-mode pointer-masking base register. */
static RISCVException read_mpmbase(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->mpmbase;
    return RISCV_EXCP_NONE;
}
/*
 * Write the M-mode pointer-masking base register.  Also updates the
 * cached active base when M-mode masking is currently in effect, and
 * marks the PM state dirty via mmte and mstatus.XS/SD.
 */
static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    env->mpmbase = val;
    /* If executing in M mode with PM enabled, the new base is live now. */
    if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
        env->cur_pmbase = val;
    }
    env->mmte |= PM_EXT_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}
/* Read the S-mode pointer-masking base register. */
static RISCVException read_spmbase(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->spmbase;
    return RISCV_EXCP_NONE;
}
/*
 * Write the S-mode pointer-masking base register.  Ignored when
 * pm.current forbids the access; otherwise mirrors write_mpmbase for
 * S-mode state.
 */
static RISCVException write_spmbase(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }
    env->spmbase = val;
    /* If executing in S mode with PM enabled, the new base is live now. */
    if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
        env->cur_pmbase = val;
    }
    env->mmte |= PM_EXT_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}
/* Read the U-mode pointer-masking base register. */
static RISCVException read_upmbase(CPURISCVState *env, int csrno,
                                   target_ulong *val)
{
    *val = env->upmbase;
    return RISCV_EXCP_NONE;
}
/*
 * Write the U-mode pointer-masking base register.  Ignored when
 * pm.current forbids the access; otherwise mirrors write_spmbase for
 * U-mode state.
 */
static RISCVException write_upmbase(CPURISCVState *env, int csrno,
                                    target_ulong val)
{
    uint64_t mstatus;

    /* if pm.current==0 we can't modify current PM CSRs */
    if (check_pm_current_disabled(env, csrno)) {
        return RISCV_EXCP_NONE;
    }
    env->upmbase = val;
    /* If executing in U mode with PM enabled, the new base is live now. */
    if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
        env->cur_pmbase = val;
    }
    env->mmte |= PM_EXT_DIRTY;

    /* Set XS and SD bits, since PM CSRs are dirty */
    mstatus = env->mstatus | MSTATUS_XS;
    write_mstatus(env, csrno, mstatus);
    return RISCV_EXCP_NONE;
}
/* Crypto Extension */
/*
 * Read-modify-write handler for the Zkr `seed` CSR.  Returns 16 bits of
 * host entropy tagged ES16 on success, or the DEAD opst status when the
 * host entropy source fails.  Writes to the CSR are accepted but the
 * written value is ignored (only the side effect of reading matters).
 */
static RISCVException rmw_seed(CPURISCVState *env, int csrno,
                               target_ulong *ret_value,
                               target_ulong new_value,
                               target_ulong write_mask)
{
    uint16_t random_v;
    Error *random_e = NULL;
    int random_r;
    target_ulong rval;

    random_r = qemu_guest_getrandom(&random_v, 2, &random_e);
    if (unlikely(random_r < 0)) {
        /*
         * Failed, for unknown reasons in the crypto subsystem.
         * The best we can do is log the reason and return a
         * failure indication to the guest.  There is no reason
         * we know to expect the failure to be transitory, so
         * indicate DEAD to avoid having the guest spin on WAIT.
         */
        qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
                      __func__, error_get_pretty(random_e));
        error_free(random_e);
        rval = SEED_OPST_DEAD;
    } else {
        rval = random_v | SEED_OPST_ES16;
    }

    if (ret_value) {
        *ret_value = rval;
    }

    return RISCV_EXCP_NONE;
}
/*
 * riscv_csrrw - read and/or update control and status register
 *
 * csrr   <->  riscv_csrrw(env, csrno, ret_value, 0, 0);
 * csrrw  <->  riscv_csrrw(env, csrno, ret_value, value, -1);
 * csrrs  <->  riscv_csrrw(env, csrno, ret_value, -1, value);
 * csrrc  <->  riscv_csrrw(env, csrno, ret_value, 0, value);
 */
/*
 * Common access checks shared by the 64-bit and 128-bit CSR paths:
 * privilege level, read-only writes, Zicsr availability, privilege-spec
 * version, and finally the per-CSR predicate.
 */
static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
                                               int csrno,
                                               target_ulong write_mask,
                                               RISCVCPU *cpu)
{
    /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
    /* Bits [11:10] == 0b11 mark a read-only CSR per the privileged spec. */
    int read_only = get_field(csrno, 0xC00) == 3;
    int csr_min_priv = csr_ops[csrno].min_priv_ver;
#if !defined(CONFIG_USER_ONLY)
    int csr_priv, effective_priv = env->priv;

    if (riscv_has_ext(env, RVH) && env->priv == PRV_S) {
        /*
         * We are in either HS or VS mode.
         * Add 1 to the effective privledge level to allow us to access the
         * Hypervisor CSRs. The `hmode` predicate will determine if access
         * should be allowed(HS) or if a virtual instruction exception should be
         * raised(VS).
         */
        effective_priv++;
    }

    /* Bits [9:8] of the CSR number encode the CSR's minimum privilege. */
    csr_priv = get_field(csrno, 0x300);
    if (!env->debugger && (effective_priv < csr_priv)) {
        if (csr_priv == (PRV_S + 1) && riscv_cpu_virt_enabled(env)) {
            return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
        }
        return RISCV_EXCP_ILLEGAL_INST;
    }
#endif
    if (write_mask && read_only) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* ensure the CSR extension is enabled. */
    if (!cpu->cfg.ext_icsr) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* check predicate */
    if (!csr_ops[csrno].predicate) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    /* CSR not present before its minimum privileged-spec version. */
    if (env->priv_ver < csr_min_priv) {
        return RISCV_EXCP_ILLEGAL_INST;
    }

    return csr_ops[csrno].predicate(env, csrno);
}
/*
 * Perform a checked 64-bit CSR read-modify-write through the csr_ops
 * table: prefer the combined `op` accessor, otherwise read the old
 * value, merge in (new_value & write_mask), write back if a writer
 * exists, and return the old value through *ret_value (when non-NULL).
 */
static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
                                       target_ulong *ret_value,
                                       target_ulong new_value,
                                       target_ulong write_mask)
{
    RISCVException ret;
    target_ulong old_value;

    /* execute combined read/write operation if it exists */
    if (csr_ops[csrno].op) {
        return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
    }

    /* if no accessor exists then return failure */
    if (!csr_ops[csrno].read) {
        return RISCV_EXCP_ILLEGAL_INST;
    }
    /* read old value */
    ret = csr_ops[csrno].read(env, csrno, &old_value);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (write_mask) {
        new_value = (old_value & ~write_mask) | (new_value & write_mask);
        if (csr_ops[csrno].write) {
            ret = csr_ops[csrno].write(env, csrno, new_value);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}
/*
 * Public 64-bit CSR access entry point: run the common access checks,
 * then delegate the actual read-modify-write to riscv_csrrw_do64.
 */
RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
                           target_ulong *ret_value,
                           target_ulong new_value, target_ulong write_mask)
{
    RISCVCPU *cpu = env_archcpu(env);

    RISCVException ret = riscv_csrrw_check(env, csrno, write_mask, cpu);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
}
/*
 * 128-bit variant of riscv_csrrw_do64 for RV128: read via read128,
 * merge with Int128 mask arithmetic, and write back through write128
 * when present, else through the 64-bit writer with the low half (so
 * per-CSR 128-bit wrappers are not required for every register).
 */
static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
                                        Int128 *ret_value,
                                        Int128 new_value,
                                        Int128 write_mask)
{
    RISCVException ret;
    Int128 old_value;

    /* read old value */
    ret = csr_ops[csrno].read128(env, csrno, &old_value);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    /* write value if writable and write mask set, otherwise drop writes */
    if (int128_nz(write_mask)) {
        new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
                              int128_and(new_value, write_mask));
        if (csr_ops[csrno].write128) {
            ret = csr_ops[csrno].write128(env, csrno, new_value);
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        } else if (csr_ops[csrno].write) {
            /* avoids having to write wrappers for all registers */
            ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
            if (ret != RISCV_EXCP_NONE) {
                return ret;
            }
        }
    }

    /* return old value */
    if (ret_value) {
        *ret_value = old_value;
    }

    return RISCV_EXCP_NONE;
}
/*
 * Public 128-bit CSR access entry point: run the common access checks,
 * use the 128-bit accessor when the CSR provides one, otherwise fall
 * back to the 64-bit path and zero-extend the result.
 */
RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
                                Int128 *ret_value,
                                Int128 new_value, Int128 write_mask)
{
    RISCVException ret;
    RISCVCPU *cpu = env_archcpu(env);

    ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask), cpu);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }

    if (csr_ops[csrno].read128) {
        return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
    }

    /*
     * Fall back to 64-bit version for now, if the 128-bit alternative isn't
     * defined.
     * Note, some CSRs don't need to extend to MXLEN (64 upper bits non
     * significant), for those, this fallback is correctly handling the accesses
     */
    target_ulong old_value;
    ret = riscv_csrrw_do64(env, csrno, &old_value,
                           int128_getlo(new_value),
                           int128_getlo(write_mask));
    if (ret == RISCV_EXCP_NONE && ret_value) {
        *ret_value = int128_make64(old_value);
    }
    return ret;
}
/*
 * Debugger support. If not in user mode, set env->debugger before the
 * riscv_csrrw call and clear it after the call.
 */
RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
                                 target_ulong *ret_value,
                                 target_ulong new_value,
                                 target_ulong write_mask)
{
    RISCVException ret;
#if !defined(CONFIG_USER_ONLY)
    /* Bypass privilege/pm.current checks while the debugger is attached. */
    env->debugger = true;
#endif
    ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
#if !defined(CONFIG_USER_ONLY)
    env->debugger = false;
#endif
    return ret;
}
3457 /* Control and Status Register function table */
3458 riscv_csr_operations csr_ops
[CSR_TABLE_SIZE
] = {
3459 /* User Floating-Point CSRs */
3460 [CSR_FFLAGS
] = { "fflags", fs
, read_fflags
, write_fflags
},
3461 [CSR_FRM
] = { "frm", fs
, read_frm
, write_frm
},
3462 [CSR_FCSR
] = { "fcsr", fs
, read_fcsr
, write_fcsr
},
3464 [CSR_VSTART
] = { "vstart", vs
, read_vstart
, write_vstart
,
3465 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3466 [CSR_VXSAT
] = { "vxsat", vs
, read_vxsat
, write_vxsat
,
3467 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3468 [CSR_VXRM
] = { "vxrm", vs
, read_vxrm
, write_vxrm
,
3469 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3470 [CSR_VCSR
] = { "vcsr", vs
, read_vcsr
, write_vcsr
,
3471 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3472 [CSR_VL
] = { "vl", vs
, read_vl
,
3473 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3474 [CSR_VTYPE
] = { "vtype", vs
, read_vtype
,
3475 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3476 [CSR_VLENB
] = { "vlenb", vs
, read_vlenb
,
3477 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3478 /* User Timers and Counters */
3479 [CSR_CYCLE
] = { "cycle", ctr
, read_hpmcounter
},
3480 [CSR_INSTRET
] = { "instret", ctr
, read_hpmcounter
},
3481 [CSR_CYCLEH
] = { "cycleh", ctr32
, read_hpmcounterh
},
3482 [CSR_INSTRETH
] = { "instreth", ctr32
, read_hpmcounterh
},
3485 * In privileged mode, the monitor will have to emulate TIME CSRs only if
3486 * rdtime callback is not provided by machine/platform emulation.
3488 [CSR_TIME
] = { "time", ctr
, read_time
},
3489 [CSR_TIMEH
] = { "timeh", ctr32
, read_timeh
},
3491 /* Crypto Extension */
3492 [CSR_SEED
] = { "seed", seed
, NULL
, NULL
, rmw_seed
},
3494 #if !defined(CONFIG_USER_ONLY)
3495 /* Machine Timers and Counters */
3496 [CSR_MCYCLE
] = { "mcycle", any
, read_hpmcounter
, write_mhpmcounter
},
3497 [CSR_MINSTRET
] = { "minstret", any
, read_hpmcounter
, write_mhpmcounter
},
3498 [CSR_MCYCLEH
] = { "mcycleh", any32
, read_hpmcounterh
, write_mhpmcounterh
},
3499 [CSR_MINSTRETH
] = { "minstreth", any32
, read_hpmcounterh
, write_mhpmcounterh
},
3501 /* Machine Information Registers */
3502 [CSR_MVENDORID
] = { "mvendorid", any
, read_mvendorid
},
3503 [CSR_MARCHID
] = { "marchid", any
, read_marchid
},
3504 [CSR_MIMPID
] = { "mimpid", any
, read_mimpid
},
3505 [CSR_MHARTID
] = { "mhartid", any
, read_mhartid
},
3507 [CSR_MCONFIGPTR
] = { "mconfigptr", any
, read_zero
,
3508 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3509 /* Machine Trap Setup */
3510 [CSR_MSTATUS
] = { "mstatus", any
, read_mstatus
, write_mstatus
, NULL
,
3511 read_mstatus_i128
},
3512 [CSR_MISA
] = { "misa", any
, read_misa
, write_misa
, NULL
,
3514 [CSR_MIDELEG
] = { "mideleg", any
, NULL
, NULL
, rmw_mideleg
},
3515 [CSR_MEDELEG
] = { "medeleg", any
, read_medeleg
, write_medeleg
},
3516 [CSR_MIE
] = { "mie", any
, NULL
, NULL
, rmw_mie
},
3517 [CSR_MTVEC
] = { "mtvec", any
, read_mtvec
, write_mtvec
},
3518 [CSR_MCOUNTEREN
] = { "mcounteren", any
, read_mcounteren
, write_mcounteren
},
3520 [CSR_MSTATUSH
] = { "mstatush", any32
, read_mstatush
, write_mstatush
},
3522 /* Machine Trap Handling */
3523 [CSR_MSCRATCH
] = { "mscratch", any
, read_mscratch
, write_mscratch
, NULL
,
3524 read_mscratch_i128
, write_mscratch_i128
},
3525 [CSR_MEPC
] = { "mepc", any
, read_mepc
, write_mepc
},
3526 [CSR_MCAUSE
] = { "mcause", any
, read_mcause
, write_mcause
},
3527 [CSR_MTVAL
] = { "mtval", any
, read_mtval
, write_mtval
},
3528 [CSR_MIP
] = { "mip", any
, NULL
, NULL
, rmw_mip
},
3530 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */
3531 [CSR_MISELECT
] = { "miselect", aia_any
, NULL
, NULL
, rmw_xiselect
},
3532 [CSR_MIREG
] = { "mireg", aia_any
, NULL
, NULL
, rmw_xireg
},
3534 /* Machine-Level Interrupts (AIA) */
3535 [CSR_MTOPEI
] = { "mtopei", aia_any
, NULL
, NULL
, rmw_xtopei
},
3536 [CSR_MTOPI
] = { "mtopi", aia_any
, read_mtopi
},
3538 /* Virtual Interrupts for Supervisor Level (AIA) */
3539 [CSR_MVIEN
] = { "mvien", aia_any
, read_zero
, write_ignore
},
3540 [CSR_MVIP
] = { "mvip", aia_any
, read_zero
, write_ignore
},
3542 /* Machine-Level High-Half CSRs (AIA) */
3543 [CSR_MIDELEGH
] = { "midelegh", aia_any32
, NULL
, NULL
, rmw_midelegh
},
3544 [CSR_MIEH
] = { "mieh", aia_any32
, NULL
, NULL
, rmw_mieh
},
3545 [CSR_MVIENH
] = { "mvienh", aia_any32
, read_zero
, write_ignore
},
3546 [CSR_MVIPH
] = { "mviph", aia_any32
, read_zero
, write_ignore
},
3547 [CSR_MIPH
] = { "miph", aia_any32
, NULL
, NULL
, rmw_miph
},
3549 /* Execution environment configuration */
3550 [CSR_MENVCFG
] = { "menvcfg", any
, read_menvcfg
, write_menvcfg
,
3551 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3552 [CSR_MENVCFGH
] = { "menvcfgh", any32
, read_menvcfgh
, write_menvcfgh
,
3553 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3554 [CSR_SENVCFG
] = { "senvcfg", smode
, read_senvcfg
, write_senvcfg
,
3555 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3556 [CSR_HENVCFG
] = { "henvcfg", hmode
, read_henvcfg
, write_henvcfg
,
3557 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3558 [CSR_HENVCFGH
] = { "henvcfgh", hmode32
, read_henvcfgh
, write_henvcfgh
,
3559 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3561 /* Supervisor Trap Setup */
3562 [CSR_SSTATUS
] = { "sstatus", smode
, read_sstatus
, write_sstatus
, NULL
,
3563 read_sstatus_i128
},
3564 [CSR_SIE
] = { "sie", smode
, NULL
, NULL
, rmw_sie
},
3565 [CSR_STVEC
] = { "stvec", smode
, read_stvec
, write_stvec
},
3566 [CSR_SCOUNTEREN
] = { "scounteren", smode
, read_scounteren
, write_scounteren
},
3568 /* Supervisor Trap Handling */
3569 [CSR_SSCRATCH
] = { "sscratch", smode
, read_sscratch
, write_sscratch
, NULL
,
3570 read_sscratch_i128
, write_sscratch_i128
},
3571 [CSR_SEPC
] = { "sepc", smode
, read_sepc
, write_sepc
},
3572 [CSR_SCAUSE
] = { "scause", smode
, read_scause
, write_scause
},
3573 [CSR_STVAL
] = { "stval", smode
, read_stval
, write_stval
},
3574 [CSR_SIP
] = { "sip", smode
, NULL
, NULL
, rmw_sip
},
3576 /* Supervisor Protection and Translation */
3577 [CSR_SATP
] = { "satp", smode
, read_satp
, write_satp
},
3579 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
3580 [CSR_SISELECT
] = { "siselect", aia_smode
, NULL
, NULL
, rmw_xiselect
},
3581 [CSR_SIREG
] = { "sireg", aia_smode
, NULL
, NULL
, rmw_xireg
},
3583 /* Supervisor-Level Interrupts (AIA) */
3584 [CSR_STOPEI
] = { "stopei", aia_smode
, NULL
, NULL
, rmw_xtopei
},
3585 [CSR_STOPI
] = { "stopi", aia_smode
, read_stopi
},
3587 /* Supervisor-Level High-Half CSRs (AIA) */
3588 [CSR_SIEH
] = { "sieh", aia_smode32
, NULL
, NULL
, rmw_sieh
},
3589 [CSR_SIPH
] = { "siph", aia_smode32
, NULL
, NULL
, rmw_siph
},
3591 [CSR_HSTATUS
] = { "hstatus", hmode
, read_hstatus
, write_hstatus
,
3592 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3593 [CSR_HEDELEG
] = { "hedeleg", hmode
, read_hedeleg
, write_hedeleg
,
3594 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3595 [CSR_HIDELEG
] = { "hideleg", hmode
, NULL
, NULL
, rmw_hideleg
,
3596 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3597 [CSR_HVIP
] = { "hvip", hmode
, NULL
, NULL
, rmw_hvip
,
3598 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3599 [CSR_HIP
] = { "hip", hmode
, NULL
, NULL
, rmw_hip
,
3600 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3601 [CSR_HIE
] = { "hie", hmode
, NULL
, NULL
, rmw_hie
,
3602 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3603 [CSR_HCOUNTEREN
] = { "hcounteren", hmode
, read_hcounteren
, write_hcounteren
,
3604 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3605 [CSR_HGEIE
] = { "hgeie", hmode
, read_hgeie
, write_hgeie
,
3606 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3607 [CSR_HTVAL
] = { "htval", hmode
, read_htval
, write_htval
,
3608 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3609 [CSR_HTINST
] = { "htinst", hmode
, read_htinst
, write_htinst
,
3610 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3611 [CSR_HGEIP
] = { "hgeip", hmode
, read_hgeip
,
3612 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3613 [CSR_HGATP
] = { "hgatp", hmode
, read_hgatp
, write_hgatp
,
3614 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3615 [CSR_HTIMEDELTA
] = { "htimedelta", hmode
, read_htimedelta
, write_htimedelta
,
3616 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3617 [CSR_HTIMEDELTAH
] = { "htimedeltah", hmode32
, read_htimedeltah
, write_htimedeltah
,
3618 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3620 [CSR_VSSTATUS
] = { "vsstatus", hmode
, read_vsstatus
, write_vsstatus
,
3621 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3622 [CSR_VSIP
] = { "vsip", hmode
, NULL
, NULL
, rmw_vsip
,
3623 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3624 [CSR_VSIE
] = { "vsie", hmode
, NULL
, NULL
, rmw_vsie
,
3625 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3626 [CSR_VSTVEC
] = { "vstvec", hmode
, read_vstvec
, write_vstvec
,
3627 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3628 [CSR_VSSCRATCH
] = { "vsscratch", hmode
, read_vsscratch
, write_vsscratch
,
3629 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3630 [CSR_VSEPC
] = { "vsepc", hmode
, read_vsepc
, write_vsepc
,
3631 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3632 [CSR_VSCAUSE
] = { "vscause", hmode
, read_vscause
, write_vscause
,
3633 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3634 [CSR_VSTVAL
] = { "vstval", hmode
, read_vstval
, write_vstval
,
3635 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3636 [CSR_VSATP
] = { "vsatp", hmode
, read_vsatp
, write_vsatp
,
3637 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3639 [CSR_MTVAL2
] = { "mtval2", hmode
, read_mtval2
, write_mtval2
,
3640 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3641 [CSR_MTINST
] = { "mtinst", hmode
, read_mtinst
, write_mtinst
,
3642 .min_priv_ver
= PRIV_VERSION_1_12_0
},
3644 /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
3645 [CSR_HVIEN
] = { "hvien", aia_hmode
, read_zero
, write_ignore
},
3646 [CSR_HVICTL
] = { "hvictl", aia_hmode
, read_hvictl
, write_hvictl
},
3647 [CSR_HVIPRIO1
] = { "hviprio1", aia_hmode
, read_hviprio1
, write_hviprio1
},
3648 [CSR_HVIPRIO2
] = { "hviprio2", aia_hmode
, read_hviprio2
, write_hviprio2
},
3651 * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
3653 [CSR_VSISELECT
] = { "vsiselect", aia_hmode
, NULL
, NULL
, rmw_xiselect
},
3654 [CSR_VSIREG
] = { "vsireg", aia_hmode
, NULL
, NULL
, rmw_xireg
},
3656 /* VS-Level Interrupts (H-extension with AIA) */
3657 [CSR_VSTOPEI
] = { "vstopei", aia_hmode
, NULL
, NULL
, rmw_xtopei
},
3658 [CSR_VSTOPI
] = { "vstopi", aia_hmode
, read_vstopi
},
3660 /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
3661 [CSR_HIDELEGH
] = { "hidelegh", aia_hmode32
, NULL
, NULL
, rmw_hidelegh
},
3662 [CSR_HVIENH
] = { "hvienh", aia_hmode32
, read_zero
, write_ignore
},
3663 [CSR_HVIPH
] = { "hviph", aia_hmode32
, NULL
, NULL
, rmw_hviph
},
3664 [CSR_HVIPRIO1H
] = { "hviprio1h", aia_hmode32
, read_hviprio1h
, write_hviprio1h
},
3665 [CSR_HVIPRIO2H
] = { "hviprio2h", aia_hmode32
, read_hviprio2h
, write_hviprio2h
},
3666 [CSR_VSIEH
] = { "vsieh", aia_hmode32
, NULL
, NULL
, rmw_vsieh
},
3667 [CSR_VSIPH
] = { "vsiph", aia_hmode32
, NULL
, NULL
, rmw_vsiph
},
3669 /* Physical Memory Protection */
3670 [CSR_MSECCFG
] = { "mseccfg", epmp
, read_mseccfg
, write_mseccfg
,
3671 .min_priv_ver
= PRIV_VERSION_1_11_0
},
3672 [CSR_PMPCFG0
] = { "pmpcfg0", pmp
, read_pmpcfg
, write_pmpcfg
},
3673 [CSR_PMPCFG1
] = { "pmpcfg1", pmp
, read_pmpcfg
, write_pmpcfg
},
3674 [CSR_PMPCFG2
] = { "pmpcfg2", pmp
, read_pmpcfg
, write_pmpcfg
},
3675 [CSR_PMPCFG3
] = { "pmpcfg3", pmp
, read_pmpcfg
, write_pmpcfg
},
3676 [CSR_PMPADDR0
] = { "pmpaddr0", pmp
, read_pmpaddr
, write_pmpaddr
},
3677 [CSR_PMPADDR1
] = { "pmpaddr1", pmp
, read_pmpaddr
, write_pmpaddr
},
3678 [CSR_PMPADDR2
] = { "pmpaddr2", pmp
, read_pmpaddr
, write_pmpaddr
},
3679 [CSR_PMPADDR3
] = { "pmpaddr3", pmp
, read_pmpaddr
, write_pmpaddr
},
3680 [CSR_PMPADDR4
] = { "pmpaddr4", pmp
, read_pmpaddr
, write_pmpaddr
},
3681 [CSR_PMPADDR5
] = { "pmpaddr5", pmp
, read_pmpaddr
, write_pmpaddr
},
3682 [CSR_PMPADDR6
] = { "pmpaddr6", pmp
, read_pmpaddr
, write_pmpaddr
},
3683 [CSR_PMPADDR7
] = { "pmpaddr7", pmp
, read_pmpaddr
, write_pmpaddr
},
3684 [CSR_PMPADDR8
] = { "pmpaddr8", pmp
, read_pmpaddr
, write_pmpaddr
},
3685 [CSR_PMPADDR9
] = { "pmpaddr9", pmp
, read_pmpaddr
, write_pmpaddr
},
3686 [CSR_PMPADDR10
] = { "pmpaddr10", pmp
, read_pmpaddr
, write_pmpaddr
},
3687 [CSR_PMPADDR11
] = { "pmpaddr11", pmp
, read_pmpaddr
, write_pmpaddr
},
3688 [CSR_PMPADDR12
] = { "pmpaddr12", pmp
, read_pmpaddr
, write_pmpaddr
},
3689 [CSR_PMPADDR13
] = { "pmpaddr13", pmp
, read_pmpaddr
, write_pmpaddr
},
3690 [CSR_PMPADDR14
] = { "pmpaddr14", pmp
, read_pmpaddr
, write_pmpaddr
},
3691 [CSR_PMPADDR15
] = { "pmpaddr15", pmp
, read_pmpaddr
, write_pmpaddr
},
3694 [CSR_TSELECT
] = { "tselect", debug
, read_tselect
, write_tselect
},
3695 [CSR_TDATA1
] = { "tdata1", debug
, read_tdata
, write_tdata
},
3696 [CSR_TDATA2
] = { "tdata2", debug
, read_tdata
, write_tdata
},
3697 [CSR_TDATA3
] = { "tdata3", debug
, read_tdata
, write_tdata
},
3699 /* User Pointer Masking */
3700 [CSR_UMTE
] = { "umte", pointer_masking
, read_umte
, write_umte
},
3701 [CSR_UPMMASK
] = { "upmmask", pointer_masking
, read_upmmask
, write_upmmask
},
3702 [CSR_UPMBASE
] = { "upmbase", pointer_masking
, read_upmbase
, write_upmbase
},
3703 /* Machine Pointer Masking */
3704 [CSR_MMTE
] = { "mmte", pointer_masking
, read_mmte
, write_mmte
},
3705 [CSR_MPMMASK
] = { "mpmmask", pointer_masking
, read_mpmmask
, write_mpmmask
},
3706 [CSR_MPMBASE
] = { "mpmbase", pointer_masking
, read_mpmbase
, write_mpmbase
},
3707 /* Supervisor Pointer Masking */
3708 [CSR_SMTE
] = { "smte", pointer_masking
, read_smte
, write_smte
},
3709 [CSR_SPMMASK
] = { "spmmask", pointer_masking
, read_spmmask
, write_spmmask
},
3710 [CSR_SPMBASE
] = { "spmbase", pointer_masking
, read_spmbase
, write_spmbase
},
3712 /* Performance Counters */
3713 [CSR_HPMCOUNTER3
] = { "hpmcounter3", ctr
, read_hpmcounter
},
3714 [CSR_HPMCOUNTER4
] = { "hpmcounter4", ctr
, read_hpmcounter
},
3715 [CSR_HPMCOUNTER5
] = { "hpmcounter5", ctr
, read_hpmcounter
},
3716 [CSR_HPMCOUNTER6
] = { "hpmcounter6", ctr
, read_hpmcounter
},
3717 [CSR_HPMCOUNTER7
] = { "hpmcounter7", ctr
, read_hpmcounter
},
3718 [CSR_HPMCOUNTER8
] = { "hpmcounter8", ctr
, read_hpmcounter
},
3719 [CSR_HPMCOUNTER9
] = { "hpmcounter9", ctr
, read_hpmcounter
},
3720 [CSR_HPMCOUNTER10
] = { "hpmcounter10", ctr
, read_hpmcounter
},
3721 [CSR_HPMCOUNTER11
] = { "hpmcounter11", ctr
, read_hpmcounter
},
3722 [CSR_HPMCOUNTER12
] = { "hpmcounter12", ctr
, read_hpmcounter
},
3723 [CSR_HPMCOUNTER13
] = { "hpmcounter13", ctr
, read_hpmcounter
},
3724 [CSR_HPMCOUNTER14
] = { "hpmcounter14", ctr
, read_hpmcounter
},
3725 [CSR_HPMCOUNTER15
] = { "hpmcounter15", ctr
, read_hpmcounter
},
3726 [CSR_HPMCOUNTER16
] = { "hpmcounter16", ctr
, read_hpmcounter
},
3727 [CSR_HPMCOUNTER17
] = { "hpmcounter17", ctr
, read_hpmcounter
},
3728 [CSR_HPMCOUNTER18
] = { "hpmcounter18", ctr
, read_hpmcounter
},
3729 [CSR_HPMCOUNTER19
] = { "hpmcounter19", ctr
, read_hpmcounter
},
3730 [CSR_HPMCOUNTER20
] = { "hpmcounter20", ctr
, read_hpmcounter
},
3731 [CSR_HPMCOUNTER21
] = { "hpmcounter21", ctr
, read_hpmcounter
},
3732 [CSR_HPMCOUNTER22
] = { "hpmcounter22", ctr
, read_hpmcounter
},
3733 [CSR_HPMCOUNTER23
] = { "hpmcounter23", ctr
, read_hpmcounter
},
3734 [CSR_HPMCOUNTER24
] = { "hpmcounter24", ctr
, read_hpmcounter
},
3735 [CSR_HPMCOUNTER25
] = { "hpmcounter25", ctr
, read_hpmcounter
},
3736 [CSR_HPMCOUNTER26
] = { "hpmcounter26", ctr
, read_hpmcounter
},
3737 [CSR_HPMCOUNTER27
] = { "hpmcounter27", ctr
, read_hpmcounter
},
3738 [CSR_HPMCOUNTER28
] = { "hpmcounter28", ctr
, read_hpmcounter
},
3739 [CSR_HPMCOUNTER29
] = { "hpmcounter29", ctr
, read_hpmcounter
},
3740 [CSR_HPMCOUNTER30
] = { "hpmcounter30", ctr
, read_hpmcounter
},
3741 [CSR_HPMCOUNTER31
] = { "hpmcounter31", ctr
, read_hpmcounter
},
3743 [CSR_MHPMCOUNTER3
] = { "mhpmcounter3", mctr
, read_hpmcounter
,
3744 write_mhpmcounter
},
3745 [CSR_MHPMCOUNTER4
] = { "mhpmcounter4", mctr
, read_hpmcounter
,
3746 write_mhpmcounter
},
3747 [CSR_MHPMCOUNTER5
] = { "mhpmcounter5", mctr
, read_hpmcounter
,
3748 write_mhpmcounter
},
3749 [CSR_MHPMCOUNTER6
] = { "mhpmcounter6", mctr
, read_hpmcounter
,
3750 write_mhpmcounter
},
3751 [CSR_MHPMCOUNTER7
] = { "mhpmcounter7", mctr
, read_hpmcounter
,
3752 write_mhpmcounter
},
3753 [CSR_MHPMCOUNTER8
] = { "mhpmcounter8", mctr
, read_hpmcounter
,
3754 write_mhpmcounter
},
3755 [CSR_MHPMCOUNTER9
] = { "mhpmcounter9", mctr
, read_hpmcounter
,
3756 write_mhpmcounter
},
3757 [CSR_MHPMCOUNTER10
] = { "mhpmcounter10", mctr
, read_hpmcounter
,
3758 write_mhpmcounter
},
3759 [CSR_MHPMCOUNTER11
] = { "mhpmcounter11", mctr
, read_hpmcounter
,
3760 write_mhpmcounter
},
3761 [CSR_MHPMCOUNTER12
] = { "mhpmcounter12", mctr
, read_hpmcounter
,
3762 write_mhpmcounter
},
3763 [CSR_MHPMCOUNTER13
] = { "mhpmcounter13", mctr
, read_hpmcounter
,
3764 write_mhpmcounter
},
3765 [CSR_MHPMCOUNTER14
] = { "mhpmcounter14", mctr
, read_hpmcounter
,
3766 write_mhpmcounter
},
3767 [CSR_MHPMCOUNTER15
] = { "mhpmcounter15", mctr
, read_hpmcounter
,
3768 write_mhpmcounter
},
3769 [CSR_MHPMCOUNTER16
] = { "mhpmcounter16", mctr
, read_hpmcounter
,
3770 write_mhpmcounter
},
3771 [CSR_MHPMCOUNTER17
] = { "mhpmcounter17", mctr
, read_hpmcounter
,
3772 write_mhpmcounter
},
3773 [CSR_MHPMCOUNTER18
] = { "mhpmcounter18", mctr
, read_hpmcounter
,
3774 write_mhpmcounter
},
3775 [CSR_MHPMCOUNTER19
] = { "mhpmcounter19", mctr
, read_hpmcounter
,
3776 write_mhpmcounter
},
3777 [CSR_MHPMCOUNTER20
] = { "mhpmcounter20", mctr
, read_hpmcounter
,
3778 write_mhpmcounter
},
3779 [CSR_MHPMCOUNTER21
] = { "mhpmcounter21", mctr
, read_hpmcounter
,
3780 write_mhpmcounter
},
3781 [CSR_MHPMCOUNTER22
] = { "mhpmcounter22", mctr
, read_hpmcounter
,
3782 write_mhpmcounter
},
3783 [CSR_MHPMCOUNTER23
] = { "mhpmcounter23", mctr
, read_hpmcounter
,
3784 write_mhpmcounter
},
3785 [CSR_MHPMCOUNTER24
] = { "mhpmcounter24", mctr
, read_hpmcounter
,
3786 write_mhpmcounter
},
3787 [CSR_MHPMCOUNTER25
] = { "mhpmcounter25", mctr
, read_hpmcounter
,
3788 write_mhpmcounter
},
3789 [CSR_MHPMCOUNTER26
] = { "mhpmcounter26", mctr
, read_hpmcounter
,
3790 write_mhpmcounter
},
3791 [CSR_MHPMCOUNTER27
] = { "mhpmcounter27", mctr
, read_hpmcounter
,
3792 write_mhpmcounter
},
3793 [CSR_MHPMCOUNTER28
] = { "mhpmcounter28", mctr
, read_hpmcounter
,
3794 write_mhpmcounter
},
3795 [CSR_MHPMCOUNTER29
] = { "mhpmcounter29", mctr
, read_hpmcounter
,
3796 write_mhpmcounter
},
3797 [CSR_MHPMCOUNTER30
] = { "mhpmcounter30", mctr
, read_hpmcounter
,
3798 write_mhpmcounter
},
3799 [CSR_MHPMCOUNTER31
] = { "mhpmcounter31", mctr
, read_hpmcounter
,
3800 write_mhpmcounter
},
3802 [CSR_MCOUNTINHIBIT
] = { "mcountinhibit", any
, read_mcountinhibit
,
3803 write_mcountinhibit
, .min_priv_ver
= PRIV_VERSION_1_11_0
},
3805 [CSR_MHPMEVENT3
] = { "mhpmevent3", any
, read_mhpmevent
,
3807 [CSR_MHPMEVENT4
] = { "mhpmevent4", any
, read_mhpmevent
,
3809 [CSR_MHPMEVENT5
] = { "mhpmevent5", any
, read_mhpmevent
,
3811 [CSR_MHPMEVENT6
] = { "mhpmevent6", any
, read_mhpmevent
,
3813 [CSR_MHPMEVENT7
] = { "mhpmevent7", any
, read_mhpmevent
,
3815 [CSR_MHPMEVENT8
] = { "mhpmevent8", any
, read_mhpmevent
,
3817 [CSR_MHPMEVENT9
] = { "mhpmevent9", any
, read_mhpmevent
,
3819 [CSR_MHPMEVENT10
] = { "mhpmevent10", any
, read_mhpmevent
,
3821 [CSR_MHPMEVENT11
] = { "mhpmevent11", any
, read_mhpmevent
,
3823 [CSR_MHPMEVENT12
] = { "mhpmevent12", any
, read_mhpmevent
,
3825 [CSR_MHPMEVENT13
] = { "mhpmevent13", any
, read_mhpmevent
,
3827 [CSR_MHPMEVENT14
] = { "mhpmevent14", any
, read_mhpmevent
,
3829 [CSR_MHPMEVENT15
] = { "mhpmevent15", any
, read_mhpmevent
,
3831 [CSR_MHPMEVENT16
] = { "mhpmevent16", any
, read_mhpmevent
,
3833 [CSR_MHPMEVENT17
] = { "mhpmevent17", any
, read_mhpmevent
,
3835 [CSR_MHPMEVENT18
] = { "mhpmevent18", any
, read_mhpmevent
,
3837 [CSR_MHPMEVENT19
] = { "mhpmevent19", any
, read_mhpmevent
,
3839 [CSR_MHPMEVENT20
] = { "mhpmevent20", any
, read_mhpmevent
,
3841 [CSR_MHPMEVENT21
] = { "mhpmevent21", any
, read_mhpmevent
,
3843 [CSR_MHPMEVENT22
] = { "mhpmevent22", any
, read_mhpmevent
,
3845 [CSR_MHPMEVENT23
] = { "mhpmevent23", any
, read_mhpmevent
,
3847 [CSR_MHPMEVENT24
] = { "mhpmevent24", any
, read_mhpmevent
,
3849 [CSR_MHPMEVENT25
] = { "mhpmevent25", any
, read_mhpmevent
,
3851 [CSR_MHPMEVENT26
] = { "mhpmevent26", any
, read_mhpmevent
,
3853 [CSR_MHPMEVENT27
] = { "mhpmevent27", any
, read_mhpmevent
,
3855 [CSR_MHPMEVENT28
] = { "mhpmevent28", any
, read_mhpmevent
,
3857 [CSR_MHPMEVENT29
] = { "mhpmevent29", any
, read_mhpmevent
,
3859 [CSR_MHPMEVENT30
] = { "mhpmevent30", any
, read_mhpmevent
,
3861 [CSR_MHPMEVENT31
] = { "mhpmevent31", any
, read_mhpmevent
,
3864 [CSR_HPMCOUNTER3H
] = { "hpmcounter3h", ctr32
, read_hpmcounterh
},
3865 [CSR_HPMCOUNTER4H
] = { "hpmcounter4h", ctr32
, read_hpmcounterh
},
3866 [CSR_HPMCOUNTER5H
] = { "hpmcounter5h", ctr32
, read_hpmcounterh
},
3867 [CSR_HPMCOUNTER6H
] = { "hpmcounter6h", ctr32
, read_hpmcounterh
},
3868 [CSR_HPMCOUNTER7H
] = { "hpmcounter7h", ctr32
, read_hpmcounterh
},
3869 [CSR_HPMCOUNTER8H
] = { "hpmcounter8h", ctr32
, read_hpmcounterh
},
3870 [CSR_HPMCOUNTER9H
] = { "hpmcounter9h", ctr32
, read_hpmcounterh
},
3871 [CSR_HPMCOUNTER10H
] = { "hpmcounter10h", ctr32
, read_hpmcounterh
},
3872 [CSR_HPMCOUNTER11H
] = { "hpmcounter11h", ctr32
, read_hpmcounterh
},
3873 [CSR_HPMCOUNTER12H
] = { "hpmcounter12h", ctr32
, read_hpmcounterh
},
3874 [CSR_HPMCOUNTER13H
] = { "hpmcounter13h", ctr32
, read_hpmcounterh
},
3875 [CSR_HPMCOUNTER14H
] = { "hpmcounter14h", ctr32
, read_hpmcounterh
},
3876 [CSR_HPMCOUNTER15H
] = { "hpmcounter15h", ctr32
, read_hpmcounterh
},
3877 [CSR_HPMCOUNTER16H
] = { "hpmcounter16h", ctr32
, read_hpmcounterh
},
3878 [CSR_HPMCOUNTER17H
] = { "hpmcounter17h", ctr32
, read_hpmcounterh
},
3879 [CSR_HPMCOUNTER18H
] = { "hpmcounter18h", ctr32
, read_hpmcounterh
},
3880 [CSR_HPMCOUNTER19H
] = { "hpmcounter19h", ctr32
, read_hpmcounterh
},
3881 [CSR_HPMCOUNTER20H
] = { "hpmcounter20h", ctr32
, read_hpmcounterh
},
3882 [CSR_HPMCOUNTER21H
] = { "hpmcounter21h", ctr32
, read_hpmcounterh
},
3883 [CSR_HPMCOUNTER22H
] = { "hpmcounter22h", ctr32
, read_hpmcounterh
},
3884 [CSR_HPMCOUNTER23H
] = { "hpmcounter23h", ctr32
, read_hpmcounterh
},
3885 [CSR_HPMCOUNTER24H
] = { "hpmcounter24h", ctr32
, read_hpmcounterh
},
3886 [CSR_HPMCOUNTER25H
] = { "hpmcounter25h", ctr32
, read_hpmcounterh
},
3887 [CSR_HPMCOUNTER26H
] = { "hpmcounter26h", ctr32
, read_hpmcounterh
},
3888 [CSR_HPMCOUNTER27H
] = { "hpmcounter27h", ctr32
, read_hpmcounterh
},
3889 [CSR_HPMCOUNTER28H
] = { "hpmcounter28h", ctr32
, read_hpmcounterh
},
3890 [CSR_HPMCOUNTER29H
] = { "hpmcounter29h", ctr32
, read_hpmcounterh
},
3891 [CSR_HPMCOUNTER30H
] = { "hpmcounter30h", ctr32
, read_hpmcounterh
},
3892 [CSR_HPMCOUNTER31H
] = { "hpmcounter31h", ctr32
, read_hpmcounterh
},
3894 [CSR_MHPMCOUNTER3H
] = { "mhpmcounter3h", mctr32
, read_hpmcounterh
,
3895 write_mhpmcounterh
},
3896 [CSR_MHPMCOUNTER4H
] = { "mhpmcounter4h", mctr32
, read_hpmcounterh
,
3897 write_mhpmcounterh
},
3898 [CSR_MHPMCOUNTER5H
] = { "mhpmcounter5h", mctr32
, read_hpmcounterh
,
3899 write_mhpmcounterh
},
3900 [CSR_MHPMCOUNTER6H
] = { "mhpmcounter6h", mctr32
, read_hpmcounterh
,
3901 write_mhpmcounterh
},
3902 [CSR_MHPMCOUNTER7H
] = { "mhpmcounter7h", mctr32
, read_hpmcounterh
,
3903 write_mhpmcounterh
},
3904 [CSR_MHPMCOUNTER8H
] = { "mhpmcounter8h", mctr32
, read_hpmcounterh
,
3905 write_mhpmcounterh
},
3906 [CSR_MHPMCOUNTER9H
] = { "mhpmcounter9h", mctr32
, read_hpmcounterh
,
3907 write_mhpmcounterh
},
3908 [CSR_MHPMCOUNTER10H
] = { "mhpmcounter10h", mctr32
, read_hpmcounterh
,
3909 write_mhpmcounterh
},
3910 [CSR_MHPMCOUNTER11H
] = { "mhpmcounter11h", mctr32
, read_hpmcounterh
,
3911 write_mhpmcounterh
},
3912 [CSR_MHPMCOUNTER12H
] = { "mhpmcounter12h", mctr32
, read_hpmcounterh
,
3913 write_mhpmcounterh
},
3914 [CSR_MHPMCOUNTER13H
] = { "mhpmcounter13h", mctr32
, read_hpmcounterh
,
3915 write_mhpmcounterh
},
3916 [CSR_MHPMCOUNTER14H
] = { "mhpmcounter14h", mctr32
, read_hpmcounterh
,
3917 write_mhpmcounterh
},
3918 [CSR_MHPMCOUNTER15H
] = { "mhpmcounter15h", mctr32
, read_hpmcounterh
,
3919 write_mhpmcounterh
},
3920 [CSR_MHPMCOUNTER16H
] = { "mhpmcounter16h", mctr32
, read_hpmcounterh
,
3921 write_mhpmcounterh
},
3922 [CSR_MHPMCOUNTER17H
] = { "mhpmcounter17h", mctr32
, read_hpmcounterh
,
3923 write_mhpmcounterh
},
3924 [CSR_MHPMCOUNTER18H
] = { "mhpmcounter18h", mctr32
, read_hpmcounterh
,
3925 write_mhpmcounterh
},
3926 [CSR_MHPMCOUNTER19H
] = { "mhpmcounter19h", mctr32
, read_hpmcounterh
,
3927 write_mhpmcounterh
},
3928 [CSR_MHPMCOUNTER20H
] = { "mhpmcounter20h", mctr32
, read_hpmcounterh
,
3929 write_mhpmcounterh
},
3930 [CSR_MHPMCOUNTER21H
] = { "mhpmcounter21h", mctr32
, read_hpmcounterh
,
3931 write_mhpmcounterh
},
3932 [CSR_MHPMCOUNTER22H
] = { "mhpmcounter22h", mctr32
, read_hpmcounterh
,
3933 write_mhpmcounterh
},
3934 [CSR_MHPMCOUNTER23H
] = { "mhpmcounter23h", mctr32
, read_hpmcounterh
,
3935 write_mhpmcounterh
},
3936 [CSR_MHPMCOUNTER24H
] = { "mhpmcounter24h", mctr32
, read_hpmcounterh
,
3937 write_mhpmcounterh
},
3938 [CSR_MHPMCOUNTER25H
] = { "mhpmcounter25h", mctr32
, read_hpmcounterh
,
3939 write_mhpmcounterh
},
3940 [CSR_MHPMCOUNTER26H
] = { "mhpmcounter26h", mctr32
, read_hpmcounterh
,
3941 write_mhpmcounterh
},
3942 [CSR_MHPMCOUNTER27H
] = { "mhpmcounter27h", mctr32
, read_hpmcounterh
,
3943 write_mhpmcounterh
},
3944 [CSR_MHPMCOUNTER28H
] = { "mhpmcounter28h", mctr32
, read_hpmcounterh
,
3945 write_mhpmcounterh
},
3946 [CSR_MHPMCOUNTER29H
] = { "mhpmcounter29h", mctr32
, read_hpmcounterh
,
3947 write_mhpmcounterh
},
3948 [CSR_MHPMCOUNTER30H
] = { "mhpmcounter30h", mctr32
, read_hpmcounterh
,
3949 write_mhpmcounterh
},
3950 [CSR_MHPMCOUNTER31H
] = { "mhpmcounter31h", mctr32
, read_hpmcounterh
,
3951 write_mhpmcounterh
},
3952 #endif /* !CONFIG_USER_ONLY */