target/riscv: Implement AIA IMSIC interface CSRs
[qemu/ar7.git] target/riscv/csr.c
blob fe2c8dd40ecc5e1104febf2a24fef00687501ec7
1 /*
2 * RISC-V Control and Status Registers.
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "cpu.h"
23 #include "qemu/main-loop.h"
24 #include "exec/exec-all.h"
26 /* CSR function table public API */
27 void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
29 *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
32 void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
34 csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
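/*
 * Illustrative sketch (not part of upstream csr.c): the get/set pair above
 * lets machine or board code swap in its own handler for a CSR.  The names
 * example_read_time() and example_override_time_csr() are hypothetical;
 * the .read member follows riscv_csr_operations as declared in cpu.h.
 */
static RISCVException example_read_time(CPURISCVState *env, int csrno,
                                        target_ulong *val)
{
    *val = 0; /* a board-specific time source would be read here */
    return RISCV_EXCP_NONE;
}

static void example_override_time_csr(void)
{
    riscv_csr_operations ops;

    riscv_get_csr_ops(CSR_TIME, &ops); /* copy the current handlers */
    ops.read = example_read_time;      /* replace only the read hook */
    riscv_set_csr_ops(CSR_TIME, &ops); /* install the modified entry */
}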
37 /* Predicates */
38 static RISCVException fs(CPURISCVState *env, int csrno)
40 #if !defined(CONFIG_USER_ONLY)
41 if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
42 return RISCV_EXCP_ILLEGAL_INST;
44 #endif
45 return RISCV_EXCP_NONE;
48 static RISCVException vs(CPURISCVState *env, int csrno)
50 CPUState *cs = env_cpu(env);
51 RISCVCPU *cpu = RISCV_CPU(cs);
53 if (env->misa_ext & RVV ||
54 cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
55 #if !defined(CONFIG_USER_ONLY)
56 if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
57 return RISCV_EXCP_ILLEGAL_INST;
59 #endif
60 return RISCV_EXCP_NONE;
62 return RISCV_EXCP_ILLEGAL_INST;
65 static RISCVException ctr(CPURISCVState *env, int csrno)
67 #if !defined(CONFIG_USER_ONLY)
68 CPUState *cs = env_cpu(env);
69 RISCVCPU *cpu = RISCV_CPU(cs);
71 if (!cpu->cfg.ext_counters) {
72 /* The Counters extension is not enabled */
73 return RISCV_EXCP_ILLEGAL_INST;
76 if (riscv_cpu_virt_enabled(env)) {
77 switch (csrno) {
78 case CSR_CYCLE:
79 if (!get_field(env->hcounteren, COUNTEREN_CY) &&
80 get_field(env->mcounteren, COUNTEREN_CY)) {
81 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
83 break;
84 case CSR_TIME:
85 if (!get_field(env->hcounteren, COUNTEREN_TM) &&
86 get_field(env->mcounteren, COUNTEREN_TM)) {
87 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
89 break;
90 case CSR_INSTRET:
91 if (!get_field(env->hcounteren, COUNTEREN_IR) &&
92 get_field(env->mcounteren, COUNTEREN_IR)) {
93 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
95 break;
96 case CSR_HPMCOUNTER3...CSR_HPMCOUNTER31:
97 if (!get_field(env->hcounteren, 1 << (csrno - CSR_HPMCOUNTER3)) &&
98 get_field(env->mcounteren, 1 << (csrno - CSR_HPMCOUNTER3))) {
99 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
101 break;
103 if (riscv_cpu_mxl(env) == MXL_RV32) {
104 switch (csrno) {
105 case CSR_CYCLEH:
106 if (!get_field(env->hcounteren, COUNTEREN_CY) &&
107 get_field(env->mcounteren, COUNTEREN_CY)) {
108 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
110 break;
111 case CSR_TIMEH:
112 if (!get_field(env->hcounteren, COUNTEREN_TM) &&
113 get_field(env->mcounteren, COUNTEREN_TM)) {
114 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
116 break;
117 case CSR_INSTRETH:
118 if (!get_field(env->hcounteren, COUNTEREN_IR) &&
119 get_field(env->mcounteren, COUNTEREN_IR)) {
120 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
122 break;
123 case CSR_HPMCOUNTER3H...CSR_HPMCOUNTER31H:
124 if (!get_field(env->hcounteren, 1 << (csrno - CSR_HPMCOUNTER3H)) &&
125 get_field(env->mcounteren, 1 << (csrno - CSR_HPMCOUNTER3H))) {
126 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
128 break;
132 #endif
133 return RISCV_EXCP_NONE;
136 static RISCVException ctr32(CPURISCVState *env, int csrno)
138 if (riscv_cpu_mxl(env) != MXL_RV32) {
139 return RISCV_EXCP_ILLEGAL_INST;
142 return ctr(env, csrno);
145 #if !defined(CONFIG_USER_ONLY)
146 static RISCVException any(CPURISCVState *env, int csrno)
148 return RISCV_EXCP_NONE;
151 static RISCVException any32(CPURISCVState *env, int csrno)
153 if (riscv_cpu_mxl(env) != MXL_RV32) {
154 return RISCV_EXCP_ILLEGAL_INST;
157 return any(env, csrno);
161 static int aia_any(CPURISCVState *env, int csrno)
163 if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
164 return RISCV_EXCP_ILLEGAL_INST;
167 return any(env, csrno);
170 static int aia_any32(CPURISCVState *env, int csrno)
172 if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
173 return RISCV_EXCP_ILLEGAL_INST;
176 return any32(env, csrno);
179 static RISCVException smode(CPURISCVState *env, int csrno)
181 if (riscv_has_ext(env, RVS)) {
182 return RISCV_EXCP_NONE;
185 return RISCV_EXCP_ILLEGAL_INST;
188 static int smode32(CPURISCVState *env, int csrno)
190 if (riscv_cpu_mxl(env) != MXL_RV32) {
191 return RISCV_EXCP_ILLEGAL_INST;
194 return smode(env, csrno);
197 static int aia_smode(CPURISCVState *env, int csrno)
199 if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
200 return RISCV_EXCP_ILLEGAL_INST;
203 return smode(env, csrno);
206 static int aia_smode32(CPURISCVState *env, int csrno)
208 if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
209 return RISCV_EXCP_ILLEGAL_INST;
212 return smode32(env, csrno);
215 static RISCVException hmode(CPURISCVState *env, int csrno)
217 if (riscv_has_ext(env, RVS) &&
218 riscv_has_ext(env, RVH)) {
219 /* Hypervisor extension is supported */
220 if ((env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
221 env->priv == PRV_M) {
222 return RISCV_EXCP_NONE;
223 } else {
224 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
228 return RISCV_EXCP_ILLEGAL_INST;
231 static RISCVException hmode32(CPURISCVState *env, int csrno)
233 if (riscv_cpu_mxl(env) != MXL_RV32) {
234 if (!riscv_cpu_virt_enabled(env)) {
235 return RISCV_EXCP_ILLEGAL_INST;
236 } else {
237 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
241 return hmode(env, csrno);
245 /* Check whether the PointerMasking registers can be accessed */
246 static RISCVException pointer_masking(CPURISCVState *env, int csrno)
248 /* Check if j-ext is present */
249 if (riscv_has_ext(env, RVJ)) {
250 return RISCV_EXCP_NONE;
252 return RISCV_EXCP_ILLEGAL_INST;
255 static int aia_hmode(CPURISCVState *env, int csrno)
257 if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
258 return RISCV_EXCP_ILLEGAL_INST;
261 return hmode(env, csrno);
264 static int aia_hmode32(CPURISCVState *env, int csrno)
266 if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
267 return RISCV_EXCP_ILLEGAL_INST;
270 return hmode32(env, csrno);
273 static RISCVException pmp(CPURISCVState *env, int csrno)
275 if (riscv_feature(env, RISCV_FEATURE_PMP)) {
276 return RISCV_EXCP_NONE;
279 return RISCV_EXCP_ILLEGAL_INST;
282 static RISCVException epmp(CPURISCVState *env, int csrno)
284 if (env->priv == PRV_M && riscv_feature(env, RISCV_FEATURE_EPMP)) {
285 return RISCV_EXCP_NONE;
288 return RISCV_EXCP_ILLEGAL_INST;
290 #endif
292 /* User Floating-Point CSRs */
293 static RISCVException read_fflags(CPURISCVState *env, int csrno,
294 target_ulong *val)
296 *val = riscv_cpu_get_fflags(env);
297 return RISCV_EXCP_NONE;
300 static RISCVException write_fflags(CPURISCVState *env, int csrno,
301 target_ulong val)
303 #if !defined(CONFIG_USER_ONLY)
304 env->mstatus |= MSTATUS_FS;
305 #endif
306 riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
307 return RISCV_EXCP_NONE;
310 static RISCVException read_frm(CPURISCVState *env, int csrno,
311 target_ulong *val)
313 *val = env->frm;
314 return RISCV_EXCP_NONE;
317 static RISCVException write_frm(CPURISCVState *env, int csrno,
318 target_ulong val)
320 #if !defined(CONFIG_USER_ONLY)
321 env->mstatus |= MSTATUS_FS;
322 #endif
323 env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
324 return RISCV_EXCP_NONE;
327 static RISCVException read_fcsr(CPURISCVState *env, int csrno,
328 target_ulong *val)
330 *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
331 | (env->frm << FSR_RD_SHIFT);
332 return RISCV_EXCP_NONE;
335 static RISCVException write_fcsr(CPURISCVState *env, int csrno,
336 target_ulong val)
338 #if !defined(CONFIG_USER_ONLY)
339 env->mstatus |= MSTATUS_FS;
340 #endif
341 env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
342 riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
343 return RISCV_EXCP_NONE;
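/*
 * Illustration (not part of upstream csr.c): fcsr is just frm and fflags
 * packed into one register, using the same FSR_* masks and shifts as the
 * handlers above.  example_pack_fcsr() is a hypothetical helper.
 */
static target_ulong example_pack_fcsr(target_ulong frm, target_ulong fflags)
{
    return ((frm << FSR_RD_SHIFT) & FSR_RD) |
           ((fflags << FSR_AEXC_SHIFT) & FSR_AEXC);
}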
346 static RISCVException read_vtype(CPURISCVState *env, int csrno,
347 target_ulong *val)
349 uint64_t vill;
350 switch (env->xl) {
351 case MXL_RV32:
352 vill = (uint32_t)env->vill << 31;
353 break;
354 case MXL_RV64:
355 vill = (uint64_t)env->vill << 63;
356 break;
357 default:
358 g_assert_not_reached();
360 *val = (target_ulong)vill | env->vtype;
361 return RISCV_EXCP_NONE;
364 static RISCVException read_vl(CPURISCVState *env, int csrno,
365 target_ulong *val)
367 *val = env->vl;
368 return RISCV_EXCP_NONE;
371 static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val)
373 *val = env_archcpu(env)->cfg.vlen >> 3;
374 return RISCV_EXCP_NONE;
377 static RISCVException read_vxrm(CPURISCVState *env, int csrno,
378 target_ulong *val)
380 *val = env->vxrm;
381 return RISCV_EXCP_NONE;
384 static RISCVException write_vxrm(CPURISCVState *env, int csrno,
385 target_ulong val)
387 #if !defined(CONFIG_USER_ONLY)
388 env->mstatus |= MSTATUS_VS;
389 #endif
390 env->vxrm = val;
391 return RISCV_EXCP_NONE;
394 static RISCVException read_vxsat(CPURISCVState *env, int csrno,
395 target_ulong *val)
397 *val = env->vxsat;
398 return RISCV_EXCP_NONE;
401 static RISCVException write_vxsat(CPURISCVState *env, int csrno,
402 target_ulong val)
404 #if !defined(CONFIG_USER_ONLY)
405 env->mstatus |= MSTATUS_VS;
406 #endif
407 env->vxsat = val;
408 return RISCV_EXCP_NONE;
411 static RISCVException read_vstart(CPURISCVState *env, int csrno,
412 target_ulong *val)
414 *val = env->vstart;
415 return RISCV_EXCP_NONE;
418 static RISCVException write_vstart(CPURISCVState *env, int csrno,
419 target_ulong val)
421 #if !defined(CONFIG_USER_ONLY)
422 env->mstatus |= MSTATUS_VS;
423 #endif
425 * The vstart CSR is defined to have only enough writable bits
426 * to hold the largest element index, i.e. lg2(VLEN) bits.
428 env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen));
429 return RISCV_EXCP_NONE;
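/*
 * Worked example (hypothetical helper, assuming VLEN is a power of two):
 * the same expression as in write_vstart() above.  For vlen = 256,
 * ctzl(256) = 8, so the writable mask is 0xff and vstart can hold element
 * indices 0..255.
 */
static target_ulong example_vstart_mask(unsigned long vlen)
{
    return ~(~0ULL << ctzl(vlen));
}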
432 static int read_vcsr(CPURISCVState *env, int csrno, target_ulong *val)
434 *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
435 return RISCV_EXCP_NONE;
438 static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val)
440 #if !defined(CONFIG_USER_ONLY)
441 env->mstatus |= MSTATUS_VS;
442 #endif
443 env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
444 env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
445 return RISCV_EXCP_NONE;
448 /* User Timers and Counters */
449 static RISCVException read_instret(CPURISCVState *env, int csrno,
450 target_ulong *val)
452 #if !defined(CONFIG_USER_ONLY)
453 if (icount_enabled()) {
454 *val = icount_get();
455 } else {
456 *val = cpu_get_host_ticks();
458 #else
459 *val = cpu_get_host_ticks();
460 #endif
461 return RISCV_EXCP_NONE;
464 static RISCVException read_instreth(CPURISCVState *env, int csrno,
465 target_ulong *val)
467 #if !defined(CONFIG_USER_ONLY)
468 if (icount_enabled()) {
469 *val = icount_get() >> 32;
470 } else {
471 *val = cpu_get_host_ticks() >> 32;
473 #else
474 *val = cpu_get_host_ticks() >> 32;
475 #endif
476 return RISCV_EXCP_NONE;
479 #if defined(CONFIG_USER_ONLY)
480 static RISCVException read_time(CPURISCVState *env, int csrno,
481 target_ulong *val)
483 *val = cpu_get_host_ticks();
484 return RISCV_EXCP_NONE;
487 static RISCVException read_timeh(CPURISCVState *env, int csrno,
488 target_ulong *val)
490 *val = cpu_get_host_ticks() >> 32;
491 return RISCV_EXCP_NONE;
494 #else /* CONFIG_USER_ONLY */
496 static RISCVException read_time(CPURISCVState *env, int csrno,
497 target_ulong *val)
499 uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;
501 if (!env->rdtime_fn) {
502 return RISCV_EXCP_ILLEGAL_INST;
505 *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
506 return RISCV_EXCP_NONE;
509 static RISCVException read_timeh(CPURISCVState *env, int csrno,
510 target_ulong *val)
512 uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;
514 if (!env->rdtime_fn) {
515 return RISCV_EXCP_ILLEGAL_INST;
518 *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
519 return RISCV_EXCP_NONE;
522 /* Machine constants */
524 #define M_MODE_INTERRUPTS ((uint64_t)(MIP_MSIP | MIP_MTIP | MIP_MEIP))
525 #define S_MODE_INTERRUPTS ((uint64_t)(MIP_SSIP | MIP_STIP | MIP_SEIP))
526 #define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP))
527 #define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS))
529 #define VSTOPI_NUM_SRCS 5
531 static const uint64_t delegable_ints = S_MODE_INTERRUPTS |
532 VS_MODE_INTERRUPTS;
533 static const uint64_t vs_delegable_ints = VS_MODE_INTERRUPTS;
534 static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
535 HS_MODE_INTERRUPTS;
536 #define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
537 (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
538 (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
539 (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
540 (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
541 (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
542 (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
543 (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
544 (1ULL << (RISCV_EXCP_U_ECALL)) | \
545 (1ULL << (RISCV_EXCP_S_ECALL)) | \
546 (1ULL << (RISCV_EXCP_VS_ECALL)) | \
547 (1ULL << (RISCV_EXCP_M_ECALL)) | \
548 (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
549 (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
550 (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
551 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
552 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
553 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
554 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
555 static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
556 ~((1ULL << (RISCV_EXCP_S_ECALL)) |
557 (1ULL << (RISCV_EXCP_VS_ECALL)) |
558 (1ULL << (RISCV_EXCP_M_ECALL)) |
559 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
560 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
561 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
562 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
563 static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
564 SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
565 SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
566 static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP;
567 static const target_ulong hip_writable_mask = MIP_VSSIP;
568 static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
569 static const target_ulong vsip_writable_mask = MIP_VSSIP;
571 static const char valid_vm_1_10_32[16] = {
572 [VM_1_10_MBARE] = 1,
573 [VM_1_10_SV32] = 1
576 static const char valid_vm_1_10_64[16] = {
577 [VM_1_10_MBARE] = 1,
578 [VM_1_10_SV39] = 1,
579 [VM_1_10_SV48] = 1,
580 [VM_1_10_SV57] = 1
583 /* Machine Information Registers */
584 static RISCVException read_zero(CPURISCVState *env, int csrno,
585 target_ulong *val)
587 *val = 0;
588 return RISCV_EXCP_NONE;
591 static RISCVException write_ignore(CPURISCVState *env, int csrno,
592 target_ulong val)
594 return RISCV_EXCP_NONE;
597 static RISCVException read_mhartid(CPURISCVState *env, int csrno,
598 target_ulong *val)
600 *val = env->mhartid;
601 return RISCV_EXCP_NONE;
604 /* Machine Trap Setup */
606 /* We do not store SD explicitly, only compute it on demand. */
607 static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
609 if ((status & MSTATUS_FS) == MSTATUS_FS ||
610 (status & MSTATUS_VS) == MSTATUS_VS ||
611 (status & MSTATUS_XS) == MSTATUS_XS) {
612 switch (xl) {
613 case MXL_RV32:
614 return status | MSTATUS32_SD;
615 case MXL_RV64:
616 return status | MSTATUS64_SD;
617 case MXL_RV128:
618 return MSTATUSH128_SD;
619 default:
620 g_assert_not_reached();
623 return status;
626 static RISCVException read_mstatus(CPURISCVState *env, int csrno,
627 target_ulong *val)
629 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
630 return RISCV_EXCP_NONE;
633 static int validate_vm(CPURISCVState *env, target_ulong vm)
635 if (riscv_cpu_mxl(env) == MXL_RV32) {
636 return valid_vm_1_10_32[vm & 0xf];
637 } else {
638 return valid_vm_1_10_64[vm & 0xf];
642 static RISCVException write_mstatus(CPURISCVState *env, int csrno,
643 target_ulong val)
645 uint64_t mstatus = env->mstatus;
646 uint64_t mask = 0;
647 RISCVMXL xl = riscv_cpu_mxl(env);
649 /* flush tlb on mstatus fields that affect VM */
650 if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV |
651 MSTATUS_MPRV | MSTATUS_SUM)) {
652 tlb_flush(env_cpu(env));
654 mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
655 MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM |
656 MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
657 MSTATUS_TW | MSTATUS_VS;
659 if (xl != MXL_RV32 || env->debugger) {
661 * RV32: MPV and GVA are not in mstatus. The current plan is to
662 * add them to mstatush. For now, we just don't support it.
664 mask |= MSTATUS_MPV | MSTATUS_GVA;
665 if ((val & MSTATUS64_UXL) != 0) {
666 mask |= MSTATUS64_UXL;
670 mstatus = (mstatus & ~mask) | (val & mask);
672 if (xl > MXL_RV32) {
673 /* SXL field is for now read only */
674 mstatus = set_field(mstatus, MSTATUS64_SXL, xl);
676 env->mstatus = mstatus;
677 env->xl = cpu_recompute_xl(env);
679 return RISCV_EXCP_NONE;
682 static RISCVException read_mstatush(CPURISCVState *env, int csrno,
683 target_ulong *val)
685 *val = env->mstatus >> 32;
686 return RISCV_EXCP_NONE;
689 static RISCVException write_mstatush(CPURISCVState *env, int csrno,
690 target_ulong val)
692 uint64_t valh = (uint64_t)val << 32;
693 uint64_t mask = MSTATUS_MPV | MSTATUS_GVA;
695 if ((valh ^ env->mstatus) & (MSTATUS_MPV)) {
696 tlb_flush(env_cpu(env));
699 env->mstatus = (env->mstatus & ~mask) | (valh & mask);
701 return RISCV_EXCP_NONE;
704 static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
705 Int128 *val)
707 *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128, env->mstatus));
708 return RISCV_EXCP_NONE;
711 static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
712 Int128 *val)
714 *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
715 return RISCV_EXCP_NONE;
718 static RISCVException read_misa(CPURISCVState *env, int csrno,
719 target_ulong *val)
721 target_ulong misa;
723 switch (env->misa_mxl) {
724 case MXL_RV32:
725 misa = (target_ulong)MXL_RV32 << 30;
726 break;
727 #ifdef TARGET_RISCV64
728 case MXL_RV64:
729 misa = (target_ulong)MXL_RV64 << 62;
730 break;
731 #endif
732 default:
733 g_assert_not_reached();
736 *val = misa | env->misa_ext;
737 return RISCV_EXCP_NONE;
740 static RISCVException write_misa(CPURISCVState *env, int csrno,
741 target_ulong val)
743 if (!riscv_feature(env, RISCV_FEATURE_MISA)) {
744 /* drop write to misa */
745 return RISCV_EXCP_NONE;
748 /* 'I' or 'E' must be present */
749 if (!(val & (RVI | RVE))) {
750 /* It is not, drop write to misa */
751 return RISCV_EXCP_NONE;
754 /* 'E' excludes all other extensions */
755 if (val & RVE) {
756 /* When we support 'E' we can do "val = RVE;", but for now
757 * we just drop writes if 'E' is present.
759 return RISCV_EXCP_NONE;
763 * misa.MXL writes are not supported by QEMU.
764 * Drop writes to those bits.
767 /* Mask extensions that are not supported by this hart */
768 val &= env->misa_ext_mask;
770 /* Mask extensions that are not supported by QEMU */
771 val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU | RVV);
773 /* 'D' depends on 'F', so clear 'D' if 'F' is not present */
774 if ((val & RVD) && !(val & RVF)) {
775 val &= ~RVD;
778 /* Suppress 'C' if next instruction is not aligned
779 * TODO: this should check next_pc
781 if ((val & RVC) && (GETPC() & ~3) != 0) {
782 val &= ~RVC;
785 /* If nothing changed, do nothing. */
786 if (val == env->misa_ext) {
787 return RISCV_EXCP_NONE;
790 /* flush translation cache */
791 tb_flush(env_cpu(env));
792 env->misa_ext = val;
793 env->xl = riscv_cpu_mxl(env);
794 return RISCV_EXCP_NONE;
797 static RISCVException read_medeleg(CPURISCVState *env, int csrno,
798 target_ulong *val)
800 *val = env->medeleg;
801 return RISCV_EXCP_NONE;
804 static RISCVException write_medeleg(CPURISCVState *env, int csrno,
805 target_ulong val)
807 env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
808 return RISCV_EXCP_NONE;
811 static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno,
812 uint64_t *ret_val,
813 uint64_t new_val, uint64_t wr_mask)
815 uint64_t mask = wr_mask & delegable_ints;
817 if (ret_val) {
818 *ret_val = env->mideleg;
821 env->mideleg = (env->mideleg & ~mask) | (new_val & mask);
823 if (riscv_has_ext(env, RVH)) {
824 env->mideleg |= HS_MODE_INTERRUPTS;
827 return RISCV_EXCP_NONE;
830 static RISCVException rmw_mideleg(CPURISCVState *env, int csrno,
831 target_ulong *ret_val,
832 target_ulong new_val, target_ulong wr_mask)
834 uint64_t rval;
835 RISCVException ret;
837 ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask);
838 if (ret_val) {
839 *ret_val = rval;
842 return ret;
845 static RISCVException rmw_midelegh(CPURISCVState *env, int csrno,
846 target_ulong *ret_val,
847 target_ulong new_val,
848 target_ulong wr_mask)
850 uint64_t rval;
851 RISCVException ret;
853 ret = rmw_mideleg64(env, csrno, &rval,
854 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
855 if (ret_val) {
856 *ret_val = rval >> 32;
859 return ret;
862 static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
863 uint64_t *ret_val,
864 uint64_t new_val, uint64_t wr_mask)
866 uint64_t mask = wr_mask & all_ints;
868 if (ret_val) {
869 *ret_val = env->mie;
872 env->mie = (env->mie & ~mask) | (new_val & mask);
874 if (!riscv_has_ext(env, RVH)) {
875 env->mie &= ~((uint64_t)MIP_SGEIP);
878 return RISCV_EXCP_NONE;
881 static RISCVException rmw_mie(CPURISCVState *env, int csrno,
882 target_ulong *ret_val,
883 target_ulong new_val, target_ulong wr_mask)
885 uint64_t rval;
886 RISCVException ret;
888 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask);
889 if (ret_val) {
890 *ret_val = rval;
893 return ret;
896 static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
897 target_ulong *ret_val,
898 target_ulong new_val, target_ulong wr_mask)
900 uint64_t rval;
901 RISCVException ret;
903 ret = rmw_mie64(env, csrno, &rval,
904 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
905 if (ret_val) {
906 *ret_val = rval >> 32;
909 return ret;
912 static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val)
914 int irq;
915 uint8_t iprio;
917 irq = riscv_cpu_mirq_pending(env);
918 if (irq <= 0 || irq > 63) {
919 *val = 0;
920 } else {
921 iprio = env->miprio[irq];
922 if (!iprio) {
923 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) {
924 iprio = IPRIO_MMAXIPRIO;
927 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
928 *val |= iprio;
931 return RISCV_EXCP_NONE;
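/*
 * Illustration (hypothetical helper): mtopi reports the highest-priority
 * pending-and-enabled interrupt as an IID field plus a priority byte,
 * exactly as composed at the end of read_mtopi() above.
 */
static target_ulong example_xtopi_value(int irq, uint8_t prio)
{
    return ((target_ulong)(irq & TOPI_IID_MASK) << TOPI_IID_SHIFT) | prio;
}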
934 static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
936 if (!riscv_cpu_virt_enabled(env)) {
937 return csrno;
940 switch (csrno) {
941 case CSR_SISELECT:
942 return CSR_VSISELECT;
943 case CSR_SIREG:
944 return CSR_VSIREG;
945 case CSR_SSETEIPNUM:
946 return CSR_VSSETEIPNUM;
947 case CSR_SCLREIPNUM:
948 return CSR_VSCLREIPNUM;
949 case CSR_SSETEIENUM:
950 return CSR_VSSETEIENUM;
951 case CSR_SCLREIENUM:
952 return CSR_VSCLREIENUM;
953 case CSR_STOPEI:
954 return CSR_VSTOPEI;
955 default:
956 return csrno;
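/*
 * Illustration (hypothetical helper): with V=1 the S-mode AIA CSR numbers
 * are transparently redirected to their VS twins, so e.g. a guest access
 * to siselect really operates on vsiselect.
 */
static bool example_is_vs_alias_used(CPURISCVState *env, int csrno)
{
    return aia_xlate_vs_csrno(env, csrno) != csrno;
}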
960 static int rmw_xiselect(CPURISCVState *env, int csrno, target_ulong *val,
961 target_ulong new_val, target_ulong wr_mask)
963 target_ulong *iselect;
965 /* Translate CSR number for VS-mode */
966 csrno = aia_xlate_vs_csrno(env, csrno);
968 /* Find the iselect CSR based on CSR number */
969 switch (csrno) {
970 case CSR_MISELECT:
971 iselect = &env->miselect;
972 break;
973 case CSR_SISELECT:
974 iselect = &env->siselect;
975 break;
976 case CSR_VSISELECT:
977 iselect = &env->vsiselect;
978 break;
979 default:
980 return RISCV_EXCP_ILLEGAL_INST;
983 if (val) {
984 *val = *iselect;
987 wr_mask &= ISELECT_MASK;
988 if (wr_mask) {
989 *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
992 return RISCV_EXCP_NONE;
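/*
 * Illustrative sketch (not part of upstream csr.c): the AIA indirect CSR
 * access pattern is "select, then access" -- software writes an index
 * into *iselect and then reads or writes *ireg.  Routed through the
 * generic riscv_csrrw() helper from cpu.h it looks roughly like this;
 * example_indirect_csr_read() is hypothetical.
 */
static RISCVException example_indirect_csr_read(CPURISCVState *env,
                                                target_ulong isel,
                                                target_ulong *out)
{
    RISCVException ret;
    target_ulong discard;

    /* 1. select a register inside the interrupt controller */
    ret = riscv_csrrw(env, CSR_MISELECT, &discard, isel, (target_ulong)-1);
    if (ret != RISCV_EXCP_NONE) {
        return ret;
    }
    /* 2. read the selected register back through mireg */
    return riscv_csrrw(env, CSR_MIREG, out, 0, 0);
}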
995 static int rmw_iprio(target_ulong xlen,
996 target_ulong iselect, uint8_t *iprio,
997 target_ulong *val, target_ulong new_val,
998 target_ulong wr_mask, int ext_irq_no)
1000 int i, firq, nirqs;
1001 target_ulong old_val;
1003 if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) {
1004 return -EINVAL;
1006 if (xlen != 32 && iselect & 0x1) {
1007 return -EINVAL;
1010 nirqs = 4 * (xlen / 32);
1011 firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs);
1013 old_val = 0;
1014 for (i = 0; i < nirqs; i++) {
1015 old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i);
1018 if (val) {
1019 *val = old_val;
1022 if (wr_mask) {
1023 new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
1024 for (i = 0; i < nirqs; i++) {
1026 * M-level and S-level external IRQ priority is always read-only
1027 * zero. This means the default priority order is always used for
1028 * M-level and S-level external IRQs.
1030 if ((firq + i) == ext_irq_no) {
1031 continue;
1033 iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff;
1037 return 0;
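/*
 * Worked example (hypothetical helper): each accessible iprioX selector
 * covers 4 * (xlen / 32) one-byte priorities, so on RV64
 * iselect = ISELECT_IPRIO0 + 2 maps to firq = 8, i.e. the priority bytes
 * for IRQs 8..15.  Same arithmetic as rmw_iprio() above.
 */
static int example_iprio_first_irq(target_ulong xlen, target_ulong iselect)
{
    int nirqs = 4 * (xlen / 32);

    return ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * nirqs;
}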
1040 static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
1041 target_ulong new_val, target_ulong wr_mask)
1043 bool virt;
1044 uint8_t *iprio;
1045 int ret = -EINVAL;
1046 target_ulong priv, isel, vgein;
1048 /* Translate CSR number for VS-mode */
1049 csrno = aia_xlate_vs_csrno(env, csrno);
1051 /* Decode register details from CSR number */
1052 virt = false;
1053 switch (csrno) {
1054 case CSR_MIREG:
1055 iprio = env->miprio;
1056 isel = env->miselect;
1057 priv = PRV_M;
1058 break;
1059 case CSR_SIREG:
1060 iprio = env->siprio;
1061 isel = env->siselect;
1062 priv = PRV_S;
1063 break;
1064 case CSR_VSIREG:
1065 iprio = env->hviprio;
1066 isel = env->vsiselect;
1067 priv = PRV_S;
1068 virt = true;
1069 break;
1070 default:
1071 goto done;
1074 /* Find the selected guest interrupt file */
1075 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
1077 if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) {
1078 /* Local interrupt priority registers not available for VS-mode */
1079 if (!virt) {
1080 ret = rmw_iprio(riscv_cpu_mxl_bits(env),
1081 isel, iprio, val, new_val, wr_mask,
1082 (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT);
1084 } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) {
1085 /* IMSIC registers are only available when the machine implements an IMSIC. */
1086 if (env->aia_ireg_rmw_fn[priv]) {
1087 /* Selected guest interrupt file should not be zero */
1088 if (virt && (!vgein || env->geilen < vgein)) {
1089 goto done;
1091 /* Call machine specific IMSIC register emulation */
1092 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
1093 AIA_MAKE_IREG(isel, priv, virt, vgein,
1094 riscv_cpu_mxl_bits(env)),
1095 val, new_val, wr_mask);
1099 done:
1100 if (ret) {
1101 return (riscv_cpu_virt_enabled(env) && virt) ?
1102 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
1104 return RISCV_EXCP_NONE;
1107 static int rmw_xsetclreinum(CPURISCVState *env, int csrno, target_ulong *val,
1108 target_ulong new_val, target_ulong wr_mask)
1110 int ret = -EINVAL;
1111 bool set, pend, virt;
1112 target_ulong priv, isel, vgein, xlen, nval, wmask;
1114 /* Translate CSR number for VS-mode */
1115 csrno = aia_xlate_vs_csrno(env, csrno);
1117 /* Decode register details from CSR number */
1118 virt = set = pend = false;
1119 switch (csrno) {
1120 case CSR_MSETEIPNUM:
1121 priv = PRV_M;
1122 set = true;
1123 pend = true;
1124 break;
1125 case CSR_MCLREIPNUM:
1126 priv = PRV_M;
1127 pend = true;
1128 break;
1129 case CSR_MSETEIENUM:
1130 priv = PRV_M;
1131 set = true;
1132 break;
1133 case CSR_MCLREIENUM:
1134 priv = PRV_M;
1135 break;
1136 case CSR_SSETEIPNUM:
1137 priv = PRV_S;
1138 set = true;
1139 pend = true;
1140 break;
1141 case CSR_SCLREIPNUM:
1142 priv = PRV_S;
1143 pend = true;
1144 break;
1145 case CSR_SSETEIENUM:
1146 priv = PRV_S;
1147 set = true;
1148 break;
1149 case CSR_SCLREIENUM:
1150 priv = PRV_S;
1151 break;
1152 case CSR_VSSETEIPNUM:
1153 priv = PRV_S;
1154 virt = true;
1155 set = true;
1156 pend = true;
1157 break;
1158 case CSR_VSCLREIPNUM:
1159 priv = PRV_S;
1160 virt = true;
1161 pend = true;
1162 break;
1163 case CSR_VSSETEIENUM:
1164 priv = PRV_S;
1165 virt = true;
1166 set = true;
1167 break;
1168 case CSR_VSCLREIENUM:
1169 priv = PRV_S;
1170 virt = true;
1171 break;
1172 default:
1173 goto done;
1176 /* IMSIC CSRs are only available when the machine implements an IMSIC. */
1177 if (!env->aia_ireg_rmw_fn[priv]) {
1178 goto done;
1181 /* Find the selected guest interrupt file */
1182 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
1184 /* Selected guest interrupt file should be valid */
1185 if (virt && (!vgein || env->geilen < vgein)) {
1186 goto done;
1189 /* Set/Clear CSRs always read zero */
1190 if (val) {
1191 *val = 0;
1194 if (wr_mask) {
1195 /* Get interrupt number */
1196 new_val &= wr_mask;
1198 /* Find target interrupt pending/enable register */
1199 xlen = riscv_cpu_mxl_bits(env);
1200 isel = (new_val / xlen);
1201 isel *= (xlen / IMSIC_EIPx_BITS);
1202 isel += (pend) ? ISELECT_IMSIC_EIP0 : ISELECT_IMSIC_EIE0;
1204 /* Find the interrupt bit to be set/clear */
1205 wmask = ((target_ulong)1) << (new_val % xlen);
1206 nval = (set) ? wmask : 0;
1208 /* Call machine specific IMSIC register emulation */
1209 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
1210 AIA_MAKE_IREG(isel, priv, virt,
1211 vgein, xlen),
1212 NULL, nval, wmask);
1213 } else {
1214 ret = 0;
1217 done:
1218 if (ret) {
1219 return (riscv_cpu_virt_enabled(env) && virt) ?
1220 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
1222 return RISCV_EXCP_NONE;
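/*
 * Worked example (hypothetical helper, assuming RV64 and interrupt
 * number 100): the code above picks eip/eie selector offset
 * (100 / 64) * (64 / IMSIC_EIPx_BITS) = 2 and bit 100 % 64 = 36 within
 * the selected register.
 */
static void example_eix_target(target_ulong xlen, target_ulong num,
                               bool pend, target_ulong *isel,
                               target_ulong *bit)
{
    *isel = (num / xlen) * (xlen / IMSIC_EIPx_BITS) +
            (pend ? ISELECT_IMSIC_EIP0 : ISELECT_IMSIC_EIE0);
    *bit = num % xlen;
}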
1225 static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val,
1226 target_ulong new_val, target_ulong wr_mask)
1228 bool virt;
1229 int ret = -EINVAL;
1230 target_ulong priv, vgein;
1232 /* Translate CSR number for VS-mode */
1233 csrno = aia_xlate_vs_csrno(env, csrno);
1235 /* Decode register details from CSR number */
1236 virt = false;
1237 switch (csrno) {
1238 case CSR_MTOPEI:
1239 priv = PRV_M;
1240 break;
1241 case CSR_STOPEI:
1242 priv = PRV_S;
1243 break;
1244 case CSR_VSTOPEI:
1245 priv = PRV_S;
1246 virt = true;
1247 break;
1248 default:
1249 goto done;
1252 /* IMSIC CSRs are only available when the machine implements an IMSIC. */
1253 if (!env->aia_ireg_rmw_fn[priv]) {
1254 goto done;
1257 /* Find the selected guest interrupt file */
1258 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
1260 /* Selected guest interrupt file should be valid */
1261 if (virt && (!vgein || env->geilen < vgein)) {
1262 goto done;
1265 /* Call machine specific IMSIC register emulation for TOPEI */
1266 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
1267 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein,
1268 riscv_cpu_mxl_bits(env)),
1269 val, new_val, wr_mask);
1271 done:
1272 if (ret) {
1273 return (riscv_cpu_virt_enabled(env) && virt) ?
1274 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
1276 return RISCV_EXCP_NONE;
1279 static RISCVException read_mtvec(CPURISCVState *env, int csrno,
1280 target_ulong *val)
1282 *val = env->mtvec;
1283 return RISCV_EXCP_NONE;
1286 static RISCVException write_mtvec(CPURISCVState *env, int csrno,
1287 target_ulong val)
1289 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, values >= 2 are reserved */
1290 if ((val & 3) < 2) {
1291 env->mtvec = val;
1292 } else {
1293 qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n");
1295 return RISCV_EXCP_NONE;
1298 static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
1299 target_ulong *val)
1301 *val = env->mcounteren;
1302 return RISCV_EXCP_NONE;
1305 static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
1306 target_ulong val)
1308 env->mcounteren = val;
1309 return RISCV_EXCP_NONE;
1312 /* Machine Trap Handling */
1313 static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
1314 Int128 *val)
1316 *val = int128_make128(env->mscratch, env->mscratchh);
1317 return RISCV_EXCP_NONE;
1320 static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
1321 Int128 val)
1323 env->mscratch = int128_getlo(val);
1324 env->mscratchh = int128_gethi(val);
1325 return RISCV_EXCP_NONE;
1328 static RISCVException read_mscratch(CPURISCVState *env, int csrno,
1329 target_ulong *val)
1331 *val = env->mscratch;
1332 return RISCV_EXCP_NONE;
1335 static RISCVException write_mscratch(CPURISCVState *env, int csrno,
1336 target_ulong val)
1338 env->mscratch = val;
1339 return RISCV_EXCP_NONE;
1342 static RISCVException read_mepc(CPURISCVState *env, int csrno,
1343 target_ulong *val)
1345 *val = env->mepc;
1346 return RISCV_EXCP_NONE;
1349 static RISCVException write_mepc(CPURISCVState *env, int csrno,
1350 target_ulong val)
1352 env->mepc = val;
1353 return RISCV_EXCP_NONE;
1356 static RISCVException read_mcause(CPURISCVState *env, int csrno,
1357 target_ulong *val)
1359 *val = env->mcause;
1360 return RISCV_EXCP_NONE;
1363 static RISCVException write_mcause(CPURISCVState *env, int csrno,
1364 target_ulong val)
1366 env->mcause = val;
1367 return RISCV_EXCP_NONE;
1370 static RISCVException read_mtval(CPURISCVState *env, int csrno,
1371 target_ulong *val)
1373 *val = env->mtval;
1374 return RISCV_EXCP_NONE;
1377 static RISCVException write_mtval(CPURISCVState *env, int csrno,
1378 target_ulong val)
1380 env->mtval = val;
1381 return RISCV_EXCP_NONE;
1384 static RISCVException rmw_mip64(CPURISCVState *env, int csrno,
1385 uint64_t *ret_val,
1386 uint64_t new_val, uint64_t wr_mask)
1388 RISCVCPU *cpu = env_archcpu(env);
1389 /* Allow software control of delegable interrupts not claimed by hardware */
1390 uint64_t old_mip, mask = wr_mask & delegable_ints & ~env->miclaim;
1391 uint32_t gin;
1393 if (mask) {
1394 old_mip = riscv_cpu_update_mip(cpu, mask, (new_val & mask));
1395 } else {
1396 old_mip = env->mip;
1399 if (csrno != CSR_HVIP) {
1400 gin = get_field(env->hstatus, HSTATUS_VGEIN);
1401 old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0;
1404 if (ret_val) {
1405 *ret_val = old_mip;
1408 return RISCV_EXCP_NONE;
1411 static RISCVException rmw_mip(CPURISCVState *env, int csrno,
1412 target_ulong *ret_val,
1413 target_ulong new_val, target_ulong wr_mask)
1415 uint64_t rval;
1416 RISCVException ret;
1418 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask);
1419 if (ret_val) {
1420 *ret_val = rval;
1423 return ret;
1426 static RISCVException rmw_miph(CPURISCVState *env, int csrno,
1427 target_ulong *ret_val,
1428 target_ulong new_val, target_ulong wr_mask)
1430 uint64_t rval;
1431 RISCVException ret;
1433 ret = rmw_mip64(env, csrno, &rval,
1434 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1435 if (ret_val) {
1436 *ret_val = rval >> 32;
1439 return ret;
1442 /* Supervisor Trap Setup */
1443 static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
1444 Int128 *val)
1446 uint64_t mask = sstatus_v1_10_mask;
1447 uint64_t sstatus = env->mstatus & mask;
1448 if (env->xl != MXL_RV32 || env->debugger) {
1449 mask |= SSTATUS64_UXL;
1452 *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
1453 return RISCV_EXCP_NONE;
1456 static RISCVException read_sstatus(CPURISCVState *env, int csrno,
1457 target_ulong *val)
1459 target_ulong mask = (sstatus_v1_10_mask);
1460 if (env->xl != MXL_RV32 || env->debugger) {
1461 mask |= SSTATUS64_UXL;
1463 /* TODO: Use SXL not MXL. */
1464 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
1465 return RISCV_EXCP_NONE;
1468 static RISCVException write_sstatus(CPURISCVState *env, int csrno,
1469 target_ulong val)
1471 target_ulong mask = (sstatus_v1_10_mask);
1473 if (env->xl != MXL_RV32 || env->debugger) {
1474 if ((val & SSTATUS64_UXL) != 0) {
1475 mask |= SSTATUS64_UXL;
1478 target_ulong newval = (env->mstatus & ~mask) | (val & mask);
1479 return write_mstatus(env, CSR_MSTATUS, newval);
1482 static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
1483 uint64_t *ret_val,
1484 uint64_t new_val, uint64_t wr_mask)
1486 RISCVException ret;
1487 uint64_t rval, vsbits, mask = env->hideleg & VS_MODE_INTERRUPTS;
1489 /* Bring VS-level bits to correct position */
1490 vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
1491 new_val &= ~(VS_MODE_INTERRUPTS >> 1);
1492 new_val |= vsbits << 1;
1493 vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
1494 wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
1495 wr_mask |= vsbits << 1;
1497 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & mask);
1498 if (ret_val) {
1499 rval &= mask;
1500 vsbits = rval & VS_MODE_INTERRUPTS;
1501 rval &= ~VS_MODE_INTERRUPTS;
1502 *ret_val = rval | (vsbits >> 1);
1505 return ret;
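/*
 * Illustration (hypothetical helper): the guest sees the VS-level bits at
 * the S-level positions, one bit to the right of where they live in
 * mie/mip; e.g. MIP_VSSIP (bit 2) shows up as bit 1 in vsie/vsip.
 */
static uint64_t example_vs_bits_guest_view(uint64_t host_bits)
{
    return (host_bits & VS_MODE_INTERRUPTS) >> 1;
}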
1508 static RISCVException rmw_vsie(CPURISCVState *env, int csrno,
1509 target_ulong *ret_val,
1510 target_ulong new_val, target_ulong wr_mask)
1512 uint64_t rval;
1513 RISCVException ret;
1515 ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask);
1516 if (ret_val) {
1517 *ret_val = rval;
1520 return ret;
1523 static RISCVException rmw_vsieh(CPURISCVState *env, int csrno,
1524 target_ulong *ret_val,
1525 target_ulong new_val, target_ulong wr_mask)
1527 uint64_t rval;
1528 RISCVException ret;
1530 ret = rmw_vsie64(env, csrno, &rval,
1531 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1532 if (ret_val) {
1533 *ret_val = rval >> 32;
1536 return ret;
1539 static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
1540 uint64_t *ret_val,
1541 uint64_t new_val, uint64_t wr_mask)
1543 RISCVException ret;
1544 uint64_t mask = env->mideleg & S_MODE_INTERRUPTS;
1546 if (riscv_cpu_virt_enabled(env)) {
1547 if (env->hvictl & HVICTL_VTI) {
1548 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1550 ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
1551 } else {
1552 ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & mask);
1555 if (ret_val) {
1556 *ret_val &= mask;
1559 return ret;
1562 static RISCVException rmw_sie(CPURISCVState *env, int csrno,
1563 target_ulong *ret_val,
1564 target_ulong new_val, target_ulong wr_mask)
1566 uint64_t rval;
1567 RISCVException ret;
1569 ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask);
1570 if (ret == RISCV_EXCP_NONE && ret_val) {
1571 *ret_val = rval;
1574 return ret;
1577 static RISCVException rmw_sieh(CPURISCVState *env, int csrno,
1578 target_ulong *ret_val,
1579 target_ulong new_val, target_ulong wr_mask)
1581 uint64_t rval;
1582 RISCVException ret;
1584 ret = rmw_sie64(env, csrno, &rval,
1585 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1586 if (ret_val) {
1587 *ret_val = rval >> 32;
1590 return ret;
1593 static RISCVException read_stvec(CPURISCVState *env, int csrno,
1594 target_ulong *val)
1596 *val = env->stvec;
1597 return RISCV_EXCP_NONE;
1600 static RISCVException write_stvec(CPURISCVState *env, int csrno,
1601 target_ulong val)
1603 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, values >= 2 are reserved */
1604 if ((val & 3) < 2) {
1605 env->stvec = val;
1606 } else {
1607 qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n");
1609 return RISCV_EXCP_NONE;
1612 static RISCVException read_scounteren(CPURISCVState *env, int csrno,
1613 target_ulong *val)
1615 *val = env->scounteren;
1616 return RISCV_EXCP_NONE;
1619 static RISCVException write_scounteren(CPURISCVState *env, int csrno,
1620 target_ulong val)
1622 env->scounteren = val;
1623 return RISCV_EXCP_NONE;
1626 /* Supervisor Trap Handling */
1627 static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
1628 Int128 *val)
1630 *val = int128_make128(env->sscratch, env->sscratchh);
1631 return RISCV_EXCP_NONE;
1634 static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
1635 Int128 val)
1637 env->sscratch = int128_getlo(val);
1638 env->sscratchh = int128_gethi(val);
1639 return RISCV_EXCP_NONE;
1642 static RISCVException read_sscratch(CPURISCVState *env, int csrno,
1643 target_ulong *val)
1645 *val = env->sscratch;
1646 return RISCV_EXCP_NONE;
1649 static RISCVException write_sscratch(CPURISCVState *env, int csrno,
1650 target_ulong val)
1652 env->sscratch = val;
1653 return RISCV_EXCP_NONE;
1656 static RISCVException read_sepc(CPURISCVState *env, int csrno,
1657 target_ulong *val)
1659 *val = env->sepc;
1660 return RISCV_EXCP_NONE;
1663 static RISCVException write_sepc(CPURISCVState *env, int csrno,
1664 target_ulong val)
1666 env->sepc = val;
1667 return RISCV_EXCP_NONE;
1670 static RISCVException read_scause(CPURISCVState *env, int csrno,
1671 target_ulong *val)
1673 *val = env->scause;
1674 return RISCV_EXCP_NONE;
1677 static RISCVException write_scause(CPURISCVState *env, int csrno,
1678 target_ulong val)
1680 env->scause = val;
1681 return RISCV_EXCP_NONE;
1684 static RISCVException read_stval(CPURISCVState *env, int csrno,
1685 target_ulong *val)
1687 *val = env->stval;
1688 return RISCV_EXCP_NONE;
1691 static RISCVException write_stval(CPURISCVState *env, int csrno,
1692 target_ulong val)
1694 env->stval = val;
1695 return RISCV_EXCP_NONE;
1698 static RISCVException rmw_vsip64(CPURISCVState *env, int csrno,
1699 uint64_t *ret_val,
1700 uint64_t new_val, uint64_t wr_mask)
1702 RISCVException ret;
1703 uint64_t rval, vsbits, mask = env->hideleg & vsip_writable_mask;
1705 /* Bring VS-level bits to correct position */
1706 vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
1707 new_val &= ~(VS_MODE_INTERRUPTS >> 1);
1708 new_val |= vsbits << 1;
1709 vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
1710 wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
1711 wr_mask |= vsbits << 1;
1713 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask & mask);
1714 if (ret_val) {
1715 rval &= mask;
1716 vsbits = rval & VS_MODE_INTERRUPTS;
1717 rval &= ~VS_MODE_INTERRUPTS;
1718 *ret_val = rval | (vsbits >> 1);
1721 return ret;
1724 static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
1725 target_ulong *ret_val,
1726 target_ulong new_val, target_ulong wr_mask)
1728 uint64_t rval;
1729 RISCVException ret;
1731 ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask);
1732 if (ret_val) {
1733 *ret_val = rval;
1736 return ret;
1739 static RISCVException rmw_vsiph(CPURISCVState *env, int csrno,
1740 target_ulong *ret_val,
1741 target_ulong new_val, target_ulong wr_mask)
1743 uint64_t rval;
1744 RISCVException ret;
1746 ret = rmw_vsip64(env, csrno, &rval,
1747 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1748 if (ret_val) {
1749 *ret_val = rval >> 32;
1752 return ret;
1755 static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
1756 uint64_t *ret_val,
1757 uint64_t new_val, uint64_t wr_mask)
1759 RISCVException ret;
1760 uint64_t mask = env->mideleg & sip_writable_mask;
1762 if (riscv_cpu_virt_enabled(env)) {
1763 if (env->hvictl & HVICTL_VTI) {
1764 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1766 ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
1767 } else {
1768 ret = rmw_mip64(env, csrno, ret_val, new_val, wr_mask & mask);
1771 if (ret_val) {
1772 *ret_val &= env->mideleg & S_MODE_INTERRUPTS;
1775 return ret;
1778 static RISCVException rmw_sip(CPURISCVState *env, int csrno,
1779 target_ulong *ret_val,
1780 target_ulong new_val, target_ulong wr_mask)
1782 uint64_t rval;
1783 RISCVException ret;
1785 ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask);
1786 if (ret_val) {
1787 *ret_val = rval;
1790 return ret;
1793 static RISCVException rmw_siph(CPURISCVState *env, int csrno,
1794 target_ulong *ret_val,
1795 target_ulong new_val, target_ulong wr_mask)
1797 uint64_t rval;
1798 RISCVException ret;
1800 ret = rmw_sip64(env, csrno, &rval,
1801 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1802 if (ret_val) {
1803 *ret_val = rval >> 32;
1806 return ret;
1809 /* Supervisor Protection and Translation */
1810 static RISCVException read_satp(CPURISCVState *env, int csrno,
1811 target_ulong *val)
1813 if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
1814 *val = 0;
1815 return RISCV_EXCP_NONE;
1818 if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
1819 return RISCV_EXCP_ILLEGAL_INST;
1820 } else {
1821 *val = env->satp;
1824 return RISCV_EXCP_NONE;
1827 static RISCVException write_satp(CPURISCVState *env, int csrno,
1828 target_ulong val)
1830 target_ulong vm, mask, asid;
1832 if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
1833 return RISCV_EXCP_NONE;
1836 if (riscv_cpu_mxl(env) == MXL_RV32) {
1837 vm = validate_vm(env, get_field(val, SATP32_MODE));
1838 mask = (val ^ env->satp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
1839 asid = (val ^ env->satp) & SATP32_ASID;
1840 } else {
1841 vm = validate_vm(env, get_field(val, SATP64_MODE));
1842 mask = (val ^ env->satp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
1843 asid = (val ^ env->satp) & SATP64_ASID;
1846 if (vm && mask) {
1847 if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
1848 return RISCV_EXCP_ILLEGAL_INST;
1849 } else {
1850 if (asid) {
1851 tlb_flush(env_cpu(env));
1853 env->satp = val;
1856 return RISCV_EXCP_NONE;
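/*
 * Illustration (hypothetical helper, assuming the RV64 satp layout): satp
 * packs MODE, ASID and PPN, and write_satp() above only flushes the TLB
 * when the ASID bits change.
 */
static void example_satp_fields(target_ulong satp, target_ulong *mode,
                                target_ulong *asid, target_ulong *ppn)
{
    *mode = get_field(satp, SATP64_MODE);
    *asid = get_field(satp, SATP64_ASID);
    *ppn = get_field(satp, SATP64_PPN);
}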
1859 static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val)
1861 int irq, ret;
1862 target_ulong topei;
1863 uint64_t vseip, vsgein;
1864 uint32_t iid, iprio, hviid, hviprio, gein;
1865 uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS];
1867 gein = get_field(env->hstatus, HSTATUS_VGEIN);
1868 hviid = get_field(env->hvictl, HVICTL_IID);
1869 hviprio = get_field(env->hvictl, HVICTL_IPRIO);
1871 if (gein) {
1872 vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
1873 vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP;
1874 if (gein <= env->geilen && vseip) {
1875 siid[scount] = IRQ_S_EXT;
1876 siprio[scount] = IPRIO_MMAXIPRIO + 1;
1877 if (env->aia_ireg_rmw_fn[PRV_S]) {
1879 * Call machine specific IMSIC register emulation for
1880 * reading TOPEI.
1882 ret = env->aia_ireg_rmw_fn[PRV_S](
1883 env->aia_ireg_rmw_fn_arg[PRV_S],
1884 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein,
1885 riscv_cpu_mxl_bits(env)),
1886 &topei, 0, 0);
1887 if (!ret && topei) {
1888 siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK;
1891 scount++;
1893 } else {
1894 if (hviid == IRQ_S_EXT && hviprio) {
1895 siid[scount] = IRQ_S_EXT;
1896 siprio[scount] = hviprio;
1897 scount++;
1901 if (env->hvictl & HVICTL_VTI) {
1902 if (hviid != IRQ_S_EXT) {
1903 siid[scount] = hviid;
1904 siprio[scount] = hviprio;
1905 scount++;
1907 } else {
1908 irq = riscv_cpu_vsirq_pending(env);
1909 if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) {
1910 siid[scount] = irq;
1911 siprio[scount] = env->hviprio[irq];
1912 scount++;
1916 iid = 0;
1917 iprio = UINT_MAX;
1918 for (s = 0; s < scount; s++) {
1919 if (siprio[s] < iprio) {
1920 iid = siid[s];
1921 iprio = siprio[s];
1925 if (iid) {
1926 if (env->hvictl & HVICTL_IPRIOM) {
1927 if (iprio > IPRIO_MMAXIPRIO) {
1928 iprio = IPRIO_MMAXIPRIO;
1930 if (!iprio) {
1931 if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) {
1932 iprio = IPRIO_MMAXIPRIO;
1935 } else {
1936 iprio = 1;
1938 } else {
1939 iprio = 0;
1942 *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT;
1943 *val |= iprio;
1944 return RISCV_EXCP_NONE;
1947 static int read_stopi(CPURISCVState *env, int csrno, target_ulong *val)
1949 int irq;
1950 uint8_t iprio;
1952 if (riscv_cpu_virt_enabled(env)) {
1953 return read_vstopi(env, CSR_VSTOPI, val);
1956 irq = riscv_cpu_sirq_pending(env);
1957 if (irq <= 0 || irq > 63) {
1958 *val = 0;
1959 } else {
1960 iprio = env->siprio[irq];
1961 if (!iprio) {
1962 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) {
1963 iprio = IPRIO_MMAXIPRIO;
1966 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
1967 *val |= iprio;
1970 return RISCV_EXCP_NONE;
1973 /* Hypervisor Extensions */
1974 static RISCVException read_hstatus(CPURISCVState *env, int csrno,
1975 target_ulong *val)
1977 *val = env->hstatus;
1978 if (riscv_cpu_mxl(env) != MXL_RV32) {
1979 /* We only support 64-bit VSXL */
1980 *val = set_field(*val, HSTATUS_VSXL, 2);
1982 /* We only support little endian */
1983 *val = set_field(*val, HSTATUS_VSBE, 0);
1984 return RISCV_EXCP_NONE;
1987 static RISCVException write_hstatus(CPURISCVState *env, int csrno,
1988 target_ulong val)
1990 env->hstatus = val;
1991 if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
1992 qemu_log_mask(LOG_UNIMP, "QEMU does not support mixed HSXLEN options.");
1994 if (get_field(val, HSTATUS_VSBE) != 0) {
1995 qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests.");
1997 return RISCV_EXCP_NONE;
2000 static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
2001 target_ulong *val)
2003 *val = env->hedeleg;
2004 return RISCV_EXCP_NONE;
2007 static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
2008 target_ulong val)
2010 env->hedeleg = val & vs_delegable_excps;
2011 return RISCV_EXCP_NONE;
2014 static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno,
2015 uint64_t *ret_val,
2016 uint64_t new_val, uint64_t wr_mask)
2018 uint64_t mask = wr_mask & vs_delegable_ints;
2020 if (ret_val) {
2021 *ret_val = env->hideleg & vs_delegable_ints;
2024 env->hideleg = (env->hideleg & ~mask) | (new_val & mask);
2025 return RISCV_EXCP_NONE;
2028 static RISCVException rmw_hideleg(CPURISCVState *env, int csrno,
2029 target_ulong *ret_val,
2030 target_ulong new_val, target_ulong wr_mask)
2032 uint64_t rval;
2033 RISCVException ret;
2035 ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask);
2036 if (ret_val) {
2037 *ret_val = rval;
2040 return ret;
2043 static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno,
2044 target_ulong *ret_val,
2045 target_ulong new_val, target_ulong wr_mask)
2047 uint64_t rval;
2048 RISCVException ret;
2050 ret = rmw_hideleg64(env, csrno, &rval,
2051 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2052 if (ret_val) {
2053 *ret_val = rval >> 32;
2056 return ret;
2059 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
2060 uint64_t *ret_val,
2061 uint64_t new_val, uint64_t wr_mask)
2063 RISCVException ret;
2065 ret = rmw_mip64(env, csrno, ret_val, new_val,
2066 wr_mask & hvip_writable_mask);
2067 if (ret_val) {
2068 *ret_val &= VS_MODE_INTERRUPTS;
2071 return ret;
2074 static RISCVException rmw_hvip(CPURISCVState *env, int csrno,
2075 target_ulong *ret_val,
2076 target_ulong new_val, target_ulong wr_mask)
2078 uint64_t rval;
2079 RISCVException ret;
2081 ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask);
2082 if (ret_val) {
2083 *ret_val = rval;
2086 return ret;
2089 static RISCVException rmw_hviph(CPURISCVState *env, int csrno,
2090 target_ulong *ret_val,
2091 target_ulong new_val, target_ulong wr_mask)
2093 uint64_t rval;
2094 RISCVException ret;
2096 ret = rmw_hvip64(env, csrno, &rval,
2097 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2098 if (ret_val) {
2099 *ret_val = rval >> 32;
2102 return ret;
2105 static RISCVException rmw_hip(CPURISCVState *env, int csrno,
2106 target_ulong *ret_value,
2107 target_ulong new_value, target_ulong write_mask)
2109 int ret = rmw_mip(env, csrno, ret_value, new_value,
2110 write_mask & hip_writable_mask);
2112 if (ret_value) {
2113 *ret_value &= HS_MODE_INTERRUPTS;
2115 return ret;
2118 static RISCVException rmw_hie(CPURISCVState *env, int csrno,
2119 target_ulong *ret_val,
2120 target_ulong new_val, target_ulong wr_mask)
2122 uint64_t rval;
2123 RISCVException ret;
2125 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS);
2126 if (ret_val) {
2127 *ret_val = rval & HS_MODE_INTERRUPTS;
2130 return ret;
2133 static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
2134 target_ulong *val)
2136 *val = env->hcounteren;
2137 return RISCV_EXCP_NONE;
2140 static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
2141 target_ulong val)
2143 env->hcounteren = val;
2144 return RISCV_EXCP_NONE;
2147 static RISCVException read_hgeie(CPURISCVState *env, int csrno,
2148 target_ulong *val)
2150 if (val) {
2151 *val = env->hgeie;
2153 return RISCV_EXCP_NONE;
2156 static RISCVException write_hgeie(CPURISCVState *env, int csrno,
2157 target_ulong val)
2159 /* Only bits GEILEN:1 are implemented; bit 0 is never implemented */
2160 val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
2161 env->hgeie = val;
2162 /* Update mip.SGEIP bit */
2163 riscv_cpu_update_mip(env_archcpu(env), MIP_SGEIP,
2164 BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
2165 return RISCV_EXCP_NONE;
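/*
 * Worked example (hypothetical helper): for geilen = 3 the mask computed
 * above is ((1 << 3) - 1) << 1 = 0xe, so only hgeie bits 1..3 are
 * writable and bit 0 stays hard-wired to zero.
 */
static target_ulong example_hgeie_writable_mask(unsigned geilen)
{
    return ((((target_ulong)1) << geilen) - 1) << 1;
}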
2168 static RISCVException read_htval(CPURISCVState *env, int csrno,
2169 target_ulong *val)
2171 *val = env->htval;
2172 return RISCV_EXCP_NONE;
2175 static RISCVException write_htval(CPURISCVState *env, int csrno,
2176 target_ulong val)
2178 env->htval = val;
2179 return RISCV_EXCP_NONE;
2182 static RISCVException read_htinst(CPURISCVState *env, int csrno,
2183 target_ulong *val)
2185 *val = env->htinst;
2186 return RISCV_EXCP_NONE;
2189 static RISCVException write_htinst(CPURISCVState *env, int csrno,
2190 target_ulong val)
2192 return RISCV_EXCP_NONE;
2195 static RISCVException read_hgeip(CPURISCVState *env, int csrno,
2196 target_ulong *val)
2198 if (val) {
2199 *val = env->hgeip;
2201 return RISCV_EXCP_NONE;
2204 static RISCVException read_hgatp(CPURISCVState *env, int csrno,
2205 target_ulong *val)
2207 *val = env->hgatp;
2208 return RISCV_EXCP_NONE;
2211 static RISCVException write_hgatp(CPURISCVState *env, int csrno,
2212 target_ulong val)
2214 env->hgatp = val;
2215 return RISCV_EXCP_NONE;
2218 static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
2219 target_ulong *val)
2221 if (!env->rdtime_fn) {
2222 return RISCV_EXCP_ILLEGAL_INST;
2225 *val = env->htimedelta;
2226 return RISCV_EXCP_NONE;
2229 static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
2230 target_ulong val)
2232 if (!env->rdtime_fn) {
2233 return RISCV_EXCP_ILLEGAL_INST;
2236 if (riscv_cpu_mxl(env) == MXL_RV32) {
2237 env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
2238 } else {
2239 env->htimedelta = val;
2241 return RISCV_EXCP_NONE;
2244 static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
2245 target_ulong *val)
2247 if (!env->rdtime_fn) {
2248 return RISCV_EXCP_ILLEGAL_INST;
2251 *val = env->htimedelta >> 32;
2252 return RISCV_EXCP_NONE;
2255 static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
2256 target_ulong val)
2258 if (!env->rdtime_fn) {
2259 return RISCV_EXCP_ILLEGAL_INST;
2262 env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);
2263 return RISCV_EXCP_NONE;
2266 static int read_hvictl(CPURISCVState *env, int csrno, target_ulong *val)
2268 *val = env->hvictl;
2269 return RISCV_EXCP_NONE;
2272 static int write_hvictl(CPURISCVState *env, int csrno, target_ulong val)
2274 env->hvictl = val & HVICTL_VALID_MASK;
2275 return RISCV_EXCP_NONE;
2278 static int read_hvipriox(CPURISCVState *env, int first_index,
2279 uint8_t *iprio, target_ulong *val)
2281 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
2283 /* The first index has to be a multiple of the number of irqs per register */
2284 if (first_index % num_irqs) {
2285 return (riscv_cpu_virt_enabled(env)) ?
2286 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
2289 /* Fill up the return value */
2290 *val = 0;
2291 for (i = 0; i < num_irqs; i++) {
2292 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
2293 continue;
2295 if (rdzero) {
2296 continue;
2298 *val |= ((target_ulong)iprio[irq]) << (i * 8);
2301 return RISCV_EXCP_NONE;
2304 static int write_hvipriox(CPURISCVState *env, int first_index,
2305 uint8_t *iprio, target_ulong val)
2307 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
2309 /* The first index has to be a multiple of the number of irqs per register */
2310 if (first_index % num_irqs) {
2311 return (riscv_cpu_virt_enabled(env)) ?
2312 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
2315 /* Fill up the priority array */
2316 for (i = 0; i < num_irqs; i++) {
2317 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
2318 continue;
2320 if (rdzero) {
2321 iprio[irq] = 0;
2322 } else {
2323 iprio[irq] = (val >> (i * 8)) & 0xff;
2327 return RISCV_EXCP_NONE;
2330 static int read_hviprio1(CPURISCVState *env, int csrno, target_ulong *val)
2332 return read_hvipriox(env, 0, env->hviprio, val);
2335 static int write_hviprio1(CPURISCVState *env, int csrno, target_ulong val)
2337 return write_hvipriox(env, 0, env->hviprio, val);
2340 static int read_hviprio1h(CPURISCVState *env, int csrno, target_ulong *val)
2342 return read_hvipriox(env, 4, env->hviprio, val);
2345 static int write_hviprio1h(CPURISCVState *env, int csrno, target_ulong val)
2347 return write_hvipriox(env, 4, env->hviprio, val);
2350 static int read_hviprio2(CPURISCVState *env, int csrno, target_ulong *val)
2352 return read_hvipriox(env, 8, env->hviprio, val);
2355 static int write_hviprio2(CPURISCVState *env, int csrno, target_ulong val)
2357 return write_hvipriox(env, 8, env->hviprio, val);
2360 static int read_hviprio2h(CPURISCVState *env, int csrno, target_ulong *val)
2362 return read_hvipriox(env, 12, env->hviprio, val);
2365 static int write_hviprio2h(CPURISCVState *env, int csrno, target_ulong val)
2367 return write_hvipriox(env, 12, env->hviprio, val);
2370 /* Virtual CSR Registers */
2371 static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
2372 target_ulong *val)
2374 *val = env->vsstatus;
2375 return RISCV_EXCP_NONE;
2378 static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
2379 target_ulong val)
2381 uint64_t mask = (target_ulong)-1;
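    /* A write whose UXL field is zero leaves the current UXL value unchanged. */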
2382 if ((val & VSSTATUS64_UXL) == 0) {
2383 mask &= ~VSSTATUS64_UXL;
2385 env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
2386 return RISCV_EXCP_NONE;
2389 static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val)
2391 *val = env->vstvec;
2392 return RISCV_EXCP_NONE;
2395 static RISCVException write_vstvec(CPURISCVState *env, int csrno,
2396 target_ulong val)
2398 env->vstvec = val;
2399 return RISCV_EXCP_NONE;
2402 static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
2403 target_ulong *val)
2405 *val = env->vsscratch;
2406 return RISCV_EXCP_NONE;
2409 static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
2410 target_ulong val)
2412 env->vsscratch = val;
2413 return RISCV_EXCP_NONE;
2416 static RISCVException read_vsepc(CPURISCVState *env, int csrno,
2417 target_ulong *val)
2419 *val = env->vsepc;
2420 return RISCV_EXCP_NONE;
2423 static RISCVException write_vsepc(CPURISCVState *env, int csrno,
2424 target_ulong val)
2426 env->vsepc = val;
2427 return RISCV_EXCP_NONE;
2430 static RISCVException read_vscause(CPURISCVState *env, int csrno,
2431 target_ulong *val)
2433 *val = env->vscause;
2434 return RISCV_EXCP_NONE;
2437 static RISCVException write_vscause(CPURISCVState *env, int csrno,
2438 target_ulong val)
2440 env->vscause = val;
2441 return RISCV_EXCP_NONE;
2444 static RISCVException read_vstval(CPURISCVState *env, int csrno,
2445 target_ulong *val)
2447 *val = env->vstval;
2448 return RISCV_EXCP_NONE;
2451 static RISCVException write_vstval(CPURISCVState *env, int csrno,
2452 target_ulong val)
2454 env->vstval = val;
2455 return RISCV_EXCP_NONE;
2458 static RISCVException read_vsatp(CPURISCVState *env, int csrno,
2459 target_ulong *val)
2461 *val = env->vsatp;
2462 return RISCV_EXCP_NONE;
2465 static RISCVException write_vsatp(CPURISCVState *env, int csrno,
2466 target_ulong val)
2468 env->vsatp = val;
2469 return RISCV_EXCP_NONE;
2472 static RISCVException read_mtval2(CPURISCVState *env, int csrno,
2473 target_ulong *val)
2475 *val = env->mtval2;
2476 return RISCV_EXCP_NONE;
2479 static RISCVException write_mtval2(CPURISCVState *env, int csrno,
2480 target_ulong val)
2482 env->mtval2 = val;
2483 return RISCV_EXCP_NONE;
2486 static RISCVException read_mtinst(CPURISCVState *env, int csrno,
2487 target_ulong *val)
2489 *val = env->mtinst;
2490 return RISCV_EXCP_NONE;
2493 static RISCVException write_mtinst(CPURISCVState *env, int csrno,
2494 target_ulong val)
2496 env->mtinst = val;
2497 return RISCV_EXCP_NONE;
2500 /* Physical Memory Protection */
2501 static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
2502 target_ulong *val)
2504 *val = mseccfg_csr_read(env);
2505 return RISCV_EXCP_NONE;
2508 static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
2509 target_ulong val)
2511 mseccfg_csr_write(env, val);
2512 return RISCV_EXCP_NONE;
2515 static bool check_pmp_reg_index(CPURISCVState *env, uint32_t reg_index)
2517 /* TODO: RV128 restriction check */
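    /* Odd-numbered pmpcfg registers (pmpcfg1, pmpcfg3, ...) only exist on RV32. */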
2518 if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
2519 return false;
2521 return true;
2524 static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
2525 target_ulong *val)
2527 uint32_t reg_index = csrno - CSR_PMPCFG0;
2529 if (!check_pmp_reg_index(env, reg_index)) {
2530 return RISCV_EXCP_ILLEGAL_INST;
2532 *val = pmpcfg_csr_read(env, csrno - CSR_PMPCFG0);
2533 return RISCV_EXCP_NONE;
2536 static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
2537 target_ulong val)
2539 uint32_t reg_index = csrno - CSR_PMPCFG0;
2541 if (!check_pmp_reg_index(env, reg_index)) {
2542 return RISCV_EXCP_ILLEGAL_INST;
2544 pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val);
2545 return RISCV_EXCP_NONE;
2548 static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
2549 target_ulong *val)
2551 *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
2552 return RISCV_EXCP_NONE;
2555 static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
2556 target_ulong val)
2558 pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
2559 return RISCV_EXCP_NONE;
2563 * Functions to access Pointer Masking feature registers.
2564 * We have to check whether the current privilege level is allowed to
2565 * modify the CSR for the given mode.
2567 static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
2569 int csr_priv = get_field(csrno, 0x300);
2570 int pm_current;
2572 if (env->debugger) {
2573 return false;
2576 * If the privilege levels differ, we are accessing the CSR from a higher
2577 * privilege level, so allow the access.
2579 if (env->priv != csr_priv) {
2580 return false;
2582 switch (env->priv) {
2583 case PRV_M:
2584 pm_current = get_field(env->mmte, M_PM_CURRENT);
2585 break;
2586 case PRV_S:
2587 pm_current = get_field(env->mmte, S_PM_CURRENT);
2588 break;
2589 case PRV_U:
2590 pm_current = get_field(env->mmte, U_PM_CURRENT);
2591 break;
2592 default:
2593 g_assert_not_reached();
2595 /* Same privilege level, so the CSR may only be modified if pm.current==1 */
2596 return !pm_current;
2599 static RISCVException read_mmte(CPURISCVState *env, int csrno,
2600 target_ulong *val)
2602 *val = env->mmte & MMTE_MASK;
2603 return RISCV_EXCP_NONE;
2606 static RISCVException write_mmte(CPURISCVState *env, int csrno,
2607 target_ulong val)
2609 uint64_t mstatus;
2610 target_ulong wpri_val = val & MMTE_MASK;
2612 if (val != wpri_val) {
2613 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
2614 "MMTE: WPRI violation written 0x", val,
2615 "vs expected 0x", wpri_val);
2617 /* for machine mode pm.current is hardwired to 1 */
2618 wpri_val |= MMTE_M_PM_CURRENT;
2620 /* hardwiring pm.instruction bit to 0, since it's not supported yet */
2621 wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
2622 env->mmte = wpri_val | PM_EXT_DIRTY;
2623 riscv_cpu_update_mask(env);
2625 /* Set XS and SD bits, since PM CSRs are dirty */
2626 mstatus = env->mstatus | MSTATUS_XS;
2627 write_mstatus(env, csrno, mstatus);
2628 return RISCV_EXCP_NONE;
2631 static RISCVException read_smte(CPURISCVState *env, int csrno,
2632 target_ulong *val)
2634 *val = env->mmte & SMTE_MASK;
2635 return RISCV_EXCP_NONE;
2638 static RISCVException write_smte(CPURISCVState *env, int csrno,
2639 target_ulong val)
2641 target_ulong wpri_val = val & SMTE_MASK;
2643 if (val != wpri_val) {
2644 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
2645 "SMTE: WPRI violation written 0x", val,
2646 "vs expected 0x", wpri_val);
2649 /* if pm.current==0 we can't modify current PM CSRs */
2650 if (check_pm_current_disabled(env, csrno)) {
2651 return RISCV_EXCP_NONE;
2654 wpri_val |= (env->mmte & ~SMTE_MASK);
2655 write_mmte(env, csrno, wpri_val);
2656 return RISCV_EXCP_NONE;
2659 static RISCVException read_umte(CPURISCVState *env, int csrno,
2660 target_ulong *val)
2662 *val = env->mmte & UMTE_MASK;
2663 return RISCV_EXCP_NONE;
2666 static RISCVException write_umte(CPURISCVState *env, int csrno,
2667 target_ulong val)
2669 target_ulong wpri_val = val & UMTE_MASK;
2671 if (val != wpri_val) {
2672 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
2673 "UMTE: WPRI violation written 0x", val,
2674 "vs expected 0x", wpri_val);
2677 if (check_pm_current_disabled(env, csrno)) {
2678 return RISCV_EXCP_NONE;
2681 wpri_val |= (env->mmte & ~UMTE_MASK);
2682 write_mmte(env, csrno, wpri_val);
2683 return RISCV_EXCP_NONE;
2686 static RISCVException read_mpmmask(CPURISCVState *env, int csrno,
2687 target_ulong *val)
2689 *val = env->mpmmask;
2690 return RISCV_EXCP_NONE;
2693 static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
2694 target_ulong val)
2696 uint64_t mstatus;
2698 env->mpmmask = val;
2699 if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
2700 env->cur_pmmask = val;
2702 env->mmte |= PM_EXT_DIRTY;
2704 /* Set XS and SD bits, since PM CSRs are dirty */
2705 mstatus = env->mstatus | MSTATUS_XS;
2706 write_mstatus(env, csrno, mstatus);
2707 return RISCV_EXCP_NONE;
2710 static RISCVException read_spmmask(CPURISCVState *env, int csrno,
2711 target_ulong *val)
2713 *val = env->spmmask;
2714 return RISCV_EXCP_NONE;
2717 static RISCVException write_spmmask(CPURISCVState *env, int csrno,
2718 target_ulong val)
2720 uint64_t mstatus;
2722 /* if pm.current==0 we can't modify current PM CSRs */
2723 if (check_pm_current_disabled(env, csrno)) {
2724 return RISCV_EXCP_NONE;
2726 env->spmmask = val;
2727 if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
2728 env->cur_pmmask = val;
2730 env->mmte |= PM_EXT_DIRTY;
2732 /* Set XS and SD bits, since PM CSRs are dirty */
2733 mstatus = env->mstatus | MSTATUS_XS;
2734 write_mstatus(env, csrno, mstatus);
2735 return RISCV_EXCP_NONE;
2738 static RISCVException read_upmmask(CPURISCVState *env, int csrno,
2739 target_ulong *val)
2741 *val = env->upmmask;
2742 return RISCV_EXCP_NONE;
2745 static RISCVException write_upmmask(CPURISCVState *env, int csrno,
2746 target_ulong val)
2748 uint64_t mstatus;
2750 /* if pm.current==0 we can't modify current PM CSRs */
2751 if (check_pm_current_disabled(env, csrno)) {
2752 return RISCV_EXCP_NONE;
2754 env->upmmask = val;
2755 if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
2756 env->cur_pmmask = val;
2758 env->mmte |= PM_EXT_DIRTY;
2760 /* Set XS and SD bits, since PM CSRs are dirty */
2761 mstatus = env->mstatus | MSTATUS_XS;
2762 write_mstatus(env, csrno, mstatus);
2763 return RISCV_EXCP_NONE;
2766 static RISCVException read_mpmbase(CPURISCVState *env, int csrno,
2767 target_ulong *val)
2769 *val = env->mpmbase;
2770 return RISCV_EXCP_NONE;
2773 static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
2774 target_ulong val)
2776 uint64_t mstatus;
2778 env->mpmbase = val;
2779 if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
2780 env->cur_pmbase = val;
2782 env->mmte |= PM_EXT_DIRTY;
2784 /* Set XS and SD bits, since PM CSRs are dirty */
2785 mstatus = env->mstatus | MSTATUS_XS;
2786 write_mstatus(env, csrno, mstatus);
2787 return RISCV_EXCP_NONE;
2790 static RISCVException read_spmbase(CPURISCVState *env, int csrno,
2791 target_ulong *val)
2793 *val = env->spmbase;
2794 return RISCV_EXCP_NONE;
2797 static RISCVException write_spmbase(CPURISCVState *env, int csrno,
2798 target_ulong val)
2800 uint64_t mstatus;
2802 /* if pm.current==0 we can't modify current PM CSRs */
2803 if (check_pm_current_disabled(env, csrno)) {
2804 return RISCV_EXCP_NONE;
2806 env->spmbase = val;
2807 if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
2808 env->cur_pmbase = val;
2810 env->mmte |= PM_EXT_DIRTY;
2812 /* Set XS and SD bits, since PM CSRs are dirty */
2813 mstatus = env->mstatus | MSTATUS_XS;
2814 write_mstatus(env, csrno, mstatus);
2815 return RISCV_EXCP_NONE;
2818 static RISCVException read_upmbase(CPURISCVState *env, int csrno,
2819 target_ulong *val)
2821 *val = env->upmbase;
2822 return RISCV_EXCP_NONE;
2825 static RISCVException write_upmbase(CPURISCVState *env, int csrno,
2826 target_ulong val)
2828 uint64_t mstatus;
2830 /* if pm.current==0 we can't modify current PM CSRs */
2831 if (check_pm_current_disabled(env, csrno)) {
2832 return RISCV_EXCP_NONE;
2834 env->upmbase = val;
2835 if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
2836 env->cur_pmbase = val;
2838 env->mmte |= PM_EXT_DIRTY;
2840 /* Set XS and SD bits, since PM CSRs are dirty */
2841 mstatus = env->mstatus | MSTATUS_XS;
2842 write_mstatus(env, csrno, mstatus);
2843 return RISCV_EXCP_NONE;
2846 #endif
2849 * riscv_csrrw - read and/or update control and status register
2851 * csrr <-> riscv_csrrw(env, csrno, ret_value, 0, 0);
2852 * csrrw <-> riscv_csrrw(env, csrno, ret_value, value, -1);
2853 * csrrs <-> riscv_csrrw(env, csrno, ret_value, -1, value);
2854 * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value);
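 *
 * As an illustrative sketch (hypothetical call site, not code from this
 * file), a csrrs-style "set SIE in sstatus" would be:
 *
 *   target_ulong old;
 *   riscv_csrrw(env, CSR_SSTATUS, &old, (target_ulong)-1, MSTATUS_SIE);
 *
 * i.e. new_value = -1 with write_mask = MSTATUS_SIE sets exactly the masked
 * bit and returns the previous CSR value in old.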
2857 static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
2858 int csrno,
2859 bool write_mask,
2860 RISCVCPU *cpu)
2862 /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
2863 int read_only = get_field(csrno, 0xC00) == 3;
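    /*
     * Per the standard CSR address encoding, csrno[11:10] == 0b11 marks a
     * read-only CSR and csrno[9:8] is the lowest privilege level allowed to
     * access it (checked against effective_priv below).
     */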
2864 #if !defined(CONFIG_USER_ONLY)
2865 int effective_priv = env->priv;
2867 if (riscv_has_ext(env, RVH) &&
2868 env->priv == PRV_S &&
2869 !riscv_cpu_virt_enabled(env)) {
2871 * We are in S mode without virtualisation, therefore we are in HS Mode.
2872 * Add 1 to the effective privilege level to allow us to access the
2873 * Hypervisor CSRs.
2875 effective_priv++;
2878 if (!env->debugger && (effective_priv < get_field(csrno, 0x300))) {
2879 return RISCV_EXCP_ILLEGAL_INST;
2881 #endif
2882 if (write_mask && read_only) {
2883 return RISCV_EXCP_ILLEGAL_INST;
2886 /* ensure the CSR extension is enabled. */
2887 if (!cpu->cfg.ext_icsr) {
2888 return RISCV_EXCP_ILLEGAL_INST;
2891 /* check predicate */
2892 if (!csr_ops[csrno].predicate) {
2893 return RISCV_EXCP_ILLEGAL_INST;
2896 return csr_ops[csrno].predicate(env, csrno);
2899 static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
2900 target_ulong *ret_value,
2901 target_ulong new_value,
2902 target_ulong write_mask)
2904 RISCVException ret;
2905 target_ulong old_value;
2907 /* execute combined read/write operation if it exists */
2908 if (csr_ops[csrno].op) {
2909 return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
2912 /* if no accessor exists then return failure */
2913 if (!csr_ops[csrno].read) {
2914 return RISCV_EXCP_ILLEGAL_INST;
2916 /* read old value */
2917 ret = csr_ops[csrno].read(env, csrno, &old_value);
2918 if (ret != RISCV_EXCP_NONE) {
2919 return ret;
2922 /* write value if writable and write mask set, otherwise drop writes */
2923 if (write_mask) {
2924 new_value = (old_value & ~write_mask) | (new_value & write_mask);
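        /*
         * e.g. old_value=0xff00, new_value=0x00f0, write_mask=0x0ff0
         * -> (0xff00 & ~0x0ff0) | (0x00f0 & 0x0ff0) = 0xf000 | 0x00f0 = 0xf0f0
         */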
2925 if (csr_ops[csrno].write) {
2926 ret = csr_ops[csrno].write(env, csrno, new_value);
2927 if (ret != RISCV_EXCP_NONE) {
2928 return ret;
2933 /* return old value */
2934 if (ret_value) {
2935 *ret_value = old_value;
2938 return RISCV_EXCP_NONE;
2941 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
2942 target_ulong *ret_value,
2943 target_ulong new_value, target_ulong write_mask)
2945 RISCVCPU *cpu = env_archcpu(env);
2947 RISCVException ret = riscv_csrrw_check(env, csrno, write_mask, cpu);
2948 if (ret != RISCV_EXCP_NONE) {
2949 return ret;
2952 return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
2955 static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
2956 Int128 *ret_value,
2957 Int128 new_value,
2958 Int128 write_mask)
2960 RISCVException ret;
2961 Int128 old_value;
2963 /* read old value */
2964 ret = csr_ops[csrno].read128(env, csrno, &old_value);
2965 if (ret != RISCV_EXCP_NONE) {
2966 return ret;
2969 /* write value if writable and write mask set, otherwise drop writes */
2970 if (int128_nz(write_mask)) {
2971 new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
2972 int128_and(new_value, write_mask));
2973 if (csr_ops[csrno].write128) {
2974 ret = csr_ops[csrno].write128(env, csrno, new_value);
2975 if (ret != RISCV_EXCP_NONE) {
2976 return ret;
2978 } else if (csr_ops[csrno].write) {
2979 /* avoids having to write wrappers for all registers */
2980 ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
2981 if (ret != RISCV_EXCP_NONE) {
2982 return ret;
2987 /* return old value */
2988 if (ret_value) {
2989 *ret_value = old_value;
2992 return RISCV_EXCP_NONE;
2995 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
2996 Int128 *ret_value,
2997 Int128 new_value, Int128 write_mask)
2999 RISCVException ret;
3000 RISCVCPU *cpu = env_archcpu(env);
3002 ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask), cpu);
3003 if (ret != RISCV_EXCP_NONE) {
3004 return ret;
3007 if (csr_ops[csrno].read128) {
3008 return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
3012 * Fall back to the 64-bit version for now, if the 128-bit alternative
3013 * isn't defined at all.
3014 * Note that some CSRs do not need to extend to MXLEN (the upper 64 bits
3015 * are not significant); for those, this fallback handles the accesses correctly.
3017 target_ulong old_value;
3018 ret = riscv_csrrw_do64(env, csrno, &old_value,
3019 int128_getlo(new_value),
3020 int128_getlo(write_mask));
3021 if (ret == RISCV_EXCP_NONE && ret_value) {
3022 *ret_value = int128_make64(old_value);
3024 return ret;
3028 * Debugger support. If not in user mode, set env->debugger before the
3029 * riscv_csrrw call and clear it after the call.
3031 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
3032 target_ulong *ret_value,
3033 target_ulong new_value,
3034 target_ulong write_mask)
3036 RISCVException ret;
3037 #if !defined(CONFIG_USER_ONLY)
3038 env->debugger = true;
3039 #endif
3040 ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
3041 #if !defined(CONFIG_USER_ONLY)
3042 env->debugger = false;
3043 #endif
3044 return ret;
3047 /* Control and Status Register function table */
3048 riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
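    /*
     * Entries follow the riscv_csr_operations field order
     * { name, predicate, read, write, op, read128, write128 };
     * omitted trailing fields default to NULL.
     */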
3049 /* User Floating-Point CSRs */
3050 [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
3051 [CSR_FRM] = { "frm", fs, read_frm, write_frm },
3052 [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr },
3053 /* Vector CSRs */
3054 [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart },
3055 [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat },
3056 [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm },
3057 [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr },
3058 [CSR_VL] = { "vl", vs, read_vl },
3059 [CSR_VTYPE] = { "vtype", vs, read_vtype },
3060 [CSR_VLENB] = { "vlenb", vs, read_vlenb },
3061 /* User Timers and Counters */
3062 [CSR_CYCLE] = { "cycle", ctr, read_instret },
3063 [CSR_INSTRET] = { "instret", ctr, read_instret },
3064 [CSR_CYCLEH] = { "cycleh", ctr32, read_instreth },
3065 [CSR_INSTRETH] = { "instreth", ctr32, read_instreth },
3068 * In privileged mode, the monitor has to emulate the TIME CSRs only if the
3069 * rdtime callback is not provided by machine/platform emulation.
3071 [CSR_TIME] = { "time", ctr, read_time },
3072 [CSR_TIMEH] = { "timeh", ctr32, read_timeh },
3074 #if !defined(CONFIG_USER_ONLY)
3075 /* Machine Timers and Counters */
3076 [CSR_MCYCLE] = { "mcycle", any, read_instret },
3077 [CSR_MINSTRET] = { "minstret", any, read_instret },
3078 [CSR_MCYCLEH] = { "mcycleh", any32, read_instreth },
3079 [CSR_MINSTRETH] = { "minstreth", any32, read_instreth },
3081 /* Machine Information Registers */
3082 [CSR_MVENDORID] = { "mvendorid", any, read_zero },
3083 [CSR_MARCHID] = { "marchid", any, read_zero },
3084 [CSR_MIMPID] = { "mimpid", any, read_zero },
3085 [CSR_MHARTID] = { "mhartid", any, read_mhartid },
3087 /* Machine Trap Setup */
3088 [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus, NULL,
3089 read_mstatus_i128 },
3090 [CSR_MISA] = { "misa", any, read_misa, write_misa, NULL,
3091 read_misa_i128 },
3092 [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg },
3093 [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg },
3094 [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie },
3095 [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec },
3096 [CSR_MCOUNTEREN] = { "mcounteren", any, read_mcounteren, write_mcounteren },
3098 [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush, write_mstatush },
3100 /* Machine Trap Handling */
3101 [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch, NULL,
3102 read_mscratch_i128, write_mscratch_i128 },
3103 [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc },
3104 [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause },
3105 [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval },
3106 [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
3108 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */
3109 [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect },
3110 [CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg },
3112 /* Machine-Level Interrupts (AIA) */
3113 [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },
3115 /* Machine-Level IMSIC Interface (AIA) */
3116 [CSR_MSETEIPNUM] = { "mseteipnum", aia_any, NULL, NULL, rmw_xsetclreinum },
3117 [CSR_MCLREIPNUM] = { "mclreipnum", aia_any, NULL, NULL, rmw_xsetclreinum },
3118 [CSR_MSETEIENUM] = { "mseteienum", aia_any, NULL, NULL, rmw_xsetclreinum },
3119 [CSR_MCLREIENUM] = { "mclreienum", aia_any, NULL, NULL, rmw_xsetclreinum },
3120 [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
3122 /* Virtual Interrupts for Supervisor Level (AIA) */
3123 [CSR_MVIEN] = { "mvien", aia_any, read_zero, write_ignore },
3124 [CSR_MVIP] = { "mvip", aia_any, read_zero, write_ignore },
3126 /* Machine-Level High-Half CSRs (AIA) */
3127 [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh },
3128 [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh },
3129 [CSR_MVIENH] = { "mvienh", aia_any32, read_zero, write_ignore },
3130 [CSR_MVIPH] = { "mviph", aia_any32, read_zero, write_ignore },
3131 [CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph },
3133 /* Supervisor Trap Setup */
3134 [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus, NULL,
3135 read_sstatus_i128 },
3136 [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie },
3137 [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec },
3138 [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren, write_scounteren },
3140 /* Supervisor Trap Handling */
3141 [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch, NULL,
3142 read_sscratch_i128, write_sscratch_i128 },
3143 [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc },
3144 [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause },
3145 [CSR_STVAL] = { "stval", smode, read_stval, write_stval },
3146 [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip },
3148 /* Supervisor Protection and Translation */
3149 [CSR_SATP] = { "satp", smode, read_satp, write_satp },
3151 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
3152 [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect },
3153 [CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg },
3155 /* Supervisor-Level Interrupts (AIA) */
3156 [CSR_STOPI] = { "stopi", aia_smode, read_stopi },
3158 /* Supervisor-Level IMSIC Interface (AIA) */
3159 [CSR_SSETEIPNUM] = { "sseteipnum", aia_smode, NULL, NULL, rmw_xsetclreinum },
3160 [CSR_SCLREIPNUM] = { "sclreipnum", aia_smode, NULL, NULL, rmw_xsetclreinum },
3161 [CSR_SSETEIENUM] = { "sseteienum", aia_smode, NULL, NULL, rmw_xsetclreinum },
3162 [CSR_SCLREIENUM] = { "sclreienum", aia_smode, NULL, NULL, rmw_xsetclreinum },
3163 [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
3165 /* Supervisor-Level High-Half CSRs (AIA) */
3166 [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh },
3167 [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph },
3169 [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus },
3170 [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg },
3171 [CSR_HIDELEG] = { "hideleg", hmode, NULL, NULL, rmw_hideleg },
3172 [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip },
3173 [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip },
3174 [CSR_HIE] = { "hie", hmode, NULL, NULL, rmw_hie },
3175 [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren, write_hcounteren },
3176 [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie },
3177 [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval },
3178 [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst },
3179 [CSR_HGEIP] = { "hgeip", hmode, read_hgeip, NULL },
3180 [CSR_HGATP] = { "hgatp", hmode, read_hgatp, write_hgatp },
3181 [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta, write_htimedelta },
3182 [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah, write_htimedeltah },
3184 [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus, write_vsstatus },
3185 [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip },
3186 [CSR_VSIE] = { "vsie", hmode, NULL, NULL, rmw_vsie },
3187 [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec },
3188 [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch, write_vsscratch },
3189 [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc },
3190 [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause },
3191 [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval },
3192 [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp },
3194 [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2 },
3195 [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst },
3197 /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
3198 [CSR_HVIEN] = { "hvien", aia_hmode, read_zero, write_ignore },
3199 [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl, write_hvictl },
3200 [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1, write_hviprio1 },
3201 [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2, write_hviprio2 },
3204 * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
3206 [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL, rmw_xiselect },
3207 [CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg },
3209 /* VS-Level Interrupts (H-extension with AIA) */
3210 [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi },
3212 /* VS-Level IMSIC Interface (H-extension with AIA) */
3213 [CSR_VSSETEIPNUM] = { "vsseteipnum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
3214 [CSR_VSCLREIPNUM] = { "vsclreipnum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
3215 [CSR_VSSETEIENUM] = { "vsseteienum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
3216 [CSR_VSCLREIENUM] = { "vsclreienum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
3217 [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
3219 /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
3220 [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL, rmw_hidelegh },
3221 [CSR_HVIENH] = { "hvienh", aia_hmode32, read_zero, write_ignore },
3222 [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph },
3223 [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h, write_hviprio1h },
3224 [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h, write_hviprio2h },
3225 [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh },
3226 [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph },
3228 /* Physical Memory Protection */
3229 [CSR_MSECCFG] = { "mseccfg", epmp, read_mseccfg, write_mseccfg },
3230 [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg },
3231 [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg },
3232 [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg },
3233 [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg },
3234 [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr },
3235 [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr },
3236 [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr },
3237 [CSR_PMPADDR3] = { "pmpaddr3", pmp, read_pmpaddr, write_pmpaddr },
3238 [CSR_PMPADDR4] = { "pmpaddr4", pmp, read_pmpaddr, write_pmpaddr },
3239 [CSR_PMPADDR5] = { "pmpaddr5", pmp, read_pmpaddr, write_pmpaddr },
3240 [CSR_PMPADDR6] = { "pmpaddr6", pmp, read_pmpaddr, write_pmpaddr },
3241 [CSR_PMPADDR7] = { "pmpaddr7", pmp, read_pmpaddr, write_pmpaddr },
3242 [CSR_PMPADDR8] = { "pmpaddr8", pmp, read_pmpaddr, write_pmpaddr },
3243 [CSR_PMPADDR9] = { "pmpaddr9", pmp, read_pmpaddr, write_pmpaddr },
3244 [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr },
3245 [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
3246 [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
3247 [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
3248 [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
3249 [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
3251 /* User Pointer Masking */
3252 [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte },
3253 [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask, write_upmmask },
3254 [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase, write_upmbase },
3255 /* Machine Pointer Masking */
3256 [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte },
3257 [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask, write_mpmmask },
3258 [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase, write_mpmbase },
3259 /* Supervisor Pointer Masking */
3260 [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte },
3261 [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask, write_spmmask },
3262 [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase, write_spmbase },
3264 /* Performance Counters */
3265 [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_zero },
3266 [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_zero },
3267 [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_zero },
3268 [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_zero },
3269 [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_zero },
3270 [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_zero },
3271 [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_zero },
3272 [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_zero },
3273 [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_zero },
3274 [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_zero },
3275 [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_zero },
3276 [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_zero },
3277 [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_zero },
3278 [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_zero },
3279 [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_zero },
3280 [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_zero },
3281 [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_zero },
3282 [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_zero },
3283 [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_zero },
3284 [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_zero },
3285 [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_zero },
3286 [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_zero },
3287 [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_zero },
3288 [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_zero },
3289 [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_zero },
3290 [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_zero },
3291 [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_zero },
3292 [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_zero },
3293 [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_zero },
3295 [CSR_MHPMCOUNTER3] = { "mhpmcounter3", any, read_zero },
3296 [CSR_MHPMCOUNTER4] = { "mhpmcounter4", any, read_zero },
3297 [CSR_MHPMCOUNTER5] = { "mhpmcounter5", any, read_zero },
3298 [CSR_MHPMCOUNTER6] = { "mhpmcounter6", any, read_zero },
3299 [CSR_MHPMCOUNTER7] = { "mhpmcounter7", any, read_zero },
3300 [CSR_MHPMCOUNTER8] = { "mhpmcounter8", any, read_zero },
3301 [CSR_MHPMCOUNTER9] = { "mhpmcounter9", any, read_zero },
3302 [CSR_MHPMCOUNTER10] = { "mhpmcounter10", any, read_zero },
3303 [CSR_MHPMCOUNTER11] = { "mhpmcounter11", any, read_zero },
3304 [CSR_MHPMCOUNTER12] = { "mhpmcounter12", any, read_zero },
3305 [CSR_MHPMCOUNTER13] = { "mhpmcounter13", any, read_zero },
3306 [CSR_MHPMCOUNTER14] = { "mhpmcounter14", any, read_zero },
3307 [CSR_MHPMCOUNTER15] = { "mhpmcounter15", any, read_zero },
3308 [CSR_MHPMCOUNTER16] = { "mhpmcounter16", any, read_zero },
3309 [CSR_MHPMCOUNTER17] = { "mhpmcounter17", any, read_zero },
3310 [CSR_MHPMCOUNTER18] = { "mhpmcounter18", any, read_zero },
3311 [CSR_MHPMCOUNTER19] = { "mhpmcounter19", any, read_zero },
3312 [CSR_MHPMCOUNTER20] = { "mhpmcounter20", any, read_zero },
3313 [CSR_MHPMCOUNTER21] = { "mhpmcounter21", any, read_zero },
3314 [CSR_MHPMCOUNTER22] = { "mhpmcounter22", any, read_zero },
3315 [CSR_MHPMCOUNTER23] = { "mhpmcounter23", any, read_zero },
3316 [CSR_MHPMCOUNTER24] = { "mhpmcounter24", any, read_zero },
3317 [CSR_MHPMCOUNTER25] = { "mhpmcounter25", any, read_zero },
3318 [CSR_MHPMCOUNTER26] = { "mhpmcounter26", any, read_zero },
3319 [CSR_MHPMCOUNTER27] = { "mhpmcounter27", any, read_zero },
3320 [CSR_MHPMCOUNTER28] = { "mhpmcounter28", any, read_zero },
3321 [CSR_MHPMCOUNTER29] = { "mhpmcounter29", any, read_zero },
3322 [CSR_MHPMCOUNTER30] = { "mhpmcounter30", any, read_zero },
3323 [CSR_MHPMCOUNTER31] = { "mhpmcounter31", any, read_zero },
3325 [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_zero },
3326 [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_zero },
3327 [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_zero },
3328 [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_zero },
3329 [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_zero },
3330 [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_zero },
3331 [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_zero },
3332 [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_zero },
3333 [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_zero },
3334 [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_zero },
3335 [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_zero },
3336 [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_zero },
3337 [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_zero },
3338 [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_zero },
3339 [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_zero },
3340 [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_zero },
3341 [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_zero },
3342 [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_zero },
3343 [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_zero },
3344 [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_zero },
3345 [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_zero },
3346 [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_zero },
3347 [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_zero },
3348 [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_zero },
3349 [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_zero },
3350 [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_zero },
3351 [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_zero },
3352 [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_zero },
3353 [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_zero },
3355 [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_zero },
3356 [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_zero },
3357 [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_zero },
3358 [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_zero },
3359 [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_zero },
3360 [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_zero },
3361 [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_zero },
3362 [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_zero },
3363 [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_zero },
3364 [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_zero },
3365 [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_zero },
3366 [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_zero },
3367 [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_zero },
3368 [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_zero },
3369 [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_zero },
3370 [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_zero },
3371 [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_zero },
3372 [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_zero },
3373 [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_zero },
3374 [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_zero },
3375 [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_zero },
3376 [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_zero },
3377 [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_zero },
3378 [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_zero },
3379 [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_zero },
3380 [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_zero },
3381 [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_zero },
3382 [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_zero },
3383 [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_zero },
3385 [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", any32, read_zero },
3386 [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", any32, read_zero },
3387 [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", any32, read_zero },
3388 [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", any32, read_zero },
3389 [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", any32, read_zero },
3390 [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", any32, read_zero },
3391 [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", any32, read_zero },
3392 [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", any32, read_zero },
3393 [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", any32, read_zero },
3394 [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", any32, read_zero },
3395 [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", any32, read_zero },
3396 [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", any32, read_zero },
3397 [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", any32, read_zero },
3398 [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", any32, read_zero },
3399 [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", any32, read_zero },
3400 [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", any32, read_zero },
3401 [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", any32, read_zero },
3402 [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", any32, read_zero },
3403 [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", any32, read_zero },
3404 [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", any32, read_zero },
3405 [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", any32, read_zero },
3406 [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", any32, read_zero },
3407 [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", any32, read_zero },
3408 [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", any32, read_zero },
3409 [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", any32, read_zero },
3410 [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", any32, read_zero },
3411 [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", any32, read_zero },
3412 [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", any32, read_zero },
3413 [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", any32, read_zero },
3414 #endif /* !CONFIG_USER_ONLY */