target/riscv/csr.c
1 /*
2 * RISC-V Control and Status Registers.
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/timer.h"
23 #include "cpu.h"
24 #include "qemu/main-loop.h"
25 #include "exec/exec-all.h"
27 /* CSR function table public API */
28 void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
30 *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
33 void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
35 csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
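/*
 * Note: both accessors mask the CSR number with (CSR_TABLE_SIZE - 1), so
 * lookups always stay inside the csr_ops[] table and out-of-range CSR
 * numbers simply alias into it instead of indexing past the end.
 */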
38 /* Predicates */
39 static RISCVException fs(CPURISCVState *env, int csrno)
41 #if !defined(CONFIG_USER_ONLY)
42 if (!env->debugger && !riscv_cpu_fp_enabled(env)) {
43 return RISCV_EXCP_ILLEGAL_INST;
45 #endif
46 return RISCV_EXCP_NONE;
49 static RISCVException vs(CPURISCVState *env, int csrno)
51 CPUState *cs = env_cpu(env);
52 RISCVCPU *cpu = RISCV_CPU(cs);
54 if (env->misa_ext & RVV ||
55 cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
56 #if !defined(CONFIG_USER_ONLY)
57 if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
58 return RISCV_EXCP_ILLEGAL_INST;
60 #endif
61 return RISCV_EXCP_NONE;
63 return RISCV_EXCP_ILLEGAL_INST;
66 static RISCVException ctr(CPURISCVState *env, int csrno)
68 #if !defined(CONFIG_USER_ONLY)
69 CPUState *cs = env_cpu(env);
70 RISCVCPU *cpu = RISCV_CPU(cs);
72 if (!cpu->cfg.ext_counters) {
73         /* The Counters extension is not enabled */
74 return RISCV_EXCP_ILLEGAL_INST;
77 if (riscv_cpu_virt_enabled(env)) {
78 switch (csrno) {
79 case CSR_CYCLE:
80 if (!get_field(env->hcounteren, COUNTEREN_CY) &&
81 get_field(env->mcounteren, COUNTEREN_CY)) {
82 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
84 break;
85 case CSR_TIME:
86 if (!get_field(env->hcounteren, COUNTEREN_TM) &&
87 get_field(env->mcounteren, COUNTEREN_TM)) {
88 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
90 break;
91 case CSR_INSTRET:
92 if (!get_field(env->hcounteren, COUNTEREN_IR) &&
93 get_field(env->mcounteren, COUNTEREN_IR)) {
94 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
96 break;
97 case CSR_HPMCOUNTER3...CSR_HPMCOUNTER31:
98 if (!get_field(env->hcounteren, 1 << (csrno - CSR_HPMCOUNTER3)) &&
99 get_field(env->mcounteren, 1 << (csrno - CSR_HPMCOUNTER3))) {
100 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
102 break;
104 if (riscv_cpu_mxl(env) == MXL_RV32) {
105 switch (csrno) {
106 case CSR_CYCLEH:
107 if (!get_field(env->hcounteren, COUNTEREN_CY) &&
108 get_field(env->mcounteren, COUNTEREN_CY)) {
109 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
111 break;
112 case CSR_TIMEH:
113 if (!get_field(env->hcounteren, COUNTEREN_TM) &&
114 get_field(env->mcounteren, COUNTEREN_TM)) {
115 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
117 break;
118 case CSR_INSTRETH:
119 if (!get_field(env->hcounteren, COUNTEREN_IR) &&
120 get_field(env->mcounteren, COUNTEREN_IR)) {
121 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
123 break;
124 case CSR_HPMCOUNTER3H...CSR_HPMCOUNTER31H:
125 if (!get_field(env->hcounteren, 1 << (csrno - CSR_HPMCOUNTER3H)) &&
126 get_field(env->mcounteren, 1 << (csrno - CSR_HPMCOUNTER3H))) {
127 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
129 break;
133 #endif
134 return RISCV_EXCP_NONE;
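/*
 * Note on the predicate above: counter CSR accesses from a virtualized
 * (VS/VU) context raise a virtual-instruction fault when the counter is
 * enabled in mcounteren but not in hcounteren; every other case falls
 * through to RISCV_EXCP_NONE and the access is allowed.
 */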
137 static RISCVException ctr32(CPURISCVState *env, int csrno)
139 if (riscv_cpu_mxl(env) != MXL_RV32) {
140 return RISCV_EXCP_ILLEGAL_INST;
143 return ctr(env, csrno);
146 #if !defined(CONFIG_USER_ONLY)
147 static RISCVException any(CPURISCVState *env, int csrno)
149 return RISCV_EXCP_NONE;
152 static RISCVException any32(CPURISCVState *env, int csrno)
154 if (riscv_cpu_mxl(env) != MXL_RV32) {
155 return RISCV_EXCP_ILLEGAL_INST;
158 return any(env, csrno);
162 static int aia_any(CPURISCVState *env, int csrno)
164 if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
165 return RISCV_EXCP_ILLEGAL_INST;
168 return any(env, csrno);
171 static int aia_any32(CPURISCVState *env, int csrno)
173 if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
174 return RISCV_EXCP_ILLEGAL_INST;
177 return any32(env, csrno);
180 static RISCVException smode(CPURISCVState *env, int csrno)
182 if (riscv_has_ext(env, RVS)) {
183 return RISCV_EXCP_NONE;
186 return RISCV_EXCP_ILLEGAL_INST;
189 static int smode32(CPURISCVState *env, int csrno)
191 if (riscv_cpu_mxl(env) != MXL_RV32) {
192 return RISCV_EXCP_ILLEGAL_INST;
195 return smode(env, csrno);
198 static int aia_smode(CPURISCVState *env, int csrno)
200 if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
201 return RISCV_EXCP_ILLEGAL_INST;
204 return smode(env, csrno);
207 static int aia_smode32(CPURISCVState *env, int csrno)
209 if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
210 return RISCV_EXCP_ILLEGAL_INST;
213 return smode32(env, csrno);
216 static RISCVException hmode(CPURISCVState *env, int csrno)
218 if (riscv_has_ext(env, RVS) &&
219 riscv_has_ext(env, RVH)) {
220 /* Hypervisor extension is supported */
221 if ((env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
222 env->priv == PRV_M) {
223 return RISCV_EXCP_NONE;
224 } else {
225 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
229 return RISCV_EXCP_ILLEGAL_INST;
232 static RISCVException hmode32(CPURISCVState *env, int csrno)
234 if (riscv_cpu_mxl(env) != MXL_RV32) {
235 if (!riscv_cpu_virt_enabled(env)) {
236 return RISCV_EXCP_ILLEGAL_INST;
237 } else {
238 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
242 return hmode(env, csrno);
246 /* Checks if PointerMasking registers could be accessed */
247 static RISCVException pointer_masking(CPURISCVState *env, int csrno)
249 /* Check if j-ext is present */
250 if (riscv_has_ext(env, RVJ)) {
251 return RISCV_EXCP_NONE;
253 return RISCV_EXCP_ILLEGAL_INST;
256 static int aia_hmode(CPURISCVState *env, int csrno)
258 if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
259 return RISCV_EXCP_ILLEGAL_INST;
262 return hmode(env, csrno);
265 static int aia_hmode32(CPURISCVState *env, int csrno)
267 if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
268 return RISCV_EXCP_ILLEGAL_INST;
271 return hmode32(env, csrno);
274 static RISCVException pmp(CPURISCVState *env, int csrno)
276 if (riscv_feature(env, RISCV_FEATURE_PMP)) {
277 return RISCV_EXCP_NONE;
280 return RISCV_EXCP_ILLEGAL_INST;
283 static RISCVException epmp(CPURISCVState *env, int csrno)
285 if (env->priv == PRV_M && riscv_feature(env, RISCV_FEATURE_EPMP)) {
286 return RISCV_EXCP_NONE;
289 return RISCV_EXCP_ILLEGAL_INST;
291 #endif
293 /* User Floating-Point CSRs */
294 static RISCVException read_fflags(CPURISCVState *env, int csrno,
295 target_ulong *val)
297 *val = riscv_cpu_get_fflags(env);
298 return RISCV_EXCP_NONE;
301 static RISCVException write_fflags(CPURISCVState *env, int csrno,
302 target_ulong val)
304 #if !defined(CONFIG_USER_ONLY)
305 env->mstatus |= MSTATUS_FS;
306 #endif
307 riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
308 return RISCV_EXCP_NONE;
311 static RISCVException read_frm(CPURISCVState *env, int csrno,
312 target_ulong *val)
314 *val = env->frm;
315 return RISCV_EXCP_NONE;
318 static RISCVException write_frm(CPURISCVState *env, int csrno,
319 target_ulong val)
321 #if !defined(CONFIG_USER_ONLY)
322 env->mstatus |= MSTATUS_FS;
323 #endif
324 env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
325 return RISCV_EXCP_NONE;
328 static RISCVException read_fcsr(CPURISCVState *env, int csrno,
329 target_ulong *val)
331 *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
332 | (env->frm << FSR_RD_SHIFT);
333 return RISCV_EXCP_NONE;
336 static RISCVException write_fcsr(CPURISCVState *env, int csrno,
337 target_ulong val)
339 #if !defined(CONFIG_USER_ONLY)
340 env->mstatus |= MSTATUS_FS;
341 #endif
342 env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
343 riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
344 return RISCV_EXCP_NONE;
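/*
 * Note: each write to fflags/frm/fcsr above also sets MSTATUS_FS in the
 * system-mode build, marking the floating-point state dirty so that
 * context-switch code knows it must be saved.
 */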
347 static RISCVException read_vtype(CPURISCVState *env, int csrno,
348 target_ulong *val)
350 uint64_t vill;
351 switch (env->xl) {
352 case MXL_RV32:
353 vill = (uint32_t)env->vill << 31;
354 break;
355 case MXL_RV64:
356 vill = (uint64_t)env->vill << 63;
357 break;
358 default:
359 g_assert_not_reached();
361 *val = (target_ulong)vill | env->vtype;
362 return RISCV_EXCP_NONE;
365 static RISCVException read_vl(CPURISCVState *env, int csrno,
366 target_ulong *val)
368 *val = env->vl;
369 return RISCV_EXCP_NONE;
372 static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val)
374 *val = env_archcpu(env)->cfg.vlen >> 3;
375 return RISCV_EXCP_NONE;
378 static RISCVException read_vxrm(CPURISCVState *env, int csrno,
379 target_ulong *val)
381 *val = env->vxrm;
382 return RISCV_EXCP_NONE;
385 static RISCVException write_vxrm(CPURISCVState *env, int csrno,
386 target_ulong val)
388 #if !defined(CONFIG_USER_ONLY)
389 env->mstatus |= MSTATUS_VS;
390 #endif
391 env->vxrm = val;
392 return RISCV_EXCP_NONE;
395 static RISCVException read_vxsat(CPURISCVState *env, int csrno,
396 target_ulong *val)
398 *val = env->vxsat;
399 return RISCV_EXCP_NONE;
402 static RISCVException write_vxsat(CPURISCVState *env, int csrno,
403 target_ulong val)
405 #if !defined(CONFIG_USER_ONLY)
406 env->mstatus |= MSTATUS_VS;
407 #endif
408 env->vxsat = val;
409 return RISCV_EXCP_NONE;
412 static RISCVException read_vstart(CPURISCVState *env, int csrno,
413 target_ulong *val)
415 *val = env->vstart;
416 return RISCV_EXCP_NONE;
419 static RISCVException write_vstart(CPURISCVState *env, int csrno,
420 target_ulong val)
422 #if !defined(CONFIG_USER_ONLY)
423 env->mstatus |= MSTATUS_VS;
424 #endif
426 * The vstart CSR is defined to have only enough writable bits
427 * to hold the largest element index, i.e. lg2(VLEN) bits.
429 env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen));
430 return RISCV_EXCP_NONE;
433 static int read_vcsr(CPURISCVState *env, int csrno, target_ulong *val)
435 *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
436 return RISCV_EXCP_NONE;
439 static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val)
441 #if !defined(CONFIG_USER_ONLY)
442 env->mstatus |= MSTATUS_VS;
443 #endif
444 env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
445 env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
446 return RISCV_EXCP_NONE;
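/*
 * Note: vcsr is an alias that packs vxrm and vxsat into a single register
 * (VCSR_VXRM_SHIFT / VCSR_VXSAT_SHIFT), and, like the other vector CSR
 * writes above, writing it marks the vector state dirty via MSTATUS_VS.
 */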
449 /* User Timers and Counters */
450 static RISCVException read_instret(CPURISCVState *env, int csrno,
451 target_ulong *val)
453 #if !defined(CONFIG_USER_ONLY)
454 if (icount_enabled()) {
455 *val = icount_get();
456 } else {
457 *val = cpu_get_host_ticks();
459 #else
460 *val = cpu_get_host_ticks();
461 #endif
462 return RISCV_EXCP_NONE;
465 static RISCVException read_instreth(CPURISCVState *env, int csrno,
466 target_ulong *val)
468 #if !defined(CONFIG_USER_ONLY)
469 if (icount_enabled()) {
470 *val = icount_get() >> 32;
471 } else {
472 *val = cpu_get_host_ticks() >> 32;
474 #else
475 *val = cpu_get_host_ticks() >> 32;
476 #endif
477 return RISCV_EXCP_NONE;
480 #if defined(CONFIG_USER_ONLY)
481 static RISCVException read_time(CPURISCVState *env, int csrno,
482 target_ulong *val)
484 *val = cpu_get_host_ticks();
485 return RISCV_EXCP_NONE;
488 static RISCVException read_timeh(CPURISCVState *env, int csrno,
489 target_ulong *val)
491 *val = cpu_get_host_ticks() >> 32;
492 return RISCV_EXCP_NONE;
495 #else /* CONFIG_USER_ONLY */
497 static RISCVException read_time(CPURISCVState *env, int csrno,
498 target_ulong *val)
500 uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;
502 if (!env->rdtime_fn) {
503 return RISCV_EXCP_ILLEGAL_INST;
506 *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
507 return RISCV_EXCP_NONE;
510 static RISCVException read_timeh(CPURISCVState *env, int csrno,
511 target_ulong *val)
513 uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;
515 if (!env->rdtime_fn) {
516 return RISCV_EXCP_ILLEGAL_INST;
519 *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
520 return RISCV_EXCP_NONE;
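/*
 * Note: in the system-mode build, time/timeh are backed by the board's
 * rdtime_fn callback; if no callback was registered the access traps as an
 * illegal instruction, and when V=1 the guest view is offset by htimedelta.
 */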
523 /* Machine constants */
525 #define M_MODE_INTERRUPTS ((uint64_t)(MIP_MSIP | MIP_MTIP | MIP_MEIP))
526 #define S_MODE_INTERRUPTS ((uint64_t)(MIP_SSIP | MIP_STIP | MIP_SEIP))
527 #define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP))
528 #define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS))
530 #define VSTOPI_NUM_SRCS 5
532 static const uint64_t delegable_ints = S_MODE_INTERRUPTS |
533 VS_MODE_INTERRUPTS;
534 static const uint64_t vs_delegable_ints = VS_MODE_INTERRUPTS;
535 static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
536 HS_MODE_INTERRUPTS;
537 #define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
538 (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
539 (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
540 (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
541 (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
542 (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
543 (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
544 (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
545 (1ULL << (RISCV_EXCP_U_ECALL)) | \
546 (1ULL << (RISCV_EXCP_S_ECALL)) | \
547 (1ULL << (RISCV_EXCP_VS_ECALL)) | \
548 (1ULL << (RISCV_EXCP_M_ECALL)) | \
549 (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
550 (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
551 (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
552 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
553 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
554 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
555 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
556 static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
557 ~((1ULL << (RISCV_EXCP_S_ECALL)) |
558 (1ULL << (RISCV_EXCP_VS_ECALL)) |
559 (1ULL << (RISCV_EXCP_M_ECALL)) |
560 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
561 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
562 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
563 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
564 static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
565 SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
566 SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
567 static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP;
568 static const target_ulong hip_writable_mask = MIP_VSSIP;
569 static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
570 static const target_ulong vsip_writable_mask = MIP_VSSIP;
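/*
 * The constants above define which interrupt bits may be delegated via
 * mideleg/hideleg and which bits of sip/hip/hvip/vsip are directly
 * writable by software; bits outside a *_writable_mask are read-only
 * through these CSRs.
 */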
572 static const char valid_vm_1_10_32[16] = {
573 [VM_1_10_MBARE] = 1,
574 [VM_1_10_SV32] = 1
577 static const char valid_vm_1_10_64[16] = {
578 [VM_1_10_MBARE] = 1,
579 [VM_1_10_SV39] = 1,
580 [VM_1_10_SV48] = 1,
581 [VM_1_10_SV57] = 1
584 /* Machine Information Registers */
585 static RISCVException read_zero(CPURISCVState *env, int csrno,
586 target_ulong *val)
588 *val = 0;
589 return RISCV_EXCP_NONE;
592 static RISCVException write_ignore(CPURISCVState *env, int csrno,
593 target_ulong val)
595 return RISCV_EXCP_NONE;
598 static RISCVException read_mhartid(CPURISCVState *env, int csrno,
599 target_ulong *val)
601 *val = env->mhartid;
602 return RISCV_EXCP_NONE;
605 /* Machine Trap Setup */
607 /* We do not store SD explicitly, only compute it on demand. */
608 static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
610 if ((status & MSTATUS_FS) == MSTATUS_FS ||
611 (status & MSTATUS_VS) == MSTATUS_VS ||
612 (status & MSTATUS_XS) == MSTATUS_XS) {
613 switch (xl) {
614 case MXL_RV32:
615 return status | MSTATUS32_SD;
616 case MXL_RV64:
617 return status | MSTATUS64_SD;
618 case MXL_RV128:
619 return MSTATUSH128_SD;
620 default:
621 g_assert_not_reached();
624 return status;
627 static RISCVException read_mstatus(CPURISCVState *env, int csrno,
628 target_ulong *val)
630 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
631 return RISCV_EXCP_NONE;
634 static int validate_vm(CPURISCVState *env, target_ulong vm)
636 if (riscv_cpu_mxl(env) == MXL_RV32) {
637 return valid_vm_1_10_32[vm & 0xf];
638 } else {
639 return valid_vm_1_10_64[vm & 0xf];
643 static RISCVException write_mstatus(CPURISCVState *env, int csrno,
644 target_ulong val)
646 uint64_t mstatus = env->mstatus;
647 uint64_t mask = 0;
648 RISCVMXL xl = riscv_cpu_mxl(env);
650 /* flush tlb on mstatus fields that affect VM */
651 if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV |
652 MSTATUS_MPRV | MSTATUS_SUM)) {
653 tlb_flush(env_cpu(env));
655 mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
656 MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM |
657 MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
658 MSTATUS_TW | MSTATUS_VS;
660 if (xl != MXL_RV32 || env->debugger) {
662 * RV32: MPV and GVA are not in mstatus. The current plan is to
663 * add them to mstatush. For now, we just don't support it.
665 mask |= MSTATUS_MPV | MSTATUS_GVA;
666 if ((val & MSTATUS64_UXL) != 0) {
667 mask |= MSTATUS64_UXL;
671 mstatus = (mstatus & ~mask) | (val & mask);
673 if (xl > MXL_RV32) {
674         /* The SXL field is read-only for now */
675 mstatus = set_field(mstatus, MSTATUS64_SXL, xl);
677 env->mstatus = mstatus;
678 env->xl = cpu_recompute_xl(env);
680 return RISCV_EXCP_NONE;
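/*
 * Note: write_mstatus() flushes the TLB whenever a field that affects
 * address translation (MXR/MPP/MPV/MPRV/SUM) changes, keeps SXL
 * read-only, and recomputes env->xl because UXL may have changed.
 */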
683 static RISCVException read_mstatush(CPURISCVState *env, int csrno,
684 target_ulong *val)
686 *val = env->mstatus >> 32;
687 return RISCV_EXCP_NONE;
690 static RISCVException write_mstatush(CPURISCVState *env, int csrno,
691 target_ulong val)
693 uint64_t valh = (uint64_t)val << 32;
694 uint64_t mask = MSTATUS_MPV | MSTATUS_GVA;
696 if ((valh ^ env->mstatus) & (MSTATUS_MPV)) {
697 tlb_flush(env_cpu(env));
700 env->mstatus = (env->mstatus & ~mask) | (valh & mask);
702 return RISCV_EXCP_NONE;
705 static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
706 Int128 *val)
708 *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128, env->mstatus));
709 return RISCV_EXCP_NONE;
712 static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
713 Int128 *val)
715 *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
716 return RISCV_EXCP_NONE;
719 static RISCVException read_misa(CPURISCVState *env, int csrno,
720 target_ulong *val)
722 target_ulong misa;
724 switch (env->misa_mxl) {
725 case MXL_RV32:
726 misa = (target_ulong)MXL_RV32 << 30;
727 break;
728 #ifdef TARGET_RISCV64
729 case MXL_RV64:
730 misa = (target_ulong)MXL_RV64 << 62;
731 break;
732 #endif
733 default:
734 g_assert_not_reached();
737 *val = misa | env->misa_ext;
738 return RISCV_EXCP_NONE;
741 static RISCVException write_misa(CPURISCVState *env, int csrno,
742 target_ulong val)
744 if (!riscv_feature(env, RISCV_FEATURE_MISA)) {
745 /* drop write to misa */
746 return RISCV_EXCP_NONE;
749 /* 'I' or 'E' must be present */
750 if (!(val & (RVI | RVE))) {
751 /* It is not, drop write to misa */
752 return RISCV_EXCP_NONE;
755 /* 'E' excludes all other extensions */
756 if (val & RVE) {
757         /* When we support 'E' we can do "val = RVE;"; for now,
758          * we just drop writes if 'E' is present.
760 return RISCV_EXCP_NONE;
764 * misa.MXL writes are not supported by QEMU.
765 * Drop writes to those bits.
768 /* Mask extensions that are not supported by this hart */
769 val &= env->misa_ext_mask;
771 /* Mask extensions that are not supported by QEMU */
772 val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU | RVV);
774 /* 'D' depends on 'F', so clear 'D' if 'F' is not present */
775 if ((val & RVD) && !(val & RVF)) {
776 val &= ~RVD;
779     /* Suppress 'C' if the next instruction is not aligned.
780      * TODO: this should check next_pc
782 if ((val & RVC) && (GETPC() & ~3) != 0) {
783 val &= ~RVC;
786 /* If nothing changed, do nothing. */
787 if (val == env->misa_ext) {
788 return RISCV_EXCP_NONE;
791 /* flush translation cache */
792 tb_flush(env_cpu(env));
793 env->misa_ext = val;
794 env->xl = riscv_cpu_mxl(env);
795 return RISCV_EXCP_NONE;
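/*
 * Note: write_misa() only honours the write when RISCV_FEATURE_MISA is
 * enabled, restricts the value to extensions the hart and QEMU support,
 * keeps 'D' dependent on 'F', and flushes the translation cache because
 * the set of legal instructions may have changed.
 */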
798 static RISCVException read_medeleg(CPURISCVState *env, int csrno,
799 target_ulong *val)
801 *val = env->medeleg;
802 return RISCV_EXCP_NONE;
805 static RISCVException write_medeleg(CPURISCVState *env, int csrno,
806 target_ulong val)
808 env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
809 return RISCV_EXCP_NONE;
812 static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno,
813 uint64_t *ret_val,
814 uint64_t new_val, uint64_t wr_mask)
816 uint64_t mask = wr_mask & delegable_ints;
818 if (ret_val) {
819 *ret_val = env->mideleg;
822 env->mideleg = (env->mideleg & ~mask) | (new_val & mask);
824 if (riscv_has_ext(env, RVH)) {
825 env->mideleg |= HS_MODE_INTERRUPTS;
828 return RISCV_EXCP_NONE;
831 static RISCVException rmw_mideleg(CPURISCVState *env, int csrno,
832 target_ulong *ret_val,
833 target_ulong new_val, target_ulong wr_mask)
835 uint64_t rval;
836 RISCVException ret;
838 ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask);
839 if (ret_val) {
840 *ret_val = rval;
843 return ret;
846 static RISCVException rmw_midelegh(CPURISCVState *env, int csrno,
847 target_ulong *ret_val,
848 target_ulong new_val,
849 target_ulong wr_mask)
851 uint64_t rval;
852 RISCVException ret;
854 ret = rmw_mideleg64(env, csrno, &rval,
855 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
856 if (ret_val) {
857 *ret_val = rval >> 32;
860 return ret;
863 static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
864 uint64_t *ret_val,
865 uint64_t new_val, uint64_t wr_mask)
867 uint64_t mask = wr_mask & all_ints;
869 if (ret_val) {
870 *ret_val = env->mie;
873 env->mie = (env->mie & ~mask) | (new_val & mask);
875 if (!riscv_has_ext(env, RVH)) {
876 env->mie &= ~((uint64_t)MIP_SGEIP);
879 return RISCV_EXCP_NONE;
882 static RISCVException rmw_mie(CPURISCVState *env, int csrno,
883 target_ulong *ret_val,
884 target_ulong new_val, target_ulong wr_mask)
886 uint64_t rval;
887 RISCVException ret;
889 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask);
890 if (ret_val) {
891 *ret_val = rval;
894 return ret;
897 static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
898 target_ulong *ret_val,
899 target_ulong new_val, target_ulong wr_mask)
901 uint64_t rval;
902 RISCVException ret;
904 ret = rmw_mie64(env, csrno, &rval,
905 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
906 if (ret_val) {
907 *ret_val = rval >> 32;
910 return ret;
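/*
 * Pattern used throughout this file: a 64-bit rmw_*64() helper holds the
 * real logic, while the target_ulong wrappers either pass the value
 * straight through (RV64) or shift by 32 so that the *h CSR accesses the
 * high half on RV32.
 */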
913 static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val)
915 int irq;
916 uint8_t iprio;
918 irq = riscv_cpu_mirq_pending(env);
919 if (irq <= 0 || irq > 63) {
920 *val = 0;
921 } else {
922 iprio = env->miprio[irq];
923 if (!iprio) {
924 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) {
925 iprio = IPRIO_MMAXIPRIO;
928 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
929 *val |= iprio;
932 return RISCV_EXCP_NONE;
935 static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
937 if (!riscv_cpu_virt_enabled(env)) {
938 return csrno;
941 switch (csrno) {
942 case CSR_SISELECT:
943 return CSR_VSISELECT;
944 case CSR_SIREG:
945 return CSR_VSIREG;
946 case CSR_SSETEIPNUM:
947 return CSR_VSSETEIPNUM;
948 case CSR_SCLREIPNUM:
949 return CSR_VSCLREIPNUM;
950 case CSR_SSETEIENUM:
951 return CSR_VSSETEIENUM;
952 case CSR_SCLREIENUM:
953 return CSR_VSCLREIENUM;
954 case CSR_STOPEI:
955 return CSR_VSTOPEI;
956 default:
957 return csrno;
961 static int rmw_xiselect(CPURISCVState *env, int csrno, target_ulong *val,
962 target_ulong new_val, target_ulong wr_mask)
964 target_ulong *iselect;
966 /* Translate CSR number for VS-mode */
967 csrno = aia_xlate_vs_csrno(env, csrno);
969 /* Find the iselect CSR based on CSR number */
970 switch (csrno) {
971 case CSR_MISELECT:
972 iselect = &env->miselect;
973 break;
974 case CSR_SISELECT:
975 iselect = &env->siselect;
976 break;
977 case CSR_VSISELECT:
978 iselect = &env->vsiselect;
979 break;
980 default:
981 return RISCV_EXCP_ILLEGAL_INST;
984 if (val) {
985 *val = *iselect;
988 wr_mask &= ISELECT_MASK;
989 if (wr_mask) {
990 *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
993 return RISCV_EXCP_NONE;
996 static int rmw_iprio(target_ulong xlen,
997 target_ulong iselect, uint8_t *iprio,
998 target_ulong *val, target_ulong new_val,
999 target_ulong wr_mask, int ext_irq_no)
1001 int i, firq, nirqs;
1002 target_ulong old_val;
1004 if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) {
1005 return -EINVAL;
1007 if (xlen != 32 && iselect & 0x1) {
1008 return -EINVAL;
1011 nirqs = 4 * (xlen / 32);
1012 firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs);
1014 old_val = 0;
1015 for (i = 0; i < nirqs; i++) {
1016 old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i);
1019 if (val) {
1020 *val = old_val;
1023 if (wr_mask) {
1024 new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
1025 for (i = 0; i < nirqs; i++) {
1027              * M-level and S-level external IRQ priority is always read-only
1028              * zero. This means the default priority order is always preferred
1029              * for M-level and S-level external IRQs.
1031 if ((firq + i) == ext_irq_no) {
1032 continue;
1034 iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff;
1038 return 0;
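/*
 * Note: the iprio[] arrays hold one byte of priority per interrupt, so
 * each xlen-wide iprio register covers 4 * (xlen / 32) interrupts; the
 * external-interrupt slot (ext_irq_no) is kept read-only zero above.
 */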
1041 static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
1042 target_ulong new_val, target_ulong wr_mask)
1044 bool virt;
1045 uint8_t *iprio;
1046 int ret = -EINVAL;
1047 target_ulong priv, isel, vgein;
1049 /* Translate CSR number for VS-mode */
1050 csrno = aia_xlate_vs_csrno(env, csrno);
1052 /* Decode register details from CSR number */
1053 virt = false;
1054 switch (csrno) {
1055 case CSR_MIREG:
1056 iprio = env->miprio;
1057 isel = env->miselect;
1058 priv = PRV_M;
1059 break;
1060 case CSR_SIREG:
1061 iprio = env->siprio;
1062 isel = env->siselect;
1063 priv = PRV_S;
1064 break;
1065 case CSR_VSIREG:
1066 iprio = env->hviprio;
1067 isel = env->vsiselect;
1068 priv = PRV_S;
1069 virt = true;
1070 break;
1071 default:
1072 goto done;
1075 /* Find the selected guest interrupt file */
1076 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
1078 if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) {
1079 /* Local interrupt priority registers not available for VS-mode */
1080 if (!virt) {
1081 ret = rmw_iprio(riscv_cpu_mxl_bits(env),
1082 isel, iprio, val, new_val, wr_mask,
1083 (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT);
1085 } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) {
1086         /* IMSIC registers are only available when the machine implements an IMSIC. */
1087 if (env->aia_ireg_rmw_fn[priv]) {
1088 /* Selected guest interrupt file should not be zero */
1089 if (virt && (!vgein || env->geilen < vgein)) {
1090 goto done;
1092 /* Call machine specific IMSIC register emulation */
1093 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
1094 AIA_MAKE_IREG(isel, priv, virt, vgein,
1095 riscv_cpu_mxl_bits(env)),
1096 val, new_val, wr_mask);
1100 done:
1101 if (ret) {
1102 return (riscv_cpu_virt_enabled(env) && virt) ?
1103 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
1105 return RISCV_EXCP_NONE;
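/*
 * Note: miselect/siselect/vsiselect pick an indirectly-accessed register
 * and mireg/sireg/vsireg access it: IPRIO0..IPRIO15 map onto the local
 * iprio[] arrays, while the IMSIC range is forwarded to the machine's
 * aia_ireg_rmw_fn callback for the selected guest interrupt file.
 */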
1108 static int rmw_xsetclreinum(CPURISCVState *env, int csrno, target_ulong *val,
1109 target_ulong new_val, target_ulong wr_mask)
1111 int ret = -EINVAL;
1112 bool set, pend, virt;
1113 target_ulong priv, isel, vgein, xlen, nval, wmask;
1115 /* Translate CSR number for VS-mode */
1116 csrno = aia_xlate_vs_csrno(env, csrno);
1118 /* Decode register details from CSR number */
1119 virt = set = pend = false;
1120 switch (csrno) {
1121 case CSR_MSETEIPNUM:
1122 priv = PRV_M;
1123 set = true;
1124 pend = true;
1125 break;
1126 case CSR_MCLREIPNUM:
1127 priv = PRV_M;
1128 pend = true;
1129 break;
1130 case CSR_MSETEIENUM:
1131 priv = PRV_M;
1132 set = true;
1133 break;
1134 case CSR_MCLREIENUM:
1135 priv = PRV_M;
1136 break;
1137 case CSR_SSETEIPNUM:
1138 priv = PRV_S;
1139 set = true;
1140 pend = true;
1141 break;
1142 case CSR_SCLREIPNUM:
1143 priv = PRV_S;
1144 pend = true;
1145 break;
1146 case CSR_SSETEIENUM:
1147 priv = PRV_S;
1148 set = true;
1149 break;
1150 case CSR_SCLREIENUM:
1151 priv = PRV_S;
1152 break;
1153 case CSR_VSSETEIPNUM:
1154 priv = PRV_S;
1155 virt = true;
1156 set = true;
1157 pend = true;
1158 break;
1159 case CSR_VSCLREIPNUM:
1160 priv = PRV_S;
1161 virt = true;
1162 pend = true;
1163 break;
1164 case CSR_VSSETEIENUM:
1165 priv = PRV_S;
1166 virt = true;
1167 set = true;
1168 break;
1169 case CSR_VSCLREIENUM:
1170 priv = PRV_S;
1171 virt = true;
1172 break;
1173 default:
1174 goto done;
1177     /* IMSIC CSRs are only available when the machine implements an IMSIC. */
1178 if (!env->aia_ireg_rmw_fn[priv]) {
1179 goto done;
1182 /* Find the selected guest interrupt file */
1183 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
1185 /* Selected guest interrupt file should be valid */
1186 if (virt && (!vgein || env->geilen < vgein)) {
1187 goto done;
1190 /* Set/Clear CSRs always read zero */
1191 if (val) {
1192 *val = 0;
1195 if (wr_mask) {
1196 /* Get interrupt number */
1197 new_val &= wr_mask;
1199 /* Find target interrupt pending/enable register */
1200 xlen = riscv_cpu_mxl_bits(env);
1201 isel = (new_val / xlen);
1202 isel *= (xlen / IMSIC_EIPx_BITS);
1203 isel += (pend) ? ISELECT_IMSIC_EIP0 : ISELECT_IMSIC_EIE0;
1205 /* Find the interrupt bit to be set/clear */
1206 wmask = ((target_ulong)1) << (new_val % xlen);
1207 nval = (set) ? wmask : 0;
1209 /* Call machine specific IMSIC register emulation */
1210 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
1211 AIA_MAKE_IREG(isel, priv, virt,
1212 vgein, xlen),
1213 NULL, nval, wmask);
1214 } else {
1215 ret = 0;
1218 done:
1219 if (ret) {
1220 return (riscv_cpu_virt_enabled(env) && virt) ?
1221 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
1223 return RISCV_EXCP_NONE;
1226 static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val,
1227 target_ulong new_val, target_ulong wr_mask)
1229 bool virt;
1230 int ret = -EINVAL;
1231 target_ulong priv, vgein;
1233 /* Translate CSR number for VS-mode */
1234 csrno = aia_xlate_vs_csrno(env, csrno);
1236 /* Decode register details from CSR number */
1237 virt = false;
1238 switch (csrno) {
1239 case CSR_MTOPEI:
1240 priv = PRV_M;
1241 break;
1242 case CSR_STOPEI:
1243 priv = PRV_S;
1244 break;
1245 case CSR_VSTOPEI:
1246 priv = PRV_S;
1247 virt = true;
1248 break;
1249 default:
1250 goto done;
1253     /* IMSIC CSRs are only available when the machine implements an IMSIC. */
1254 if (!env->aia_ireg_rmw_fn[priv]) {
1255 goto done;
1258 /* Find the selected guest interrupt file */
1259 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
1261 /* Selected guest interrupt file should be valid */
1262 if (virt && (!vgein || env->geilen < vgein)) {
1263 goto done;
1266 /* Call machine specific IMSIC register emulation for TOPEI */
1267 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
1268 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein,
1269 riscv_cpu_mxl_bits(env)),
1270 val, new_val, wr_mask);
1272 done:
1273 if (ret) {
1274 return (riscv_cpu_virt_enabled(env) && virt) ?
1275 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
1277 return RISCV_EXCP_NONE;
1280 static RISCVException read_mtvec(CPURISCVState *env, int csrno,
1281 target_ulong *val)
1283 *val = env->mtvec;
1284 return RISCV_EXCP_NONE;
1287 static RISCVException write_mtvec(CPURISCVState *env, int csrno,
1288 target_ulong val)
1290     /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
1291 if ((val & 3) < 2) {
1292 env->mtvec = val;
1293 } else {
1294 qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n");
1296 return RISCV_EXCP_NONE;
1299 static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
1300 target_ulong *val)
1302 *val = env->mcounteren;
1303 return RISCV_EXCP_NONE;
1306 static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
1307 target_ulong val)
1309 env->mcounteren = val;
1310 return RISCV_EXCP_NONE;
1313 /* Machine Trap Handling */
1314 static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
1315 Int128 *val)
1317 *val = int128_make128(env->mscratch, env->mscratchh);
1318 return RISCV_EXCP_NONE;
1321 static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
1322 Int128 val)
1324 env->mscratch = int128_getlo(val);
1325 env->mscratchh = int128_gethi(val);
1326 return RISCV_EXCP_NONE;
1329 static RISCVException read_mscratch(CPURISCVState *env, int csrno,
1330 target_ulong *val)
1332 *val = env->mscratch;
1333 return RISCV_EXCP_NONE;
1336 static RISCVException write_mscratch(CPURISCVState *env, int csrno,
1337 target_ulong val)
1339 env->mscratch = val;
1340 return RISCV_EXCP_NONE;
1343 static RISCVException read_mepc(CPURISCVState *env, int csrno,
1344 target_ulong *val)
1346 *val = env->mepc;
1347 return RISCV_EXCP_NONE;
1350 static RISCVException write_mepc(CPURISCVState *env, int csrno,
1351 target_ulong val)
1353 env->mepc = val;
1354 return RISCV_EXCP_NONE;
1357 static RISCVException read_mcause(CPURISCVState *env, int csrno,
1358 target_ulong *val)
1360 *val = env->mcause;
1361 return RISCV_EXCP_NONE;
1364 static RISCVException write_mcause(CPURISCVState *env, int csrno,
1365 target_ulong val)
1367 env->mcause = val;
1368 return RISCV_EXCP_NONE;
1371 static RISCVException read_mtval(CPURISCVState *env, int csrno,
1372 target_ulong *val)
1374 *val = env->mtval;
1375 return RISCV_EXCP_NONE;
1378 static RISCVException write_mtval(CPURISCVState *env, int csrno,
1379 target_ulong val)
1381 env->mtval = val;
1382 return RISCV_EXCP_NONE;
1385 static RISCVException rmw_mip64(CPURISCVState *env, int csrno,
1386 uint64_t *ret_val,
1387 uint64_t new_val, uint64_t wr_mask)
1389 RISCVCPU *cpu = env_archcpu(env);
1390 /* Allow software control of delegable interrupts not claimed by hardware */
1391 uint64_t old_mip, mask = wr_mask & delegable_ints & ~env->miclaim;
1392 uint32_t gin;
1394 if (mask) {
1395 old_mip = riscv_cpu_update_mip(cpu, mask, (new_val & mask));
1396 } else {
1397 old_mip = env->mip;
1400 if (csrno != CSR_HVIP) {
1401 gin = get_field(env->hstatus, HSTATUS_VGEIN);
1402 old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0;
1405 if (ret_val) {
1406 *ret_val = old_mip;
1409 return RISCV_EXCP_NONE;
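/*
 * Note: software may only toggle delegable interrupts it has not handed
 * to hardware (~miclaim); the update goes through riscv_cpu_update_mip()
 * so pending interrupts are re-evaluated, and the returned VSEIP also
 * reflects the hgeip bit selected by hstatus.VGEIN.
 */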
1412 static RISCVException rmw_mip(CPURISCVState *env, int csrno,
1413 target_ulong *ret_val,
1414 target_ulong new_val, target_ulong wr_mask)
1416 uint64_t rval;
1417 RISCVException ret;
1419 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask);
1420 if (ret_val) {
1421 *ret_val = rval;
1424 return ret;
1427 static RISCVException rmw_miph(CPURISCVState *env, int csrno,
1428 target_ulong *ret_val,
1429 target_ulong new_val, target_ulong wr_mask)
1431 uint64_t rval;
1432 RISCVException ret;
1434 ret = rmw_mip64(env, csrno, &rval,
1435 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1436 if (ret_val) {
1437 *ret_val = rval >> 32;
1440 return ret;
1443 /* Supervisor Trap Setup */
1444 static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
1445 Int128 *val)
1447 uint64_t mask = sstatus_v1_10_mask;
1448 uint64_t sstatus = env->mstatus & mask;
1449 if (env->xl != MXL_RV32 || env->debugger) {
1450 mask |= SSTATUS64_UXL;
1453 *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
1454 return RISCV_EXCP_NONE;
1457 static RISCVException read_sstatus(CPURISCVState *env, int csrno,
1458 target_ulong *val)
1460 target_ulong mask = (sstatus_v1_10_mask);
1461 if (env->xl != MXL_RV32 || env->debugger) {
1462 mask |= SSTATUS64_UXL;
1464 /* TODO: Use SXL not MXL. */
1465 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
1466 return RISCV_EXCP_NONE;
1469 static RISCVException write_sstatus(CPURISCVState *env, int csrno,
1470 target_ulong val)
1472 target_ulong mask = (sstatus_v1_10_mask);
1474 if (env->xl != MXL_RV32 || env->debugger) {
1475 if ((val & SSTATUS64_UXL) != 0) {
1476 mask |= SSTATUS64_UXL;
1479 target_ulong newval = (env->mstatus & ~mask) | (val & mask);
1480 return write_mstatus(env, CSR_MSTATUS, newval);
1483 static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
1484 uint64_t *ret_val,
1485 uint64_t new_val, uint64_t wr_mask)
1487 RISCVException ret;
1488 uint64_t rval, vsbits, mask = env->hideleg & VS_MODE_INTERRUPTS;
1490 /* Bring VS-level bits to correct position */
1491 vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
1492 new_val &= ~(VS_MODE_INTERRUPTS >> 1);
1493 new_val |= vsbits << 1;
1494 vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
1495 wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
1496 wr_mask |= vsbits << 1;
1498 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & mask);
1499 if (ret_val) {
1500 rval &= mask;
1501 vsbits = rval & VS_MODE_INTERRUPTS;
1502 rval &= ~VS_MODE_INTERRUPTS;
1503 *ret_val = rval | (vsbits >> 1);
1506 return ret;
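/*
 * Note: in vsie/vsip the VS-level bits appear one position below where
 * they live in mie/mip (e.g. VSEIP is reported as SEIP), which is why the
 * helpers above shift the VS_MODE_INTERRUPTS bits up before forwarding
 * and back down before returning.
 */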
1509 static RISCVException rmw_vsie(CPURISCVState *env, int csrno,
1510 target_ulong *ret_val,
1511 target_ulong new_val, target_ulong wr_mask)
1513 uint64_t rval;
1514 RISCVException ret;
1516 ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask);
1517 if (ret_val) {
1518 *ret_val = rval;
1521 return ret;
1524 static RISCVException rmw_vsieh(CPURISCVState *env, int csrno,
1525 target_ulong *ret_val,
1526 target_ulong new_val, target_ulong wr_mask)
1528 uint64_t rval;
1529 RISCVException ret;
1531 ret = rmw_vsie64(env, csrno, &rval,
1532 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1533 if (ret_val) {
1534 *ret_val = rval >> 32;
1537 return ret;
1540 static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
1541 uint64_t *ret_val,
1542 uint64_t new_val, uint64_t wr_mask)
1544 RISCVException ret;
1545 uint64_t mask = env->mideleg & S_MODE_INTERRUPTS;
1547 if (riscv_cpu_virt_enabled(env)) {
1548 if (env->hvictl & HVICTL_VTI) {
1549 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1551 ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
1552 } else {
1553 ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & mask);
1556 if (ret_val) {
1557 *ret_val &= mask;
1560 return ret;
1563 static RISCVException rmw_sie(CPURISCVState *env, int csrno,
1564 target_ulong *ret_val,
1565 target_ulong new_val, target_ulong wr_mask)
1567 uint64_t rval;
1568 RISCVException ret;
1570 ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask);
1571 if (ret == RISCV_EXCP_NONE && ret_val) {
1572 *ret_val = rval;
1575 return ret;
1578 static RISCVException rmw_sieh(CPURISCVState *env, int csrno,
1579 target_ulong *ret_val,
1580 target_ulong new_val, target_ulong wr_mask)
1582 uint64_t rval;
1583 RISCVException ret;
1585 ret = rmw_sie64(env, csrno, &rval,
1586 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1587 if (ret_val) {
1588 *ret_val = rval >> 32;
1591 return ret;
1594 static RISCVException read_stvec(CPURISCVState *env, int csrno,
1595 target_ulong *val)
1597 *val = env->stvec;
1598 return RISCV_EXCP_NONE;
1601 static RISCVException write_stvec(CPURISCVState *env, int csrno,
1602 target_ulong val)
1604     /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
1605 if ((val & 3) < 2) {
1606 env->stvec = val;
1607 } else {
1608 qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n");
1610 return RISCV_EXCP_NONE;
1613 static RISCVException read_scounteren(CPURISCVState *env, int csrno,
1614 target_ulong *val)
1616 *val = env->scounteren;
1617 return RISCV_EXCP_NONE;
1620 static RISCVException write_scounteren(CPURISCVState *env, int csrno,
1621 target_ulong val)
1623 env->scounteren = val;
1624 return RISCV_EXCP_NONE;
1627 /* Supervisor Trap Handling */
1628 static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
1629 Int128 *val)
1631 *val = int128_make128(env->sscratch, env->sscratchh);
1632 return RISCV_EXCP_NONE;
1635 static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
1636 Int128 val)
1638 env->sscratch = int128_getlo(val);
1639 env->sscratchh = int128_gethi(val);
1640 return RISCV_EXCP_NONE;
1643 static RISCVException read_sscratch(CPURISCVState *env, int csrno,
1644 target_ulong *val)
1646 *val = env->sscratch;
1647 return RISCV_EXCP_NONE;
1650 static RISCVException write_sscratch(CPURISCVState *env, int csrno,
1651 target_ulong val)
1653 env->sscratch = val;
1654 return RISCV_EXCP_NONE;
1657 static RISCVException read_sepc(CPURISCVState *env, int csrno,
1658 target_ulong *val)
1660 *val = env->sepc;
1661 return RISCV_EXCP_NONE;
1664 static RISCVException write_sepc(CPURISCVState *env, int csrno,
1665 target_ulong val)
1667 env->sepc = val;
1668 return RISCV_EXCP_NONE;
1671 static RISCVException read_scause(CPURISCVState *env, int csrno,
1672 target_ulong *val)
1674 *val = env->scause;
1675 return RISCV_EXCP_NONE;
1678 static RISCVException write_scause(CPURISCVState *env, int csrno,
1679 target_ulong val)
1681 env->scause = val;
1682 return RISCV_EXCP_NONE;
1685 static RISCVException read_stval(CPURISCVState *env, int csrno,
1686 target_ulong *val)
1688 *val = env->stval;
1689 return RISCV_EXCP_NONE;
1692 static RISCVException write_stval(CPURISCVState *env, int csrno,
1693 target_ulong val)
1695 env->stval = val;
1696 return RISCV_EXCP_NONE;
1699 static RISCVException rmw_vsip64(CPURISCVState *env, int csrno,
1700 uint64_t *ret_val,
1701 uint64_t new_val, uint64_t wr_mask)
1703 RISCVException ret;
1704 uint64_t rval, vsbits, mask = env->hideleg & vsip_writable_mask;
1706 /* Bring VS-level bits to correct position */
1707 vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
1708 new_val &= ~(VS_MODE_INTERRUPTS >> 1);
1709 new_val |= vsbits << 1;
1710 vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
1711 wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
1712 wr_mask |= vsbits << 1;
1714 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask & mask);
1715 if (ret_val) {
1716 rval &= mask;
1717 vsbits = rval & VS_MODE_INTERRUPTS;
1718 rval &= ~VS_MODE_INTERRUPTS;
1719 *ret_val = rval | (vsbits >> 1);
1722 return ret;
1725 static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
1726 target_ulong *ret_val,
1727 target_ulong new_val, target_ulong wr_mask)
1729 uint64_t rval;
1730 RISCVException ret;
1732 ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask);
1733 if (ret_val) {
1734 *ret_val = rval;
1737 return ret;
1740 static RISCVException rmw_vsiph(CPURISCVState *env, int csrno,
1741 target_ulong *ret_val,
1742 target_ulong new_val, target_ulong wr_mask)
1744 uint64_t rval;
1745 RISCVException ret;
1747 ret = rmw_vsip64(env, csrno, &rval,
1748 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1749 if (ret_val) {
1750 *ret_val = rval >> 32;
1753 return ret;
1756 static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
1757 uint64_t *ret_val,
1758 uint64_t new_val, uint64_t wr_mask)
1760 RISCVException ret;
1761 uint64_t mask = env->mideleg & sip_writable_mask;
1763 if (riscv_cpu_virt_enabled(env)) {
1764 if (env->hvictl & HVICTL_VTI) {
1765 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1767 ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
1768 } else {
1769 ret = rmw_mip64(env, csrno, ret_val, new_val, wr_mask & mask);
1772 if (ret_val) {
1773 *ret_val &= env->mideleg & S_MODE_INTERRUPTS;
1776 return ret;
1779 static RISCVException rmw_sip(CPURISCVState *env, int csrno,
1780 target_ulong *ret_val,
1781 target_ulong new_val, target_ulong wr_mask)
1783 uint64_t rval;
1784 RISCVException ret;
1786 ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask);
1787 if (ret_val) {
1788 *ret_val = rval;
1791 return ret;
1794 static RISCVException rmw_siph(CPURISCVState *env, int csrno,
1795 target_ulong *ret_val,
1796 target_ulong new_val, target_ulong wr_mask)
1798 uint64_t rval;
1799 RISCVException ret;
1801 ret = rmw_sip64(env, csrno, &rval,
1802 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1803 if (ret_val) {
1804 *ret_val = rval >> 32;
1807 return ret;
1810 /* Supervisor Protection and Translation */
1811 static RISCVException read_satp(CPURISCVState *env, int csrno,
1812 target_ulong *val)
1814 if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
1815 *val = 0;
1816 return RISCV_EXCP_NONE;
1819 if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
1820 return RISCV_EXCP_ILLEGAL_INST;
1821 } else {
1822 *val = env->satp;
1825 return RISCV_EXCP_NONE;
1828 static RISCVException write_satp(CPURISCVState *env, int csrno,
1829 target_ulong val)
1831 target_ulong vm, mask, asid;
1833 if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
1834 return RISCV_EXCP_NONE;
1837 if (riscv_cpu_mxl(env) == MXL_RV32) {
1838 vm = validate_vm(env, get_field(val, SATP32_MODE));
1839 mask = (val ^ env->satp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
1840 asid = (val ^ env->satp) & SATP32_ASID;
1841 } else {
1842 vm = validate_vm(env, get_field(val, SATP64_MODE));
1843 mask = (val ^ env->satp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
1844 asid = (val ^ env->satp) & SATP64_ASID;
1847 if (vm && mask) {
1848 if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
1849 return RISCV_EXCP_ILLEGAL_INST;
1850 } else {
1851 if (asid) {
1852 tlb_flush(env_cpu(env));
1854 env->satp = val;
1857 return RISCV_EXCP_NONE;
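/*
 * Note: write_satp() ignores the write when there is no MMU or the
 * requested translation mode is not supported (validate_vm()), traps
 * when mstatus.TVM is set in S-mode, and flushes the TLB when the ASID
 * changes.
 */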
1860 static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val)
1862 int irq, ret;
1863 target_ulong topei;
1864 uint64_t vseip, vsgein;
1865 uint32_t iid, iprio, hviid, hviprio, gein;
1866 uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS];
1868 gein = get_field(env->hstatus, HSTATUS_VGEIN);
1869 hviid = get_field(env->hvictl, HVICTL_IID);
1870 hviprio = get_field(env->hvictl, HVICTL_IPRIO);
1872 if (gein) {
1873 vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
1874 vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP;
1875 if (gein <= env->geilen && vseip) {
1876 siid[scount] = IRQ_S_EXT;
1877 siprio[scount] = IPRIO_MMAXIPRIO + 1;
1878 if (env->aia_ireg_rmw_fn[PRV_S]) {
1880 * Call machine specific IMSIC register emulation for
1881 * reading TOPEI.
1883 ret = env->aia_ireg_rmw_fn[PRV_S](
1884 env->aia_ireg_rmw_fn_arg[PRV_S],
1885 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein,
1886 riscv_cpu_mxl_bits(env)),
1887 &topei, 0, 0);
1888 if (!ret && topei) {
1889 siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK;
1892 scount++;
1894 } else {
1895 if (hviid == IRQ_S_EXT && hviprio) {
1896 siid[scount] = IRQ_S_EXT;
1897 siprio[scount] = hviprio;
1898 scount++;
1902 if (env->hvictl & HVICTL_VTI) {
1903 if (hviid != IRQ_S_EXT) {
1904 siid[scount] = hviid;
1905 siprio[scount] = hviprio;
1906 scount++;
1908 } else {
1909 irq = riscv_cpu_vsirq_pending(env);
1910 if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) {
1911 siid[scount] = irq;
1912 siprio[scount] = env->hviprio[irq];
1913 scount++;
1917 iid = 0;
1918 iprio = UINT_MAX;
1919 for (s = 0; s < scount; s++) {
1920 if (siprio[s] < iprio) {
1921 iid = siid[s];
1922 iprio = siprio[s];
1926 if (iid) {
1927 if (env->hvictl & HVICTL_IPRIOM) {
1928 if (iprio > IPRIO_MMAXIPRIO) {
1929 iprio = IPRIO_MMAXIPRIO;
1931 if (!iprio) {
1932 if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) {
1933 iprio = IPRIO_MMAXIPRIO;
1936 } else {
1937 iprio = 1;
1939 } else {
1940 iprio = 0;
1943 *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT;
1944 *val |= iprio;
1945 return RISCV_EXCP_NONE;
1948 static int read_stopi(CPURISCVState *env, int csrno, target_ulong *val)
1950 int irq;
1951 uint8_t iprio;
1953 if (riscv_cpu_virt_enabled(env)) {
1954 return read_vstopi(env, CSR_VSTOPI, val);
1957 irq = riscv_cpu_sirq_pending(env);
1958 if (irq <= 0 || irq > 63) {
1959 *val = 0;
1960 } else {
1961 iprio = env->siprio[irq];
1962 if (!iprio) {
1963 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) {
1964 iprio = IPRIO_MMAXIPRIO;
1967 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
1968 *val |= iprio;
1971 return RISCV_EXCP_NONE;
1974 /* Hypervisor Extensions */
1975 static RISCVException read_hstatus(CPURISCVState *env, int csrno,
1976 target_ulong *val)
1978 *val = env->hstatus;
1979 if (riscv_cpu_mxl(env) != MXL_RV32) {
1980 /* We only support 64-bit VSXL */
1981 *val = set_field(*val, HSTATUS_VSXL, 2);
1983 /* We only support little endian */
1984 *val = set_field(*val, HSTATUS_VSBE, 0);
1985 return RISCV_EXCP_NONE;
1988 static RISCVException write_hstatus(CPURISCVState *env, int csrno,
1989 target_ulong val)
1991 env->hstatus = val;
1992 if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
1993 qemu_log_mask(LOG_UNIMP, "QEMU does not support mixed HSXLEN options.");
1995 if (get_field(val, HSTATUS_VSBE) != 0) {
1996 qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests.");
1998 return RISCV_EXCP_NONE;
2001 static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
2002 target_ulong *val)
2004 *val = env->hedeleg;
2005 return RISCV_EXCP_NONE;
2008 static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
2009 target_ulong val)
2011 env->hedeleg = val & vs_delegable_excps;
2012 return RISCV_EXCP_NONE;
2015 static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno,
2016 uint64_t *ret_val,
2017 uint64_t new_val, uint64_t wr_mask)
2019 uint64_t mask = wr_mask & vs_delegable_ints;
2021 if (ret_val) {
2022 *ret_val = env->hideleg & vs_delegable_ints;
2025 env->hideleg = (env->hideleg & ~mask) | (new_val & mask);
2026 return RISCV_EXCP_NONE;
2029 static RISCVException rmw_hideleg(CPURISCVState *env, int csrno,
2030 target_ulong *ret_val,
2031 target_ulong new_val, target_ulong wr_mask)
2033 uint64_t rval;
2034 RISCVException ret;
2036 ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask);
2037 if (ret_val) {
2038 *ret_val = rval;
2041 return ret;
2044 static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno,
2045 target_ulong *ret_val,
2046 target_ulong new_val, target_ulong wr_mask)
2048 uint64_t rval;
2049 RISCVException ret;
2051 ret = rmw_hideleg64(env, csrno, &rval,
2052 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2053 if (ret_val) {
2054 *ret_val = rval >> 32;
2057 return ret;
2060 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
2061 uint64_t *ret_val,
2062 uint64_t new_val, uint64_t wr_mask)
2064 RISCVException ret;
2066 ret = rmw_mip64(env, csrno, ret_val, new_val,
2067 wr_mask & hvip_writable_mask);
2068 if (ret_val) {
2069 *ret_val &= VS_MODE_INTERRUPTS;
2072 return ret;
2075 static RISCVException rmw_hvip(CPURISCVState *env, int csrno,
2076 target_ulong *ret_val,
2077 target_ulong new_val, target_ulong wr_mask)
2079 uint64_t rval;
2080 RISCVException ret;
2082 ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask);
2083 if (ret_val) {
2084 *ret_val = rval;
2087 return ret;
2090 static RISCVException rmw_hviph(CPURISCVState *env, int csrno,
2091 target_ulong *ret_val,
2092 target_ulong new_val, target_ulong wr_mask)
2094 uint64_t rval;
2095 RISCVException ret;
2097 ret = rmw_hvip64(env, csrno, &rval,
2098 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2099 if (ret_val) {
2100 *ret_val = rval >> 32;
2103 return ret;
2106 static RISCVException rmw_hip(CPURISCVState *env, int csrno,
2107 target_ulong *ret_value,
2108 target_ulong new_value, target_ulong write_mask)
2110 int ret = rmw_mip(env, csrno, ret_value, new_value,
2111 write_mask & hip_writable_mask);
2113 if (ret_value) {
2114 *ret_value &= HS_MODE_INTERRUPTS;
2116 return ret;
2119 static RISCVException rmw_hie(CPURISCVState *env, int csrno,
2120 target_ulong *ret_val,
2121 target_ulong new_val, target_ulong wr_mask)
2123 uint64_t rval;
2124 RISCVException ret;
2126 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS);
2127 if (ret_val) {
2128 *ret_val = rval & HS_MODE_INTERRUPTS;
2131 return ret;
2134 static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
2135 target_ulong *val)
2137 *val = env->hcounteren;
2138 return RISCV_EXCP_NONE;
2141 static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
2142 target_ulong val)
2144 env->hcounteren = val;
2145 return RISCV_EXCP_NONE;
2148 static RISCVException read_hgeie(CPURISCVState *env, int csrno,
2149 target_ulong *val)
2151 if (val) {
2152 *val = env->hgeie;
2154 return RISCV_EXCP_NONE;
2157 static RISCVException write_hgeie(CPURISCVState *env, int csrno,
2158 target_ulong val)
2160     /* Only bits [GEILEN:1] are implemented; bit 0 is never implemented */
2161 val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
2162 env->hgeie = val;
2163 /* Update mip.SGEIP bit */
2164 riscv_cpu_update_mip(env_archcpu(env), MIP_SGEIP,
2165 BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
2166 return RISCV_EXCP_NONE;
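/*
 * Note: writing hgeie re-derives mip.SGEIP from (hgeie & hgeip) so a
 * newly enabled guest external interrupt is delivered immediately.
 */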
2169 static RISCVException read_htval(CPURISCVState *env, int csrno,
2170 target_ulong *val)
2172 *val = env->htval;
2173 return RISCV_EXCP_NONE;
2176 static RISCVException write_htval(CPURISCVState *env, int csrno,
2177 target_ulong val)
2179 env->htval = val;
2180 return RISCV_EXCP_NONE;
2183 static RISCVException read_htinst(CPURISCVState *env, int csrno,
2184 target_ulong *val)
2186 *val = env->htinst;
2187 return RISCV_EXCP_NONE;
2190 static RISCVException write_htinst(CPURISCVState *env, int csrno,
2191 target_ulong val)
2193 return RISCV_EXCP_NONE;
2196 static RISCVException read_hgeip(CPURISCVState *env, int csrno,
2197 target_ulong *val)
2199 if (val) {
2200 *val = env->hgeip;
2202 return RISCV_EXCP_NONE;
2205 static RISCVException read_hgatp(CPURISCVState *env, int csrno,
2206 target_ulong *val)
2208 *val = env->hgatp;
2209 return RISCV_EXCP_NONE;
2212 static RISCVException write_hgatp(CPURISCVState *env, int csrno,
2213 target_ulong val)
2215 env->hgatp = val;
2216 return RISCV_EXCP_NONE;
2219 static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
2220 target_ulong *val)
2222 if (!env->rdtime_fn) {
2223 return RISCV_EXCP_ILLEGAL_INST;
2226 *val = env->htimedelta;
2227 return RISCV_EXCP_NONE;
2230 static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
2231 target_ulong val)
2233 if (!env->rdtime_fn) {
2234 return RISCV_EXCP_ILLEGAL_INST;
2237 if (riscv_cpu_mxl(env) == MXL_RV32) {
2238 env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
2239 } else {
2240 env->htimedelta = val;
2242 return RISCV_EXCP_NONE;
2245 static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
2246 target_ulong *val)
2248 if (!env->rdtime_fn) {
2249 return RISCV_EXCP_ILLEGAL_INST;
2252 *val = env->htimedelta >> 32;
2253 return RISCV_EXCP_NONE;
2256 static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
2257 target_ulong val)
2259 if (!env->rdtime_fn) {
2260 return RISCV_EXCP_ILLEGAL_INST;
2263 env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);
2264 return RISCV_EXCP_NONE;
2267 static int read_hvictl(CPURISCVState *env, int csrno, target_ulong *val)
2269 *val = env->hvictl;
2270 return RISCV_EXCP_NONE;
2273 static int write_hvictl(CPURISCVState *env, int csrno, target_ulong val)
2275 env->hvictl = val & HVICTL_VALID_MASK;
2276 return RISCV_EXCP_NONE;
2279 static int read_hvipriox(CPURISCVState *env, int first_index,
2280 uint8_t *iprio, target_ulong *val)
2282 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
2284     /* The first index has to be a multiple of the number of IRQs per register */
2285 if (first_index % num_irqs) {
2286 return (riscv_cpu_virt_enabled(env)) ?
2287 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
2290     /* Fill up the return value */
2291 *val = 0;
2292 for (i = 0; i < num_irqs; i++) {
2293 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
2294 continue;
2296 if (rdzero) {
2297 continue;
2299 *val |= ((target_ulong)iprio[irq]) << (i * 8);
2302 return RISCV_EXCP_NONE;
2305 static int write_hvipriox(CPURISCVState *env, int first_index,
2306 uint8_t *iprio, target_ulong val)
2308 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
2310     /* The first index has to be a multiple of the number of IRQs per register */
2311 if (first_index % num_irqs) {
2312 return (riscv_cpu_virt_enabled(env)) ?
2313 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
2316     /* Fill up the priority array */
2317 for (i = 0; i < num_irqs; i++) {
2318 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
2319 continue;
2321 if (rdzero) {
2322 iprio[irq] = 0;
2323 } else {
2324 iprio[irq] = (val >> (i * 8)) & 0xff;
2328 return RISCV_EXCP_NONE;
2331 static int read_hviprio1(CPURISCVState *env, int csrno, target_ulong *val)
2333 return read_hvipriox(env, 0, env->hviprio, val);
2336 static int write_hviprio1(CPURISCVState *env, int csrno, target_ulong val)
2338 return write_hvipriox(env, 0, env->hviprio, val);
2341 static int read_hviprio1h(CPURISCVState *env, int csrno, target_ulong *val)
2343 return read_hvipriox(env, 4, env->hviprio, val);
2346 static int write_hviprio1h(CPURISCVState *env, int csrno, target_ulong val)
2348 return write_hvipriox(env, 4, env->hviprio, val);
2351 static int read_hviprio2(CPURISCVState *env, int csrno, target_ulong *val)
2353 return read_hvipriox(env, 8, env->hviprio, val);
2356 static int write_hviprio2(CPURISCVState *env, int csrno, target_ulong val)
2358 return write_hvipriox(env, 8, env->hviprio, val);
2361 static int read_hviprio2h(CPURISCVState *env, int csrno, target_ulong *val)
2363 return read_hvipriox(env, 12, env->hviprio, val);
2366 static int write_hviprio2h(CPURISCVState *env, int csrno, target_ulong val)
2368 return write_hvipriox(env, 12, env->hviprio, val);
2371 /* Virtual CSR Registers */
2372 static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
2373 target_ulong *val)
2375 *val = env->vsstatus;
2376 return RISCV_EXCP_NONE;
2379 static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
2380 target_ulong val)
2382 uint64_t mask = (target_ulong)-1;
2383 if ((val & VSSTATUS64_UXL) == 0) {
2384 mask &= ~VSSTATUS64_UXL;
2386 env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
2387 return RISCV_EXCP_NONE;
2390 static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val)
2392 *val = env->vstvec;
2393 return RISCV_EXCP_NONE;
2396 static RISCVException write_vstvec(CPURISCVState *env, int csrno,
2397 target_ulong val)
2399 env->vstvec = val;
2400 return RISCV_EXCP_NONE;
2403 static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
2404 target_ulong *val)
2406 *val = env->vsscratch;
2407 return RISCV_EXCP_NONE;
2410 static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
2411 target_ulong val)
2413 env->vsscratch = val;
2414 return RISCV_EXCP_NONE;
2417 static RISCVException read_vsepc(CPURISCVState *env, int csrno,
2418 target_ulong *val)
2420 *val = env->vsepc;
2421 return RISCV_EXCP_NONE;
2424 static RISCVException write_vsepc(CPURISCVState *env, int csrno,
2425 target_ulong val)
2427 env->vsepc = val;
2428 return RISCV_EXCP_NONE;
2431 static RISCVException read_vscause(CPURISCVState *env, int csrno,
2432 target_ulong *val)
2434 *val = env->vscause;
2435 return RISCV_EXCP_NONE;
2438 static RISCVException write_vscause(CPURISCVState *env, int csrno,
2439 target_ulong val)
2441 env->vscause = val;
2442 return RISCV_EXCP_NONE;
2445 static RISCVException read_vstval(CPURISCVState *env, int csrno,
2446 target_ulong *val)
2448 *val = env->vstval;
2449 return RISCV_EXCP_NONE;
2452 static RISCVException write_vstval(CPURISCVState *env, int csrno,
2453 target_ulong val)
2455 env->vstval = val;
2456 return RISCV_EXCP_NONE;
2459 static RISCVException read_vsatp(CPURISCVState *env, int csrno,
2460 target_ulong *val)
2462 *val = env->vsatp;
2463 return RISCV_EXCP_NONE;
2466 static RISCVException write_vsatp(CPURISCVState *env, int csrno,
2467 target_ulong val)
2469 env->vsatp = val;
2470 return RISCV_EXCP_NONE;
2473 static RISCVException read_mtval2(CPURISCVState *env, int csrno,
2474 target_ulong *val)
2476 *val = env->mtval2;
2477 return RISCV_EXCP_NONE;
2480 static RISCVException write_mtval2(CPURISCVState *env, int csrno,
2481 target_ulong val)
2483 env->mtval2 = val;
2484 return RISCV_EXCP_NONE;
2487 static RISCVException read_mtinst(CPURISCVState *env, int csrno,
2488 target_ulong *val)
2490 *val = env->mtinst;
2491 return RISCV_EXCP_NONE;
2494 static RISCVException write_mtinst(CPURISCVState *env, int csrno,
2495 target_ulong val)
2497 env->mtinst = val;
2498 return RISCV_EXCP_NONE;
2501 /* Physical Memory Protection */
2502 static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
2503 target_ulong *val)
2505 *val = mseccfg_csr_read(env);
2506 return RISCV_EXCP_NONE;
2509 static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
2510 target_ulong val)
2512 mseccfg_csr_write(env, val);
2513 return RISCV_EXCP_NONE;
2516 static bool check_pmp_reg_index(CPURISCVState *env, uint32_t reg_index)
2518 /* TODO: RV128 restriction check */
2519 if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
2520 return false;
2522 return true;
2525 static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
2526 target_ulong *val)
2528 uint32_t reg_index = csrno - CSR_PMPCFG0;
2530 if (!check_pmp_reg_index(env, reg_index)) {
2531 return RISCV_EXCP_ILLEGAL_INST;
2533 *val = pmpcfg_csr_read(env, csrno - CSR_PMPCFG0);
2534 return RISCV_EXCP_NONE;
2537 static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
2538 target_ulong val)
2540 uint32_t reg_index = csrno - CSR_PMPCFG0;
2542 if (!check_pmp_reg_index(env, reg_index)) {
2543 return RISCV_EXCP_ILLEGAL_INST;
2545 pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val);
2546 return RISCV_EXCP_NONE;
2549 static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
2550 target_ulong *val)
2552 *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
2553 return RISCV_EXCP_NONE;
2556 static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
2557 target_ulong val)
2559 pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
2560 return RISCV_EXCP_NONE;
2564 * Functions to access Pointer Masking feature registers.
2565 * We have to check whether the current privilege level is allowed to
2566 * modify the csr for the given mode.
2568 static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
2570 int csr_priv = get_field(csrno, 0x300);
2571 int pm_current;
2573 if (env->debugger) {
2574 return false;
2577 * If the priv lvls differ, we're accessing the csr from a higher priv lvl,
2578 * so allow the access.
2580 if (env->priv != csr_priv) {
2581 return false;
2583 switch (env->priv) {
2584 case PRV_M:
2585 pm_current = get_field(env->mmte, M_PM_CURRENT);
2586 break;
2587 case PRV_S:
2588 pm_current = get_field(env->mmte, S_PM_CURRENT);
2589 break;
2590 case PRV_U:
2591 pm_current = get_field(env->mmte, U_PM_CURRENT);
2592 break;
2593 default:
2594 g_assert_not_reached();
2596 /* It's the same priv lvl, so modifying the csr is allowed only if pm.current==1 */
2597 return !pm_current;
2600 static RISCVException read_mmte(CPURISCVState *env, int csrno,
2601 target_ulong *val)
2603 *val = env->mmte & MMTE_MASK;
2604 return RISCV_EXCP_NONE;
2607 static RISCVException write_mmte(CPURISCVState *env, int csrno,
2608 target_ulong val)
2610 uint64_t mstatus;
2611 target_ulong wpri_val = val & MMTE_MASK;
2613 if (val != wpri_val) {
2614 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
2615 "MMTE: WPRI violation written 0x", val,
2616 "vs expected 0x", wpri_val);
2618 /* for machine mode pm.current is hardwired to 1 */
2619 wpri_val |= MMTE_M_PM_CURRENT;
2621 /* hardwiring pm.instruction bit to 0, since it's not supported yet */
2622 wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
2623 env->mmte = wpri_val | PM_EXT_DIRTY;
2624 riscv_cpu_update_mask(env);
2626 /* Set XS and SD bits, since PM CSRs are dirty */
2627 mstatus = env->mstatus | MSTATUS_XS;
2628 write_mstatus(env, csrno, mstatus);
2629 return RISCV_EXCP_NONE;
2632 static RISCVException read_smte(CPURISCVState *env, int csrno,
2633 target_ulong *val)
2635 *val = env->mmte & SMTE_MASK;
2636 return RISCV_EXCP_NONE;
2639 static RISCVException write_smte(CPURISCVState *env, int csrno,
2640 target_ulong val)
2642 target_ulong wpri_val = val & SMTE_MASK;
2644 if (val != wpri_val) {
2645 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
2646 "SMTE: WPRI violation written 0x", val,
2647 "vs expected 0x", wpri_val);
2650 /* if pm.current==0 we can't modify current PM CSRs */
2651 if (check_pm_current_disabled(env, csrno)) {
2652 return RISCV_EXCP_NONE;
2655 wpri_val |= (env->mmte & ~SMTE_MASK);
2656 write_mmte(env, csrno, wpri_val);
2657 return RISCV_EXCP_NONE;
2660 static RISCVException read_umte(CPURISCVState *env, int csrno,
2661 target_ulong *val)
2663 *val = env->mmte & UMTE_MASK;
2664 return RISCV_EXCP_NONE;
2667 static RISCVException write_umte(CPURISCVState *env, int csrno,
2668 target_ulong val)
2670 target_ulong wpri_val = val & UMTE_MASK;
2672 if (val != wpri_val) {
2673 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
2674 "UMTE: WPRI violation written 0x", val,
2675 "vs expected 0x", wpri_val);
2678 if (check_pm_current_disabled(env, csrno)) {
2679 return RISCV_EXCP_NONE;
2682 wpri_val |= (env->mmte & ~UMTE_MASK);
2683 write_mmte(env, csrno, wpri_val);
2684 return RISCV_EXCP_NONE;
2687 static RISCVException read_mpmmask(CPURISCVState *env, int csrno,
2688 target_ulong *val)
2690 *val = env->mpmmask;
2691 return RISCV_EXCP_NONE;
2694 static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
2695 target_ulong val)
2697 uint64_t mstatus;
2699 env->mpmmask = val;
2700 if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
2701 env->cur_pmmask = val;
2703 env->mmte |= PM_EXT_DIRTY;
2705 /* Set XS and SD bits, since PM CSRs are dirty */
2706 mstatus = env->mstatus | MSTATUS_XS;
2707 write_mstatus(env, csrno, mstatus);
2708 return RISCV_EXCP_NONE;
2711 static RISCVException read_spmmask(CPURISCVState *env, int csrno,
2712 target_ulong *val)
2714 *val = env->spmmask;
2715 return RISCV_EXCP_NONE;
2718 static RISCVException write_spmmask(CPURISCVState *env, int csrno,
2719 target_ulong val)
2721 uint64_t mstatus;
2723 /* if pm.current==0 we can't modify current PM CSRs */
2724 if (check_pm_current_disabled(env, csrno)) {
2725 return RISCV_EXCP_NONE;
2727 env->spmmask = val;
2728 if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
2729 env->cur_pmmask = val;
2731 env->mmte |= PM_EXT_DIRTY;
2733 /* Set XS and SD bits, since PM CSRs are dirty */
2734 mstatus = env->mstatus | MSTATUS_XS;
2735 write_mstatus(env, csrno, mstatus);
2736 return RISCV_EXCP_NONE;
2739 static RISCVException read_upmmask(CPURISCVState *env, int csrno,
2740 target_ulong *val)
2742 *val = env->upmmask;
2743 return RISCV_EXCP_NONE;
2746 static RISCVException write_upmmask(CPURISCVState *env, int csrno,
2747 target_ulong val)
2749 uint64_t mstatus;
2751 /* if pm.current==0 we can't modify current PM CSRs */
2752 if (check_pm_current_disabled(env, csrno)) {
2753 return RISCV_EXCP_NONE;
2755 env->upmmask = val;
2756 if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
2757 env->cur_pmmask = val;
2759 env->mmte |= PM_EXT_DIRTY;
2761 /* Set XS and SD bits, since PM CSRs are dirty */
2762 mstatus = env->mstatus | MSTATUS_XS;
2763 write_mstatus(env, csrno, mstatus);
2764 return RISCV_EXCP_NONE;
2767 static RISCVException read_mpmbase(CPURISCVState *env, int csrno,
2768 target_ulong *val)
2770 *val = env->mpmbase;
2771 return RISCV_EXCP_NONE;
2774 static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
2775 target_ulong val)
2777 uint64_t mstatus;
2779 env->mpmbase = val;
2780 if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
2781 env->cur_pmbase = val;
2783 env->mmte |= PM_EXT_DIRTY;
2785 /* Set XS and SD bits, since PM CSRs are dirty */
2786 mstatus = env->mstatus | MSTATUS_XS;
2787 write_mstatus(env, csrno, mstatus);
2788 return RISCV_EXCP_NONE;
2791 static RISCVException read_spmbase(CPURISCVState *env, int csrno,
2792 target_ulong *val)
2794 *val = env->spmbase;
2795 return RISCV_EXCP_NONE;
2798 static RISCVException write_spmbase(CPURISCVState *env, int csrno,
2799 target_ulong val)
2801 uint64_t mstatus;
2803 /* if pm.current==0 we can't modify current PM CSRs */
2804 if (check_pm_current_disabled(env, csrno)) {
2805 return RISCV_EXCP_NONE;
2807 env->spmbase = val;
2808 if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
2809 env->cur_pmbase = val;
2811 env->mmte |= PM_EXT_DIRTY;
2813 /* Set XS and SD bits, since PM CSRs are dirty */
2814 mstatus = env->mstatus | MSTATUS_XS;
2815 write_mstatus(env, csrno, mstatus);
2816 return RISCV_EXCP_NONE;
2819 static RISCVException read_upmbase(CPURISCVState *env, int csrno,
2820 target_ulong *val)
2822 *val = env->upmbase;
2823 return RISCV_EXCP_NONE;
2826 static RISCVException write_upmbase(CPURISCVState *env, int csrno,
2827 target_ulong val)
2829 uint64_t mstatus;
2831 /* if pm.current==0 we can't modify current PM CSRs */
2832 if (check_pm_current_disabled(env, csrno)) {
2833 return RISCV_EXCP_NONE;
2835 env->upmbase = val;
2836 if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
2837 env->cur_pmbase = val;
2839 env->mmte |= PM_EXT_DIRTY;
2841 /* Set XS and SD bits, since PM CSRs are dirty */
2842 mstatus = env->mstatus | MSTATUS_XS;
2843 write_mstatus(env, csrno, mstatus);
2844 return RISCV_EXCP_NONE;
2847 #endif
2850 * riscv_csrrw - read and/or update control and status register
2852 * csrr <-> riscv_csrrw(env, csrno, ret_value, 0, 0);
2853 * csrrw <-> riscv_csrrw(env, csrno, ret_value, value, -1);
2854 * csrrs <-> riscv_csrrw(env, csrno, ret_value, -1, value);
2855 * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value);
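A minimal standalone sketch of how those four instruction forms reduce to the (new_value, write_mask) composition performed by riscv_csrrw_do64() below; the helper and the values are illustrative, not part of this file:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* the masked update used by riscv_csrrw_do64() */
static uint64_t csr_update(uint64_t old, uint64_t new_value, uint64_t write_mask)
{
    return (old & ~write_mask) | (new_value & write_mask);
}

int main(void)
{
    uint64_t csr = 0x0f0f;

    /* csrrw: new_value = rs1, write_mask = -1  -> plain replace */
    assert(csr_update(csr, 0x1234, (uint64_t)-1) == 0x1234);
    /* csrrs: new_value = -1, write_mask = rs1  -> set the rs1 bits */
    assert(csr_update(csr, (uint64_t)-1, 0x00f0) == 0x0fff);
    /* csrrc: new_value = 0, write_mask = rs1   -> clear the rs1 bits */
    assert(csr_update(csr, 0, 0x000f) == 0x0f00);
    /* csrr: write_mask = 0 -> nothing changes (the real code skips the write) */
    assert(csr_update(csr, 0, 0) == csr);

    puts("csrr/csrrw/csrrs/csrrc mask composition behaves as documented");
    return 0;
}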
2858 static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
2859 int csrno,
2860 bool write_mask,
2861 RISCVCPU *cpu)
2863 /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
2864 int read_only = get_field(csrno, 0xC00) == 3;
2865 #if !defined(CONFIG_USER_ONLY)
2866 int effective_priv = env->priv;
2868 if (riscv_has_ext(env, RVH) &&
2869 env->priv == PRV_S &&
2870 !riscv_cpu_virt_enabled(env)) {
2872 * We are in S mode without virtualisation, therefore we are in HS Mode.
2873 * Add 1 to the effective privilege level to allow us to access the
2874 * Hypervisor CSRs.
2876 effective_priv++;
2879 if (!env->debugger && (effective_priv < get_field(csrno, 0x300))) {
2880 return RISCV_EXCP_ILLEGAL_INST;
2882 #endif
2883 if (write_mask && read_only) {
2884 return RISCV_EXCP_ILLEGAL_INST;
2887 /* ensure the CSR extension is enabled. */
2888 if (!cpu->cfg.ext_icsr) {
2889 return RISCV_EXCP_ILLEGAL_INST;
2892 /* check predicate */
2893 if (!csr_ops[csrno].predicate) {
2894 return RISCV_EXCP_ILLEGAL_INST;
2897 return csr_ops[csrno].predicate(env, csrno);
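For reference, a minimal standalone sketch of how the 0xC00 and 0x300 masks used above decode a CSR address, with a local stand-in for get_field() and the standard privileged-spec CSR numbers (not taken from this file):

#include <stdio.h>

/* local stand-in for get_field(): extract the bits selected by mask, shifted down */
static unsigned get_field_sketch(unsigned reg, unsigned mask)
{
    return (reg & mask) / (mask & ~(mask << 1));
}

int main(void)
{
    /* well-known CSR addresses from the RISC-V privileged spec */
    unsigned csrs[] = { 0x100 /* sstatus */, 0x300 /* mstatus */,
                        0xC00 /* cycle */, 0xF14 /* mhartid */ };

    for (unsigned i = 0; i < sizeof(csrs) / sizeof(csrs[0]); i++) {
        unsigned csrno = csrs[i];
        unsigned min_priv = get_field_sketch(csrno, 0x300);  /* bits [9:8]   */
        int read_only = get_field_sketch(csrno, 0xC00) == 3; /* bits [11:10] */

        printf("csr 0x%03x: min priv %u, %s\n",
               csrno, min_priv, read_only ? "read-only" : "read/write");
    }
    return 0;
}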
2900 static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
2901 target_ulong *ret_value,
2902 target_ulong new_value,
2903 target_ulong write_mask)
2905 RISCVException ret;
2906 target_ulong old_value;
2908 /* execute combined read/write operation if it exists */
2909 if (csr_ops[csrno].op) {
2910 return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
2913 /* if no accessor exists then return failure */
2914 if (!csr_ops[csrno].read) {
2915 return RISCV_EXCP_ILLEGAL_INST;
2917 /* read old value */
2918 ret = csr_ops[csrno].read(env, csrno, &old_value);
2919 if (ret != RISCV_EXCP_NONE) {
2920 return ret;
2923 /* write value if writable and write mask set, otherwise drop writes */
2924 if (write_mask) {
2925 new_value = (old_value & ~write_mask) | (new_value & write_mask);
2926 if (csr_ops[csrno].write) {
2927 ret = csr_ops[csrno].write(env, csrno, new_value);
2928 if (ret != RISCV_EXCP_NONE) {
2929 return ret;
2934 /* return old value */
2935 if (ret_value) {
2936 *ret_value = old_value;
2939 return RISCV_EXCP_NONE;
2942 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
2943 target_ulong *ret_value,
2944 target_ulong new_value, target_ulong write_mask)
2946 RISCVCPU *cpu = env_archcpu(env);
2948 RISCVException ret = riscv_csrrw_check(env, csrno, write_mask, cpu);
2949 if (ret != RISCV_EXCP_NONE) {
2950 return ret;
2953 return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
2956 static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
2957 Int128 *ret_value,
2958 Int128 new_value,
2959 Int128 write_mask)
2961 RISCVException ret;
2962 Int128 old_value;
2964 /* read old value */
2965 ret = csr_ops[csrno].read128(env, csrno, &old_value);
2966 if (ret != RISCV_EXCP_NONE) {
2967 return ret;
2970 /* write value if writable and write mask set, otherwise drop writes */
2971 if (int128_nz(write_mask)) {
2972 new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
2973 int128_and(new_value, write_mask));
2974 if (csr_ops[csrno].write128) {
2975 ret = csr_ops[csrno].write128(env, csrno, new_value);
2976 if (ret != RISCV_EXCP_NONE) {
2977 return ret;
2979 } else if (csr_ops[csrno].write) {
2980 /* avoids having to write wrappers for all registers */
2981 ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
2982 if (ret != RISCV_EXCP_NONE) {
2983 return ret;
2988 /* return old value */
2989 if (ret_value) {
2990 *ret_value = old_value;
2993 return RISCV_EXCP_NONE;
2996 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
2997 Int128 *ret_value,
2998 Int128 new_value, Int128 write_mask)
3000 RISCVException ret;
3001 RISCVCPU *cpu = env_archcpu(env);
3003 ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask), cpu);
3004 if (ret != RISCV_EXCP_NONE) {
3005 return ret;
3008 if (csr_ops[csrno].read128) {
3009 return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
3013 * Fall back to the 64-bit version for now, if the 128-bit alternative isn't
3014 * defined at all.
3015 * Note that some CSRs don't need to extend to MXLEN (their upper 64 bits are
3016 * not significant); for those, this fallback handles the accesses correctly.
3018 target_ulong old_value;
3019 ret = riscv_csrrw_do64(env, csrno, &old_value,
3020 int128_getlo(new_value),
3021 int128_getlo(write_mask));
3022 if (ret == RISCV_EXCP_NONE && ret_value) {
3023 *ret_value = int128_make64(old_value);
3025 return ret;
3029 * Debugger support. If not in user mode, set env->debugger before the
3030 * riscv_csrrw call and clear it after the call.
3032 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
3033 target_ulong *ret_value,
3034 target_ulong new_value,
3035 target_ulong write_mask)
3037 RISCVException ret;
3038 #if !defined(CONFIG_USER_ONLY)
3039 env->debugger = true;
3040 #endif
3041 ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
3042 #if !defined(CONFIG_USER_ONLY)
3043 env->debugger = false;
3044 #endif
3045 return ret;
3048 /* Control and Status Register function table */
3049 riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
3050 /* User Floating-Point CSRs */
3051 [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
3052 [CSR_FRM] = { "frm", fs, read_frm, write_frm },
3053 [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr },
3054 /* Vector CSRs */
3055 [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart },
3056 [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat },
3057 [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm },
3058 [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr },
3059 [CSR_VL] = { "vl", vs, read_vl },
3060 [CSR_VTYPE] = { "vtype", vs, read_vtype },
3061 [CSR_VLENB] = { "vlenb", vs, read_vlenb },
3062 /* User Timers and Counters */
3063 [CSR_CYCLE] = { "cycle", ctr, read_instret },
3064 [CSR_INSTRET] = { "instret", ctr, read_instret },
3065 [CSR_CYCLEH] = { "cycleh", ctr32, read_instreth },
3066 [CSR_INSTRETH] = { "instreth", ctr32, read_instreth },
3069 * In privileged mode, the monitor will have to emulate TIME CSRs only if
3070 * the rdtime callback is not provided by machine/platform emulation.
3072 [CSR_TIME] = { "time", ctr, read_time },
3073 [CSR_TIMEH] = { "timeh", ctr32, read_timeh },
3075 #if !defined(CONFIG_USER_ONLY)
3076 /* Machine Timers and Counters */
3077 [CSR_MCYCLE] = { "mcycle", any, read_instret },
3078 [CSR_MINSTRET] = { "minstret", any, read_instret },
3079 [CSR_MCYCLEH] = { "mcycleh", any32, read_instreth },
3080 [CSR_MINSTRETH] = { "minstreth", any32, read_instreth },
3082 /* Machine Information Registers */
3083 [CSR_MVENDORID] = { "mvendorid", any, read_zero },
3084 [CSR_MARCHID] = { "marchid", any, read_zero },
3085 [CSR_MIMPID] = { "mimpid", any, read_zero },
3086 [CSR_MHARTID] = { "mhartid", any, read_mhartid },
3088 /* Machine Trap Setup */
3089 [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus, NULL,
3090 read_mstatus_i128 },
3091 [CSR_MISA] = { "misa", any, read_misa, write_misa, NULL,
3092 read_misa_i128 },
3093 [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg },
3094 [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg },
3095 [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie },
3096 [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec },
3097 [CSR_MCOUNTEREN] = { "mcounteren", any, read_mcounteren, write_mcounteren },
3099 [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush, write_mstatush },
3101 /* Machine Trap Handling */
3102 [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch, NULL,
3103 read_mscratch_i128, write_mscratch_i128 },
3104 [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc },
3105 [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause },
3106 [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval },
3107 [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
3109 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */
3110 [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect },
3111 [CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg },
3113 /* Machine-Level Interrupts (AIA) */
3114 [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },
3116 /* Machine-Level IMSIC Interface (AIA) */
3117 [CSR_MSETEIPNUM] = { "mseteipnum", aia_any, NULL, NULL, rmw_xsetclreinum },
3118 [CSR_MCLREIPNUM] = { "mclreipnum", aia_any, NULL, NULL, rmw_xsetclreinum },
3119 [CSR_MSETEIENUM] = { "mseteienum", aia_any, NULL, NULL, rmw_xsetclreinum },
3120 [CSR_MCLREIENUM] = { "mclreienum", aia_any, NULL, NULL, rmw_xsetclreinum },
3121 [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
3123 /* Virtual Interrupts for Supervisor Level (AIA) */
3124 [CSR_MVIEN] = { "mvien", aia_any, read_zero, write_ignore },
3125 [CSR_MVIP] = { "mvip", aia_any, read_zero, write_ignore },
3127 /* Machine-Level High-Half CSRs (AIA) */
3128 [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh },
3129 [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh },
3130 [CSR_MVIENH] = { "mvienh", aia_any32, read_zero, write_ignore },
3131 [CSR_MVIPH] = { "mviph", aia_any32, read_zero, write_ignore },
3132 [CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph },
3134 /* Supervisor Trap Setup */
3135 [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus, NULL,
3136 read_sstatus_i128 },
3137 [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie },
3138 [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec },
3139 [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren, write_scounteren },
3141 /* Supervisor Trap Handling */
3142 [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch, NULL,
3143 read_sscratch_i128, write_sscratch_i128 },
3144 [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc },
3145 [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause },
3146 [CSR_STVAL] = { "stval", smode, read_stval, write_stval },
3147 [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip },
3149 /* Supervisor Protection and Translation */
3150 [CSR_SATP] = { "satp", smode, read_satp, write_satp },
3152 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
3153 [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect },
3154 [CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg },
3156 /* Supervisor-Level Interrupts (AIA) */
3157 [CSR_STOPI] = { "stopi", aia_smode, read_stopi },
3159 /* Supervisor-Level IMSIC Interface (AIA) */
3160 [CSR_SSETEIPNUM] = { "sseteipnum", aia_smode, NULL, NULL, rmw_xsetclreinum },
3161 [CSR_SCLREIPNUM] = { "sclreipnum", aia_smode, NULL, NULL, rmw_xsetclreinum },
3162 [CSR_SSETEIENUM] = { "sseteienum", aia_smode, NULL, NULL, rmw_xsetclreinum },
3163 [CSR_SCLREIENUM] = { "sclreienum", aia_smode, NULL, NULL, rmw_xsetclreinum },
3164 [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
3166 /* Supervisor-Level High-Half CSRs (AIA) */
3167 [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh },
3168 [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph },
3170 [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus },
3171 [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg },
3172 [CSR_HIDELEG] = { "hideleg", hmode, NULL, NULL, rmw_hideleg },
3173 [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip },
3174 [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip },
3175 [CSR_HIE] = { "hie", hmode, NULL, NULL, rmw_hie },
3176 [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren, write_hcounteren },
3177 [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie },
3178 [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval },
3179 [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst },
3180 [CSR_HGEIP] = { "hgeip", hmode, read_hgeip, NULL },
3181 [CSR_HGATP] = { "hgatp", hmode, read_hgatp, write_hgatp },
3182 [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta, write_htimedelta },
3183 [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah, write_htimedeltah },
3185 [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus, write_vsstatus },
3186 [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip },
3187 [CSR_VSIE] = { "vsie", hmode, NULL, NULL, rmw_vsie },
3188 [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec },
3189 [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch, write_vsscratch },
3190 [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc },
3191 [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause },
3192 [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval },
3193 [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp },
3195 [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2 },
3196 [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst },
3198 /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
3199 [CSR_HVIEN] = { "hvien", aia_hmode, read_zero, write_ignore },
3200 [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl, write_hvictl },
3201 [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1, write_hviprio1 },
3202 [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2, write_hviprio2 },
3205 * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
3207 [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL, rmw_xiselect },
3208 [CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg },
3210 /* VS-Level Interrupts (H-extension with AIA) */
3211 [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi },
3213 /* VS-Level IMSIC Interface (H-extension with AIA) */
3214 [CSR_VSSETEIPNUM] = { "vsseteipnum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
3215 [CSR_VSCLREIPNUM] = { "vsclreipnum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
3216 [CSR_VSSETEIENUM] = { "vsseteienum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
3217 [CSR_VSCLREIENUM] = { "vsclreienum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
3218 [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
3220 /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
3221 [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL, rmw_hidelegh },
3222 [CSR_HVIENH] = { "hvienh", aia_hmode32, read_zero, write_ignore },
3223 [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph },
3224 [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h, write_hviprio1h },
3225 [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h, write_hviprio2h },
3226 [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh },
3227 [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph },
3229 /* Physical Memory Protection */
3230 [CSR_MSECCFG] = { "mseccfg", epmp, read_mseccfg, write_mseccfg },
3231 [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg },
3232 [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg },
3233 [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg },
3234 [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg },
3235 [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr },
3236 [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr },
3237 [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr },
3238 [CSR_PMPADDR3] = { "pmpaddr3", pmp, read_pmpaddr, write_pmpaddr },
3239 [CSR_PMPADDR4] = { "pmpaddr4", pmp, read_pmpaddr, write_pmpaddr },
3240 [CSR_PMPADDR5] = { "pmpaddr5", pmp, read_pmpaddr, write_pmpaddr },
3241 [CSR_PMPADDR6] = { "pmpaddr6", pmp, read_pmpaddr, write_pmpaddr },
3242 [CSR_PMPADDR7] = { "pmpaddr7", pmp, read_pmpaddr, write_pmpaddr },
3243 [CSR_PMPADDR8] = { "pmpaddr8", pmp, read_pmpaddr, write_pmpaddr },
3244 [CSR_PMPADDR9] = { "pmpaddr9", pmp, read_pmpaddr, write_pmpaddr },
3245 [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr },
3246 [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
3247 [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
3248 [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
3249 [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
3250 [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
3252 /* User Pointer Masking */
3253 [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte },
3254 [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask, write_upmmask },
3255 [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase, write_upmbase },
3256 /* Machine Pointer Masking */
3257 [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte },
3258 [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask, write_mpmmask },
3259 [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase, write_mpmbase },
3260 /* Supervisor Pointer Masking */
3261 [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte },
3262 [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask, write_spmmask },
3263 [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase, write_spmbase },
3265 /* Performance Counters */
3266 [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_zero },
3267 [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_zero },
3268 [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_zero },
3269 [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_zero },
3270 [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_zero },
3271 [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_zero },
3272 [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_zero },
3273 [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_zero },
3274 [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_zero },
3275 [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_zero },
3276 [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_zero },
3277 [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_zero },
3278 [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_zero },
3279 [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_zero },
3280 [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_zero },
3281 [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_zero },
3282 [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_zero },
3283 [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_zero },
3284 [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_zero },
3285 [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_zero },
3286 [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_zero },
3287 [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_zero },
3288 [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_zero },
3289 [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_zero },
3290 [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_zero },
3291 [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_zero },
3292 [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_zero },
3293 [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_zero },
3294 [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_zero },
3296 [CSR_MHPMCOUNTER3] = { "mhpmcounter3", any, read_zero },
3297 [CSR_MHPMCOUNTER4] = { "mhpmcounter4", any, read_zero },
3298 [CSR_MHPMCOUNTER5] = { "mhpmcounter5", any, read_zero },
3299 [CSR_MHPMCOUNTER6] = { "mhpmcounter6", any, read_zero },
3300 [CSR_MHPMCOUNTER7] = { "mhpmcounter7", any, read_zero },
3301 [CSR_MHPMCOUNTER8] = { "mhpmcounter8", any, read_zero },
3302 [CSR_MHPMCOUNTER9] = { "mhpmcounter9", any, read_zero },
3303 [CSR_MHPMCOUNTER10] = { "mhpmcounter10", any, read_zero },
3304 [CSR_MHPMCOUNTER11] = { "mhpmcounter11", any, read_zero },
3305 [CSR_MHPMCOUNTER12] = { "mhpmcounter12", any, read_zero },
3306 [CSR_MHPMCOUNTER13] = { "mhpmcounter13", any, read_zero },
3307 [CSR_MHPMCOUNTER14] = { "mhpmcounter14", any, read_zero },
3308 [CSR_MHPMCOUNTER15] = { "mhpmcounter15", any, read_zero },
3309 [CSR_MHPMCOUNTER16] = { "mhpmcounter16", any, read_zero },
3310 [CSR_MHPMCOUNTER17] = { "mhpmcounter17", any, read_zero },
3311 [CSR_MHPMCOUNTER18] = { "mhpmcounter18", any, read_zero },
3312 [CSR_MHPMCOUNTER19] = { "mhpmcounter19", any, read_zero },
3313 [CSR_MHPMCOUNTER20] = { "mhpmcounter20", any, read_zero },
3314 [CSR_MHPMCOUNTER21] = { "mhpmcounter21", any, read_zero },
3315 [CSR_MHPMCOUNTER22] = { "mhpmcounter22", any, read_zero },
3316 [CSR_MHPMCOUNTER23] = { "mhpmcounter23", any, read_zero },
3317 [CSR_MHPMCOUNTER24] = { "mhpmcounter24", any, read_zero },
3318 [CSR_MHPMCOUNTER25] = { "mhpmcounter25", any, read_zero },
3319 [CSR_MHPMCOUNTER26] = { "mhpmcounter26", any, read_zero },
3320 [CSR_MHPMCOUNTER27] = { "mhpmcounter27", any, read_zero },
3321 [CSR_MHPMCOUNTER28] = { "mhpmcounter28", any, read_zero },
3322 [CSR_MHPMCOUNTER29] = { "mhpmcounter29", any, read_zero },
3323 [CSR_MHPMCOUNTER30] = { "mhpmcounter30", any, read_zero },
3324 [CSR_MHPMCOUNTER31] = { "mhpmcounter31", any, read_zero },
3326 [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_zero },
3327 [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_zero },
3328 [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_zero },
3329 [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_zero },
3330 [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_zero },
3331 [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_zero },
3332 [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_zero },
3333 [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_zero },
3334 [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_zero },
3335 [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_zero },
3336 [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_zero },
3337 [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_zero },
3338 [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_zero },
3339 [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_zero },
3340 [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_zero },
3341 [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_zero },
3342 [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_zero },
3343 [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_zero },
3344 [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_zero },
3345 [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_zero },
3346 [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_zero },
3347 [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_zero },
3348 [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_zero },
3349 [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_zero },
3350 [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_zero },
3351 [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_zero },
3352 [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_zero },
3353 [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_zero },
3354 [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_zero },
3356 [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_zero },
3357 [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_zero },
3358 [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_zero },
3359 [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_zero },
3360 [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_zero },
3361 [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_zero },
3362 [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_zero },
3363 [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_zero },
3364 [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_zero },
3365 [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_zero },
3366 [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_zero },
3367 [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_zero },
3368 [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_zero },
3369 [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_zero },
3370 [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_zero },
3371 [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_zero },
3372 [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_zero },
3373 [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_zero },
3374 [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_zero },
3375 [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_zero },
3376 [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_zero },
3377 [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_zero },
3378 [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_zero },
3379 [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_zero },
3380 [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_zero },
3381 [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_zero },
3382 [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_zero },
3383 [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_zero },
3384 [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_zero },
3386 [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", any32, read_zero },
3387 [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", any32, read_zero },
3388 [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", any32, read_zero },
3389 [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", any32, read_zero },
3390 [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", any32, read_zero },
3391 [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", any32, read_zero },
3392 [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", any32, read_zero },
3393 [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", any32, read_zero },
3394 [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", any32, read_zero },
3395 [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", any32, read_zero },
3396 [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", any32, read_zero },
3397 [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", any32, read_zero },
3398 [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", any32, read_zero },
3399 [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", any32, read_zero },
3400 [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", any32, read_zero },
3401 [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", any32, read_zero },
3402 [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", any32, read_zero },
3403 [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", any32, read_zero },
3404 [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", any32, read_zero },
3405 [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", any32, read_zero },
3406 [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", any32, read_zero },
3407 [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", any32, read_zero },
3408 [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", any32, read_zero },
3409 [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", any32, read_zero },
3410 [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", any32, read_zero },
3411 [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", any32, read_zero },
3412 [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", any32, read_zero },
3413 [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", any32, read_zero },
3414 [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", any32, read_zero },
3415 #endif /* !CONFIG_USER_ONLY */