qemu/ar7.git: target/riscv/csr.c (blob 0db2c233e5dfb7419150208712d8199612099e1f)
1 /*
2 * RISC-V Control and Status Registers.
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/timer.h"
23 #include "cpu.h"
24 #include "pmu.h"
25 #include "time_helper.h"
26 #include "qemu/main-loop.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/cpu-timers.h"
29 #include "qemu/guest-random.h"
30 #include "qapi/error.h"
32 /* CSR function table public API */
33 void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
35 *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
38 void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
40 csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
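/*
 * An illustrative (hypothetical) use of the two helpers above: a machine
 * model that wants a board-specific mimpid value could copy the default
 * table entry, swap in its own read hook and install the result.  The
 * handler name below is made up for the example.
 *
 *     static RISCVException board_read_mimpid(CPURISCVState *env, int csrno,
 *                                             target_ulong *val)
 *     {
 *         *val = 0x1234;                    // board-specific implementation ID
 *         return RISCV_EXCP_NONE;
 *     }
 *
 *     riscv_csr_operations ops;
 *     riscv_get_csr_ops(CSR_MIMPID, &ops);  // copy the default entry
 *     ops.read = board_read_mimpid;         // replace only the read hook
 *     riscv_set_csr_ops(CSR_MIMPID, &ops);  // install the modified entry
 */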
43 /* Predicates */
44 #if !defined(CONFIG_USER_ONLY)
45 static RISCVException smstateen_acc_ok(CPURISCVState *env, int index,
46 uint64_t bit)
48 bool virt = riscv_cpu_virt_enabled(env);
49 CPUState *cs = env_cpu(env);
50 RISCVCPU *cpu = RISCV_CPU(cs);
52 if (env->priv == PRV_M || !cpu->cfg.ext_smstateen) {
53 return RISCV_EXCP_NONE;
56 if (!(env->mstateen[index] & bit)) {
57 return RISCV_EXCP_ILLEGAL_INST;
60 if (virt) {
61 if (!(env->hstateen[index] & bit)) {
62 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
65 if (env->priv == PRV_U && !(env->sstateen[index] & bit)) {
66 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
70 if (env->priv == PRV_U && riscv_has_ext(env, RVS)) {
71 if (!(env->sstateen[index] & bit)) {
72 return RISCV_EXCP_ILLEGAL_INST;
76 return RISCV_EXCP_NONE;
78 #endif
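/*
 * Summary of the check above: for a CSR gated by an Smstateen bit, the bit
 * must be set at every privilege level between the accessor and M-mode.  A
 * clear bit in mstateen0 always raises an illegal instruction exception;
 * with virtualization enabled, a clear bit in hstateen0 (or in sstateen0
 * for a VU-mode access) raises a virtual instruction exception instead, so
 * the hypervisor gets a chance to emulate the access.  A plain U-mode
 * access with S-mode present additionally requires the sstateen0 bit.
 */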
80 static RISCVException fs(CPURISCVState *env, int csrno)
82 #if !defined(CONFIG_USER_ONLY)
83 if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
84 !RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
85 return RISCV_EXCP_ILLEGAL_INST;
87 #endif
88 return RISCV_EXCP_NONE;
91 static RISCVException vs(CPURISCVState *env, int csrno)
93 CPUState *cs = env_cpu(env);
94 RISCVCPU *cpu = RISCV_CPU(cs);
96 if (env->misa_ext & RVV ||
97 cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
98 #if !defined(CONFIG_USER_ONLY)
99 if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
100 return RISCV_EXCP_ILLEGAL_INST;
102 #endif
103 return RISCV_EXCP_NONE;
105 return RISCV_EXCP_ILLEGAL_INST;
108 static RISCVException ctr(CPURISCVState *env, int csrno)
110 #if !defined(CONFIG_USER_ONLY)
111 CPUState *cs = env_cpu(env);
112 RISCVCPU *cpu = RISCV_CPU(cs);
113 int ctr_index;
114 target_ulong ctr_mask;
115 int base_csrno = CSR_CYCLE;
116     bool rv32 = riscv_cpu_mxl(env) == MXL_RV32;
118 if (rv32 && csrno >= CSR_CYCLEH) {
119 /* Offset for RV32 hpmcounternh counters */
120 base_csrno += 0x80;
122 ctr_index = csrno - base_csrno;
123 ctr_mask = BIT(ctr_index);
125 if ((csrno >= CSR_CYCLE && csrno <= CSR_INSTRET) ||
126 (csrno >= CSR_CYCLEH && csrno <= CSR_INSTRETH)) {
127 goto skip_ext_pmu_check;
130 if (!(cpu->pmu_avail_ctrs & ctr_mask)) {
131 /* No counter is enabled in PMU or the counter is out of range */
132 return RISCV_EXCP_ILLEGAL_INST;
135 skip_ext_pmu_check:
137 if (env->priv < PRV_M && !get_field(env->mcounteren, ctr_mask)) {
138 return RISCV_EXCP_ILLEGAL_INST;
141 if (riscv_cpu_virt_enabled(env)) {
142 if (!get_field(env->hcounteren, ctr_mask) ||
143 (env->priv == PRV_U && !get_field(env->scounteren, ctr_mask))) {
144 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
148 if (riscv_has_ext(env, RVS) && env->priv == PRV_U &&
149 !get_field(env->scounteren, ctr_mask)) {
150 return RISCV_EXCP_ILLEGAL_INST;
153 #endif
154 return RISCV_EXCP_NONE;
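/*
 * Example of the resulting delegation chain for the cycle counter: a read
 * of CSR_CYCLE from VS-mode succeeds only if both mcounteren.CY and
 * hcounteren.CY are set.  If mcounteren.CY is clear the access raises an
 * illegal instruction exception; if only hcounteren.CY is clear it raises
 * a virtual instruction exception so the hypervisor can emulate it.  A
 * (V)U-mode read additionally requires scounteren.CY.
 */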
157 static RISCVException ctr32(CPURISCVState *env, int csrno)
159 if (riscv_cpu_mxl(env) != MXL_RV32) {
160 return RISCV_EXCP_ILLEGAL_INST;
163 return ctr(env, csrno);
166 #if !defined(CONFIG_USER_ONLY)
167 static RISCVException mctr(CPURISCVState *env, int csrno)
169 CPUState *cs = env_cpu(env);
170 RISCVCPU *cpu = RISCV_CPU(cs);
171 int ctr_index;
172 int base_csrno = CSR_MHPMCOUNTER3;
174 if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) {
175 /* Offset for RV32 mhpmcounternh counters */
176 base_csrno += 0x80;
178 ctr_index = csrno - base_csrno;
179 if (!cpu->cfg.pmu_num || ctr_index >= cpu->cfg.pmu_num) {
180         /* The PMU is not enabled or the counter is out of range */
181 return RISCV_EXCP_ILLEGAL_INST;
184 return RISCV_EXCP_NONE;
187 static RISCVException mctr32(CPURISCVState *env, int csrno)
189 if (riscv_cpu_mxl(env) != MXL_RV32) {
190 return RISCV_EXCP_ILLEGAL_INST;
193 return mctr(env, csrno);
196 static RISCVException sscofpmf(CPURISCVState *env, int csrno)
198 CPUState *cs = env_cpu(env);
199 RISCVCPU *cpu = RISCV_CPU(cs);
201 if (!cpu->cfg.ext_sscofpmf) {
202 return RISCV_EXCP_ILLEGAL_INST;
205 return RISCV_EXCP_NONE;
208 static RISCVException any(CPURISCVState *env, int csrno)
210 return RISCV_EXCP_NONE;
213 static RISCVException any32(CPURISCVState *env, int csrno)
215 if (riscv_cpu_mxl(env) != MXL_RV32) {
216 return RISCV_EXCP_ILLEGAL_INST;
219 return any(env, csrno);
223 static int aia_any(CPURISCVState *env, int csrno)
225 RISCVCPU *cpu = env_archcpu(env);
227 if (!cpu->cfg.ext_smaia) {
228 return RISCV_EXCP_ILLEGAL_INST;
231 return any(env, csrno);
234 static int aia_any32(CPURISCVState *env, int csrno)
236 RISCVCPU *cpu = env_archcpu(env);
238 if (!cpu->cfg.ext_smaia) {
239 return RISCV_EXCP_ILLEGAL_INST;
242 return any32(env, csrno);
245 static RISCVException smode(CPURISCVState *env, int csrno)
247 if (riscv_has_ext(env, RVS)) {
248 return RISCV_EXCP_NONE;
251 return RISCV_EXCP_ILLEGAL_INST;
254 static int smode32(CPURISCVState *env, int csrno)
256 if (riscv_cpu_mxl(env) != MXL_RV32) {
257 return RISCV_EXCP_ILLEGAL_INST;
260 return smode(env, csrno);
263 static int aia_smode(CPURISCVState *env, int csrno)
265 RISCVCPU *cpu = env_archcpu(env);
267 if (!cpu->cfg.ext_ssaia) {
268 return RISCV_EXCP_ILLEGAL_INST;
271 return smode(env, csrno);
274 static int aia_smode32(CPURISCVState *env, int csrno)
276 RISCVCPU *cpu = env_archcpu(env);
278 if (!cpu->cfg.ext_ssaia) {
279 return RISCV_EXCP_ILLEGAL_INST;
282 return smode32(env, csrno);
285 static RISCVException hmode(CPURISCVState *env, int csrno)
287 if (riscv_has_ext(env, RVH)) {
288 return RISCV_EXCP_NONE;
291 return RISCV_EXCP_ILLEGAL_INST;
294 static RISCVException hmode32(CPURISCVState *env, int csrno)
296 if (riscv_cpu_mxl(env) != MXL_RV32) {
297 return RISCV_EXCP_ILLEGAL_INST;
300 return hmode(env, csrno);
304 static RISCVException umode(CPURISCVState *env, int csrno)
306 if (riscv_has_ext(env, RVU)) {
307 return RISCV_EXCP_NONE;
310 return RISCV_EXCP_ILLEGAL_INST;
313 static RISCVException umode32(CPURISCVState *env, int csrno)
315 if (riscv_cpu_mxl(env) != MXL_RV32) {
316 return RISCV_EXCP_ILLEGAL_INST;
319 return umode(env, csrno);
322 static RISCVException mstateen(CPURISCVState *env, int csrno)
324 CPUState *cs = env_cpu(env);
325 RISCVCPU *cpu = RISCV_CPU(cs);
327 if (!cpu->cfg.ext_smstateen) {
328 return RISCV_EXCP_ILLEGAL_INST;
331 return any(env, csrno);
334 static RISCVException hstateen_pred(CPURISCVState *env, int csrno, int base)
336 CPUState *cs = env_cpu(env);
337 RISCVCPU *cpu = RISCV_CPU(cs);
339 if (!cpu->cfg.ext_smstateen) {
340 return RISCV_EXCP_ILLEGAL_INST;
343 if (env->priv < PRV_M) {
344 if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEEN)) {
345 return RISCV_EXCP_ILLEGAL_INST;
349 return hmode(env, csrno);
352 static RISCVException hstateen(CPURISCVState *env, int csrno)
354 return hstateen_pred(env, csrno, CSR_HSTATEEN0);
357 static RISCVException hstateenh(CPURISCVState *env, int csrno)
359 return hstateen_pred(env, csrno, CSR_HSTATEEN0H);
362 static RISCVException sstateen(CPURISCVState *env, int csrno)
364 bool virt = riscv_cpu_virt_enabled(env);
365 int index = csrno - CSR_SSTATEEN0;
366 CPUState *cs = env_cpu(env);
367 RISCVCPU *cpu = RISCV_CPU(cs);
369 if (!cpu->cfg.ext_smstateen) {
370 return RISCV_EXCP_ILLEGAL_INST;
373 if (env->priv < PRV_M) {
374 if (!(env->mstateen[index] & SMSTATEEN_STATEEN)) {
375 return RISCV_EXCP_ILLEGAL_INST;
378 if (virt) {
379 if (!(env->hstateen[index] & SMSTATEEN_STATEEN)) {
380 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
385 return smode(env, csrno);
388 /* Checks if PointerMasking registers could be accessed */
389 static RISCVException pointer_masking(CPURISCVState *env, int csrno)
391 /* Check if j-ext is present */
392 if (riscv_has_ext(env, RVJ)) {
393 return RISCV_EXCP_NONE;
395 return RISCV_EXCP_ILLEGAL_INST;
398 static int aia_hmode(CPURISCVState *env, int csrno)
400 RISCVCPU *cpu = env_archcpu(env);
402 if (!cpu->cfg.ext_ssaia) {
403 return RISCV_EXCP_ILLEGAL_INST;
406 return hmode(env, csrno);
409 static int aia_hmode32(CPURISCVState *env, int csrno)
411 RISCVCPU *cpu = env_archcpu(env);
413 if (!cpu->cfg.ext_ssaia) {
414 return RISCV_EXCP_ILLEGAL_INST;
417 return hmode32(env, csrno);
420 static RISCVException pmp(CPURISCVState *env, int csrno)
422 if (riscv_feature(env, RISCV_FEATURE_PMP)) {
423 return RISCV_EXCP_NONE;
426 return RISCV_EXCP_ILLEGAL_INST;
429 static RISCVException epmp(CPURISCVState *env, int csrno)
431 if (env->priv == PRV_M && riscv_feature(env, RISCV_FEATURE_EPMP)) {
432 return RISCV_EXCP_NONE;
435 return RISCV_EXCP_ILLEGAL_INST;
438 static RISCVException debug(CPURISCVState *env, int csrno)
440 if (riscv_feature(env, RISCV_FEATURE_DEBUG)) {
441 return RISCV_EXCP_NONE;
444 return RISCV_EXCP_ILLEGAL_INST;
446 #endif
448 static RISCVException seed(CPURISCVState *env, int csrno)
450 RISCVCPU *cpu = env_archcpu(env);
452 if (!cpu->cfg.ext_zkr) {
453 return RISCV_EXCP_ILLEGAL_INST;
456 #if !defined(CONFIG_USER_ONLY)
458 * With a CSR read-write instruction:
459 * 1) The seed CSR is always available in machine mode as normal.
460 * 2) Attempted access to seed from virtual modes VS and VU always raises
461  *    an exception (a virtual instruction exception only if mseccfg.sseed=1).
462 * 3) Without the corresponding access control bit set to 1, any attempted
463 * access to seed from U, S or HS modes will raise an illegal instruction
464 * exception.
466 if (env->priv == PRV_M) {
467 return RISCV_EXCP_NONE;
468 } else if (riscv_cpu_virt_enabled(env)) {
469 if (env->mseccfg & MSECCFG_SSEED) {
470 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
471 } else {
472 return RISCV_EXCP_ILLEGAL_INST;
474 } else {
475 if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) {
476 return RISCV_EXCP_NONE;
477 } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) {
478 return RISCV_EXCP_NONE;
479 } else {
480 return RISCV_EXCP_ILLEGAL_INST;
483 #else
484 return RISCV_EXCP_NONE;
485 #endif
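/*
 * The checks above amount to the following access matrix for the seed CSR
 * (read-write access assumed, as Zkr requires):
 *
 *   mode    mseccfg.SSEED  mseccfg.USEED  result
 *   M            -              -         allowed
 *   HS/S         1              -         allowed
 *   HS/S         0              -         illegal instruction
 *   U            -              1         allowed
 *   U            -              0         illegal instruction
 *   VS/VU        1              -         virtual instruction fault
 *   VS/VU        0              -         illegal instruction
 */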
488 /* User Floating-Point CSRs */
489 static RISCVException read_fflags(CPURISCVState *env, int csrno,
490 target_ulong *val)
492 *val = riscv_cpu_get_fflags(env);
493 return RISCV_EXCP_NONE;
496 static RISCVException write_fflags(CPURISCVState *env, int csrno,
497 target_ulong val)
499 #if !defined(CONFIG_USER_ONLY)
500 if (riscv_has_ext(env, RVF)) {
501 env->mstatus |= MSTATUS_FS;
503 #endif
504 riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
505 return RISCV_EXCP_NONE;
508 static RISCVException read_frm(CPURISCVState *env, int csrno,
509 target_ulong *val)
511 *val = env->frm;
512 return RISCV_EXCP_NONE;
515 static RISCVException write_frm(CPURISCVState *env, int csrno,
516 target_ulong val)
518 #if !defined(CONFIG_USER_ONLY)
519 if (riscv_has_ext(env, RVF)) {
520 env->mstatus |= MSTATUS_FS;
522 #endif
523 env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
524 return RISCV_EXCP_NONE;
527 static RISCVException read_fcsr(CPURISCVState *env, int csrno,
528 target_ulong *val)
530 *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
531 | (env->frm << FSR_RD_SHIFT);
532 return RISCV_EXCP_NONE;
535 static RISCVException write_fcsr(CPURISCVState *env, int csrno,
536 target_ulong val)
538 #if !defined(CONFIG_USER_ONLY)
539 if (riscv_has_ext(env, RVF)) {
540 env->mstatus |= MSTATUS_FS;
542 #endif
543 env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
544 riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
545 return RISCV_EXCP_NONE;
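/*
 * fcsr simply aggregates the two CSRs above: frm lives in bits [7:5] and
 * fflags (NV|DZ|OF|UF|NX) in bits [4:0].  For example, writing 0x61 sets
 * frm to 3 (round up) and leaves only the inexact (NX) flag raised.
 */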
548 static RISCVException read_vtype(CPURISCVState *env, int csrno,
549 target_ulong *val)
551 uint64_t vill;
552 switch (env->xl) {
553 case MXL_RV32:
554 vill = (uint32_t)env->vill << 31;
555 break;
556 case MXL_RV64:
557 vill = (uint64_t)env->vill << 63;
558 break;
559 default:
560 g_assert_not_reached();
562 *val = (target_ulong)vill | env->vtype;
563 return RISCV_EXCP_NONE;
566 static RISCVException read_vl(CPURISCVState *env, int csrno,
567 target_ulong *val)
569 *val = env->vl;
570 return RISCV_EXCP_NONE;
573 static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val)
575 *val = env_archcpu(env)->cfg.vlen >> 3;
576 return RISCV_EXCP_NONE;
579 static RISCVException read_vxrm(CPURISCVState *env, int csrno,
580 target_ulong *val)
582 *val = env->vxrm;
583 return RISCV_EXCP_NONE;
586 static RISCVException write_vxrm(CPURISCVState *env, int csrno,
587 target_ulong val)
589 #if !defined(CONFIG_USER_ONLY)
590 env->mstatus |= MSTATUS_VS;
591 #endif
592 env->vxrm = val;
593 return RISCV_EXCP_NONE;
596 static RISCVException read_vxsat(CPURISCVState *env, int csrno,
597 target_ulong *val)
599 *val = env->vxsat;
600 return RISCV_EXCP_NONE;
603 static RISCVException write_vxsat(CPURISCVState *env, int csrno,
604 target_ulong val)
606 #if !defined(CONFIG_USER_ONLY)
607 env->mstatus |= MSTATUS_VS;
608 #endif
609 env->vxsat = val;
610 return RISCV_EXCP_NONE;
613 static RISCVException read_vstart(CPURISCVState *env, int csrno,
614 target_ulong *val)
616 *val = env->vstart;
617 return RISCV_EXCP_NONE;
620 static RISCVException write_vstart(CPURISCVState *env, int csrno,
621 target_ulong val)
623 #if !defined(CONFIG_USER_ONLY)
624 env->mstatus |= MSTATUS_VS;
625 #endif
627 * The vstart CSR is defined to have only enough writable bits
628 * to hold the largest element index, i.e. lg2(VLEN) bits.
630 env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen));
631 return RISCV_EXCP_NONE;
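/*
 * Worked example of the masking above: with cfg.vlen = 128, ctzl(128) = 7,
 * so vstart keeps its low 7 bits (values 0..127), which is exactly enough
 * to index the largest possible element (SEW=8 with LMUL=8 gives 128
 * elements).
 */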
634 static int read_vcsr(CPURISCVState *env, int csrno, target_ulong *val)
636 *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
637 return RISCV_EXCP_NONE;
640 static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val)
642 #if !defined(CONFIG_USER_ONLY)
643 env->mstatus |= MSTATUS_VS;
644 #endif
645 env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
646 env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
647 return RISCV_EXCP_NONE;
650 /* User Timers and Counters */
651 static target_ulong get_ticks(bool shift)
653 int64_t val;
654 target_ulong result;
656 #if !defined(CONFIG_USER_ONLY)
657 if (icount_enabled()) {
658 val = icount_get();
659 } else {
660 val = cpu_get_host_ticks();
662 #else
663 val = cpu_get_host_ticks();
664 #endif
666 if (shift) {
667 result = val >> 32;
668 } else {
669 result = val;
672 return result;
675 #if defined(CONFIG_USER_ONLY)
676 static RISCVException read_time(CPURISCVState *env, int csrno,
677 target_ulong *val)
679 *val = cpu_get_host_ticks();
680 return RISCV_EXCP_NONE;
683 static RISCVException read_timeh(CPURISCVState *env, int csrno,
684 target_ulong *val)
686 *val = cpu_get_host_ticks() >> 32;
687 return RISCV_EXCP_NONE;
690 static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
692 *val = get_ticks(false);
693 return RISCV_EXCP_NONE;
696 static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
698 *val = get_ticks(true);
699 return RISCV_EXCP_NONE;
702 #else /* CONFIG_USER_ONLY */
704 static int read_mhpmevent(CPURISCVState *env, int csrno, target_ulong *val)
706 int evt_index = csrno - CSR_MCOUNTINHIBIT;
708 *val = env->mhpmevent_val[evt_index];
710 return RISCV_EXCP_NONE;
713 static int write_mhpmevent(CPURISCVState *env, int csrno, target_ulong val)
715 int evt_index = csrno - CSR_MCOUNTINHIBIT;
716 uint64_t mhpmevt_val = val;
718 env->mhpmevent_val[evt_index] = val;
720 if (riscv_cpu_mxl(env) == MXL_RV32) {
721 mhpmevt_val = mhpmevt_val |
722 ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
724 riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
726 return RISCV_EXCP_NONE;
729 static int read_mhpmeventh(CPURISCVState *env, int csrno, target_ulong *val)
731 int evt_index = csrno - CSR_MHPMEVENT3H + 3;
733 *val = env->mhpmeventh_val[evt_index];
735 return RISCV_EXCP_NONE;
738 static int write_mhpmeventh(CPURISCVState *env, int csrno, target_ulong val)
740 int evt_index = csrno - CSR_MHPMEVENT3H + 3;
741 uint64_t mhpmevth_val = val;
742 uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];
744 mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
745 env->mhpmeventh_val[evt_index] = val;
747 riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
749 return RISCV_EXCP_NONE;
752 static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val)
754 int ctr_idx = csrno - CSR_MCYCLE;
755 PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
756 uint64_t mhpmctr_val = val;
758 counter->mhpmcounter_val = val;
759 if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
760 riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
761 counter->mhpmcounter_prev = get_ticks(false);
762 if (ctr_idx > 2) {
763 if (riscv_cpu_mxl(env) == MXL_RV32) {
764 mhpmctr_val = mhpmctr_val |
765 ((uint64_t)counter->mhpmcounterh_val << 32);
767 riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
769 } else {
770 /* Other counters can keep incrementing from the given value */
771 counter->mhpmcounter_prev = val;
774 return RISCV_EXCP_NONE;
777 static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val)
779 int ctr_idx = csrno - CSR_MCYCLEH;
780 PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
781 uint64_t mhpmctr_val = counter->mhpmcounter_val;
782 uint64_t mhpmctrh_val = val;
784 counter->mhpmcounterh_val = val;
785 mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
786 if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
787 riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
788 counter->mhpmcounterh_prev = get_ticks(true);
789 if (ctr_idx > 2) {
790 riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
792 } else {
793 counter->mhpmcounterh_prev = val;
796 return RISCV_EXCP_NONE;
799 static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
800 bool upper_half, uint32_t ctr_idx)
802 PMUCTRState counter = env->pmu_ctrs[ctr_idx];
803 target_ulong ctr_prev = upper_half ? counter.mhpmcounterh_prev :
804 counter.mhpmcounter_prev;
805 target_ulong ctr_val = upper_half ? counter.mhpmcounterh_val :
806 counter.mhpmcounter_val;
808 if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
810          * The counter must not increment while its inhibit bit is set. We can't
811          * really stop the icount counting, so just return the counter value
812          * written by the supervisor to indicate that the counter was not incremented.
814 if (!counter.started) {
815 *val = ctr_val;
816 return RISCV_EXCP_NONE;
817 } else {
818 /* Mark that the counter has been stopped */
819 counter.started = false;
824 * The kernel computes the perf delta by subtracting the current value from
825 * the value it initialized previously (ctr_val).
827 if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
828 riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
829 *val = get_ticks(upper_half) - ctr_prev + ctr_val;
830 } else {
831 *val = ctr_val;
834 return RISCV_EXCP_NONE;
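/*
 * Example of the delta computation above: if the guest wrote 100 to mcycle
 * while the raw tick counter stood at 5000 (so ctr_prev = 5000 and
 * ctr_val = 100), a later read taken when the raw counter reaches 5250
 * returns 5250 - 5000 + 100 = 350.
 */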
837 static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
839 uint16_t ctr_index;
841 if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
842 ctr_index = csrno - CSR_MCYCLE;
843 } else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
844 ctr_index = csrno - CSR_CYCLE;
845 } else {
846 return RISCV_EXCP_ILLEGAL_INST;
849 return riscv_pmu_read_ctr(env, val, false, ctr_index);
852 static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
854 uint16_t ctr_index;
856 if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) {
857 ctr_index = csrno - CSR_MCYCLEH;
858 } else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) {
859 ctr_index = csrno - CSR_CYCLEH;
860 } else {
861 return RISCV_EXCP_ILLEGAL_INST;
864 return riscv_pmu_read_ctr(env, val, true, ctr_index);
867 static int read_scountovf(CPURISCVState *env, int csrno, target_ulong *val)
869 int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT;
870 int i;
871 *val = 0;
872 target_ulong *mhpm_evt_val;
873 uint64_t of_bit_mask;
875 if (riscv_cpu_mxl(env) == MXL_RV32) {
876 mhpm_evt_val = env->mhpmeventh_val;
877 of_bit_mask = MHPMEVENTH_BIT_OF;
878 } else {
879 mhpm_evt_val = env->mhpmevent_val;
880 of_bit_mask = MHPMEVENT_BIT_OF;
883 for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) {
884 if ((get_field(env->mcounteren, BIT(i))) &&
885 (mhpm_evt_val[i] & of_bit_mask)) {
886 *val |= BIT(i);
890 return RISCV_EXCP_NONE;
893 static RISCVException read_time(CPURISCVState *env, int csrno,
894 target_ulong *val)
896 uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;
898 if (!env->rdtime_fn) {
899 return RISCV_EXCP_ILLEGAL_INST;
902 *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
903 return RISCV_EXCP_NONE;
906 static RISCVException read_timeh(CPURISCVState *env, int csrno,
907 target_ulong *val)
909 uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;
911 if (!env->rdtime_fn) {
912 return RISCV_EXCP_ILLEGAL_INST;
915 *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
916 return RISCV_EXCP_NONE;
919 static RISCVException sstc(CPURISCVState *env, int csrno)
921 CPUState *cs = env_cpu(env);
922 RISCVCPU *cpu = RISCV_CPU(cs);
923 bool hmode_check = false;
925 if (!cpu->cfg.ext_sstc || !env->rdtime_fn) {
926 return RISCV_EXCP_ILLEGAL_INST;
929 if (env->priv == PRV_M) {
930 return RISCV_EXCP_NONE;
934      * No separate RV32 function is needed, as env->menvcfg stores both menvcfg
935      * and menvcfgh for RV32.
937 if (!(get_field(env->mcounteren, COUNTEREN_TM) &&
938 get_field(env->menvcfg, MENVCFG_STCE))) {
939 return RISCV_EXCP_ILLEGAL_INST;
942 if (riscv_cpu_virt_enabled(env)) {
943 if (!(get_field(env->hcounteren, COUNTEREN_TM) &&
944 get_field(env->henvcfg, HENVCFG_STCE))) {
945 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
949 if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) {
950 hmode_check = true;
953 return hmode_check ? hmode(env, csrno) : smode(env, csrno);
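/*
 * In short: M-mode may always touch the stimecmp CSRs once Sstc and an
 * rdtime_fn are present.  An HS/S-mode access additionally requires
 * mcounteren.TM and menvcfg.STCE, otherwise it raises an illegal
 * instruction exception; a VS-mode access further requires hcounteren.TM
 * and henvcfg.STCE, otherwise it raises a virtual instruction fault.
 */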
956 static RISCVException sstc_32(CPURISCVState *env, int csrno)
958 if (riscv_cpu_mxl(env) != MXL_RV32) {
959 return RISCV_EXCP_ILLEGAL_INST;
962 return sstc(env, csrno);
965 static RISCVException read_vstimecmp(CPURISCVState *env, int csrno,
966 target_ulong *val)
968 *val = env->vstimecmp;
970 return RISCV_EXCP_NONE;
973 static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
974 target_ulong *val)
976 *val = env->vstimecmp >> 32;
978 return RISCV_EXCP_NONE;
981 static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
982 target_ulong val)
984 RISCVCPU *cpu = env_archcpu(env);
986 if (riscv_cpu_mxl(env) == MXL_RV32) {
987 env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
988 } else {
989 env->vstimecmp = val;
992 riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp,
993 env->htimedelta, MIP_VSTIP);
995 return RISCV_EXCP_NONE;
998 static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
999 target_ulong val)
1001 RISCVCPU *cpu = env_archcpu(env);
1003 env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
1004 riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp,
1005 env->htimedelta, MIP_VSTIP);
1007 return RISCV_EXCP_NONE;
1010 static RISCVException read_stimecmp(CPURISCVState *env, int csrno,
1011 target_ulong *val)
1013 if (riscv_cpu_virt_enabled(env)) {
1014 *val = env->vstimecmp;
1015 } else {
1016 *val = env->stimecmp;
1019 return RISCV_EXCP_NONE;
1022 static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
1023 target_ulong *val)
1025 if (riscv_cpu_virt_enabled(env)) {
1026 *val = env->vstimecmp >> 32;
1027 } else {
1028 *val = env->stimecmp >> 32;
1031 return RISCV_EXCP_NONE;
1034 static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
1035 target_ulong val)
1037 RISCVCPU *cpu = env_archcpu(env);
1039 if (riscv_cpu_virt_enabled(env)) {
1040 return write_vstimecmp(env, csrno, val);
1043 if (riscv_cpu_mxl(env) == MXL_RV32) {
1044 env->stimecmp = deposit64(env->stimecmp, 0, 32, (uint64_t)val);
1045 } else {
1046 env->stimecmp = val;
1049 riscv_timer_write_timecmp(cpu, env->stimer, env->stimecmp, 0, MIP_STIP);
1051 return RISCV_EXCP_NONE;
1054 static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
1055 target_ulong val)
1057 RISCVCPU *cpu = env_archcpu(env);
1059 if (riscv_cpu_virt_enabled(env)) {
1060 return write_vstimecmph(env, csrno, val);
1063 env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
1064 riscv_timer_write_timecmp(cpu, env->stimer, env->stimecmp, 0, MIP_STIP);
1066 return RISCV_EXCP_NONE;
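/*
 * On RV32 the 64-bit comparand is assembled from the two halves, e.g.
 * writing 0x1 to stimecmph and 0x80000000 to stimecmp yields a timecmp of
 * 0x180000000.  With V=1 the same CSR numbers transparently operate on
 * vstimecmp instead, and htimedelta is applied when the timer is armed.
 */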
1069 /* Machine constants */
1071 #define M_MODE_INTERRUPTS ((uint64_t)(MIP_MSIP | MIP_MTIP | MIP_MEIP))
1072 #define S_MODE_INTERRUPTS ((uint64_t)(MIP_SSIP | MIP_STIP | MIP_SEIP | \
1073 MIP_LCOFIP))
1074 #define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP))
1075 #define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS))
1077 #define VSTOPI_NUM_SRCS 5
1079 static const uint64_t delegable_ints = S_MODE_INTERRUPTS |
1080 VS_MODE_INTERRUPTS;
1081 static const uint64_t vs_delegable_ints = VS_MODE_INTERRUPTS;
1082 static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
1083 HS_MODE_INTERRUPTS;
1084 #define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
1085 (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
1086 (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
1087 (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
1088 (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
1089 (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
1090 (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
1091 (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
1092 (1ULL << (RISCV_EXCP_U_ECALL)) | \
1093 (1ULL << (RISCV_EXCP_S_ECALL)) | \
1094 (1ULL << (RISCV_EXCP_VS_ECALL)) | \
1095 (1ULL << (RISCV_EXCP_M_ECALL)) | \
1096 (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
1097 (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
1098 (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
1099 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
1100 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
1101 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
1102 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
1103 static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
1104 ~((1ULL << (RISCV_EXCP_S_ECALL)) |
1105 (1ULL << (RISCV_EXCP_VS_ECALL)) |
1106 (1ULL << (RISCV_EXCP_M_ECALL)) |
1107 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
1108 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
1109 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
1110 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
1111 static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
1112 SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
1113 SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
1114 static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP |
1115 SIP_LCOFIP;
1116 static const target_ulong hip_writable_mask = MIP_VSSIP;
1117 static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
1118 static const target_ulong vsip_writable_mask = MIP_VSSIP;
1120 static const char valid_vm_1_10_32[16] = {
1121 [VM_1_10_MBARE] = 1,
1122 [VM_1_10_SV32] = 1
1125 static const char valid_vm_1_10_64[16] = {
1126 [VM_1_10_MBARE] = 1,
1127 [VM_1_10_SV39] = 1,
1128 [VM_1_10_SV48] = 1,
1129 [VM_1_10_SV57] = 1
1132 /* Machine Information Registers */
1133 static RISCVException read_zero(CPURISCVState *env, int csrno,
1134 target_ulong *val)
1136 *val = 0;
1137 return RISCV_EXCP_NONE;
1140 static RISCVException write_ignore(CPURISCVState *env, int csrno,
1141 target_ulong val)
1143 return RISCV_EXCP_NONE;
1146 static RISCVException read_mvendorid(CPURISCVState *env, int csrno,
1147 target_ulong *val)
1149 CPUState *cs = env_cpu(env);
1150 RISCVCPU *cpu = RISCV_CPU(cs);
1152 *val = cpu->cfg.mvendorid;
1153 return RISCV_EXCP_NONE;
1156 static RISCVException read_marchid(CPURISCVState *env, int csrno,
1157 target_ulong *val)
1159 CPUState *cs = env_cpu(env);
1160 RISCVCPU *cpu = RISCV_CPU(cs);
1162 *val = cpu->cfg.marchid;
1163 return RISCV_EXCP_NONE;
1166 static RISCVException read_mimpid(CPURISCVState *env, int csrno,
1167 target_ulong *val)
1169 CPUState *cs = env_cpu(env);
1170 RISCVCPU *cpu = RISCV_CPU(cs);
1172 *val = cpu->cfg.mimpid;
1173 return RISCV_EXCP_NONE;
1176 static RISCVException read_mhartid(CPURISCVState *env, int csrno,
1177 target_ulong *val)
1179 *val = env->mhartid;
1180 return RISCV_EXCP_NONE;
1183 /* Machine Trap Setup */
1185 /* We do not store SD explicitly, only compute it on demand. */
1186 static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
1188 if ((status & MSTATUS_FS) == MSTATUS_FS ||
1189 (status & MSTATUS_VS) == MSTATUS_VS ||
1190 (status & MSTATUS_XS) == MSTATUS_XS) {
1191 switch (xl) {
1192 case MXL_RV32:
1193 return status | MSTATUS32_SD;
1194 case MXL_RV64:
1195 return status | MSTATUS64_SD;
1196 case MXL_RV128:
1197 return MSTATUSH128_SD;
1198 default:
1199 g_assert_not_reached();
1202 return status;
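/*
 * Example: on RV64, if mstatus.FS reads as 3 (Dirty) the value returned
 * above has bit 63 (MSTATUS64_SD) set; on RV32 the same summary bit is
 * reported in bit 31.
 */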
1205 static RISCVException read_mstatus(CPURISCVState *env, int csrno,
1206 target_ulong *val)
1208 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
1209 return RISCV_EXCP_NONE;
1212 static int validate_vm(CPURISCVState *env, target_ulong vm)
1214 if (riscv_cpu_mxl(env) == MXL_RV32) {
1215 return valid_vm_1_10_32[vm & 0xf];
1216 } else {
1217 return valid_vm_1_10_64[vm & 0xf];
1221 static RISCVException write_mstatus(CPURISCVState *env, int csrno,
1222 target_ulong val)
1224 uint64_t mstatus = env->mstatus;
1225 uint64_t mask = 0;
1226 RISCVMXL xl = riscv_cpu_mxl(env);
1228 /* flush tlb on mstatus fields that affect VM */
1229 if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV |
1230 MSTATUS_MPRV | MSTATUS_SUM)) {
1231 tlb_flush(env_cpu(env));
1233 mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
1234 MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
1235 MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
1236 MSTATUS_TW | MSTATUS_VS;
1238 if (riscv_has_ext(env, RVF)) {
1239 mask |= MSTATUS_FS;
1242 if (xl != MXL_RV32 || env->debugger) {
1244 * RV32: MPV and GVA are not in mstatus. The current plan is to
1245 * add them to mstatush. For now, we just don't support it.
1247 mask |= MSTATUS_MPV | MSTATUS_GVA;
1248 if ((val & MSTATUS64_UXL) != 0) {
1249 mask |= MSTATUS64_UXL;
1253 mstatus = (mstatus & ~mask) | (val & mask);
1255 if (xl > MXL_RV32) {
1256 /* SXL field is for now read only */
1257 mstatus = set_field(mstatus, MSTATUS64_SXL, xl);
1259 env->mstatus = mstatus;
1260 env->xl = cpu_recompute_xl(env);
1262 return RISCV_EXCP_NONE;
1265 static RISCVException read_mstatush(CPURISCVState *env, int csrno,
1266 target_ulong *val)
1268 *val = env->mstatus >> 32;
1269 return RISCV_EXCP_NONE;
1272 static RISCVException write_mstatush(CPURISCVState *env, int csrno,
1273 target_ulong val)
1275 uint64_t valh = (uint64_t)val << 32;
1276 uint64_t mask = MSTATUS_MPV | MSTATUS_GVA;
1278 if ((valh ^ env->mstatus) & (MSTATUS_MPV)) {
1279 tlb_flush(env_cpu(env));
1282 env->mstatus = (env->mstatus & ~mask) | (valh & mask);
1284 return RISCV_EXCP_NONE;
1287 static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
1288 Int128 *val)
1290 *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128, env->mstatus));
1291 return RISCV_EXCP_NONE;
1294 static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
1295 Int128 *val)
1297 *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
1298 return RISCV_EXCP_NONE;
1301 static RISCVException read_misa(CPURISCVState *env, int csrno,
1302 target_ulong *val)
1304 target_ulong misa;
1306 switch (env->misa_mxl) {
1307 case MXL_RV32:
1308 misa = (target_ulong)MXL_RV32 << 30;
1309 break;
1310 #ifdef TARGET_RISCV64
1311 case MXL_RV64:
1312 misa = (target_ulong)MXL_RV64 << 62;
1313 break;
1314 #endif
1315 default:
1316 g_assert_not_reached();
1319 *val = misa | env->misa_ext;
1320 return RISCV_EXCP_NONE;
1323 static RISCVException write_misa(CPURISCVState *env, int csrno,
1324 target_ulong val)
1326 if (!riscv_feature(env, RISCV_FEATURE_MISA)) {
1327 /* drop write to misa */
1328 return RISCV_EXCP_NONE;
1331 /* 'I' or 'E' must be present */
1332 if (!(val & (RVI | RVE))) {
1333 /* It is not, drop write to misa */
1334 return RISCV_EXCP_NONE;
1337 /* 'E' excludes all other extensions */
1338 if (val & RVE) {
1339 /* when we support 'E' we can do "val = RVE;" however
1340 * for now we just drop writes if 'E' is present.
1342 return RISCV_EXCP_NONE;
1346 * misa.MXL writes are not supported by QEMU.
1347 * Drop writes to those bits.
1350 /* Mask extensions that are not supported by this hart */
1351 val &= env->misa_ext_mask;
1353 /* Mask extensions that are not supported by QEMU */
1354 val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU | RVV);
1356 /* 'D' depends on 'F', so clear 'D' if 'F' is not present */
1357 if ((val & RVD) && !(val & RVF)) {
1358 val &= ~RVD;
1361 /* Suppress 'C' if next instruction is not aligned
1362 * TODO: this should check next_pc
1364 if ((val & RVC) && (GETPC() & ~3) != 0) {
1365 val &= ~RVC;
1368 /* If nothing changed, do nothing. */
1369 if (val == env->misa_ext) {
1370 return RISCV_EXCP_NONE;
1373 if (!(val & RVF)) {
1374 env->mstatus &= ~MSTATUS_FS;
1377 /* flush translation cache */
1378 tb_flush(env_cpu(env));
1379 env->misa_ext = val;
1380 env->xl = riscv_cpu_mxl(env);
1381 return RISCV_EXCP_NONE;
1384 static RISCVException read_medeleg(CPURISCVState *env, int csrno,
1385 target_ulong *val)
1387 *val = env->medeleg;
1388 return RISCV_EXCP_NONE;
1391 static RISCVException write_medeleg(CPURISCVState *env, int csrno,
1392 target_ulong val)
1394 env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
1395 return RISCV_EXCP_NONE;
1398 static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno,
1399 uint64_t *ret_val,
1400 uint64_t new_val, uint64_t wr_mask)
1402 uint64_t mask = wr_mask & delegable_ints;
1404 if (ret_val) {
1405 *ret_val = env->mideleg;
1408 env->mideleg = (env->mideleg & ~mask) | (new_val & mask);
1410 if (riscv_has_ext(env, RVH)) {
1411 env->mideleg |= HS_MODE_INTERRUPTS;
1414 return RISCV_EXCP_NONE;
1417 static RISCVException rmw_mideleg(CPURISCVState *env, int csrno,
1418 target_ulong *ret_val,
1419 target_ulong new_val, target_ulong wr_mask)
1421 uint64_t rval;
1422 RISCVException ret;
1424 ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask);
1425 if (ret_val) {
1426 *ret_val = rval;
1429 return ret;
1432 static RISCVException rmw_midelegh(CPURISCVState *env, int csrno,
1433 target_ulong *ret_val,
1434 target_ulong new_val,
1435 target_ulong wr_mask)
1437 uint64_t rval;
1438 RISCVException ret;
1440 ret = rmw_mideleg64(env, csrno, &rval,
1441 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1442 if (ret_val) {
1443 *ret_val = rval >> 32;
1446 return ret;
1449 static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
1450 uint64_t *ret_val,
1451 uint64_t new_val, uint64_t wr_mask)
1453 uint64_t mask = wr_mask & all_ints;
1455 if (ret_val) {
1456 *ret_val = env->mie;
1459 env->mie = (env->mie & ~mask) | (new_val & mask);
1461 if (!riscv_has_ext(env, RVH)) {
1462 env->mie &= ~((uint64_t)MIP_SGEIP);
1465 return RISCV_EXCP_NONE;
1468 static RISCVException rmw_mie(CPURISCVState *env, int csrno,
1469 target_ulong *ret_val,
1470 target_ulong new_val, target_ulong wr_mask)
1472 uint64_t rval;
1473 RISCVException ret;
1475 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask);
1476 if (ret_val) {
1477 *ret_val = rval;
1480 return ret;
1483 static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
1484 target_ulong *ret_val,
1485 target_ulong new_val, target_ulong wr_mask)
1487 uint64_t rval;
1488 RISCVException ret;
1490 ret = rmw_mie64(env, csrno, &rval,
1491 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1492 if (ret_val) {
1493 *ret_val = rval >> 32;
1496 return ret;
1499 static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val)
1501 int irq;
1502 uint8_t iprio;
1504 irq = riscv_cpu_mirq_pending(env);
1505 if (irq <= 0 || irq > 63) {
1506 *val = 0;
1507 } else {
1508 iprio = env->miprio[irq];
1509 if (!iprio) {
1510 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) {
1511 iprio = IPRIO_MMAXIPRIO;
1514 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
1515 *val |= iprio;
1518 return RISCV_EXCP_NONE;
1521 static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
1523 if (!riscv_cpu_virt_enabled(env)) {
1524 return csrno;
1527 switch (csrno) {
1528 case CSR_SISELECT:
1529 return CSR_VSISELECT;
1530 case CSR_SIREG:
1531 return CSR_VSIREG;
1532 case CSR_STOPEI:
1533 return CSR_VSTOPEI;
1534 default:
1535 return csrno;
1539 static int rmw_xiselect(CPURISCVState *env, int csrno, target_ulong *val,
1540 target_ulong new_val, target_ulong wr_mask)
1542 target_ulong *iselect;
1544 /* Translate CSR number for VS-mode */
1545 csrno = aia_xlate_vs_csrno(env, csrno);
1547 /* Find the iselect CSR based on CSR number */
1548 switch (csrno) {
1549 case CSR_MISELECT:
1550 iselect = &env->miselect;
1551 break;
1552 case CSR_SISELECT:
1553 iselect = &env->siselect;
1554 break;
1555 case CSR_VSISELECT:
1556 iselect = &env->vsiselect;
1557 break;
1558 default:
1559 return RISCV_EXCP_ILLEGAL_INST;
1562 if (val) {
1563 *val = *iselect;
1566 wr_mask &= ISELECT_MASK;
1567 if (wr_mask) {
1568 *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
1571 return RISCV_EXCP_NONE;
1574 static int rmw_iprio(target_ulong xlen,
1575 target_ulong iselect, uint8_t *iprio,
1576 target_ulong *val, target_ulong new_val,
1577 target_ulong wr_mask, int ext_irq_no)
1579 int i, firq, nirqs;
1580 target_ulong old_val;
1582 if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) {
1583 return -EINVAL;
1585 if (xlen != 32 && iselect & 0x1) {
1586 return -EINVAL;
1589 nirqs = 4 * (xlen / 32);
1590 firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs);
1592 old_val = 0;
1593 for (i = 0; i < nirqs; i++) {
1594 old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i);
1597 if (val) {
1598 *val = old_val;
1601 if (wr_mask) {
1602 new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
1603 for (i = 0; i < nirqs; i++) {
1605                  * M-level and S-level external IRQ priority is always read-only
1606                  * zero. This means the default priority order is always used
1607                  * for M-level and S-level external IRQs.
1609 if ((firq + i) == ext_irq_no) {
1610 continue;
1612 iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff;
1616 return 0;
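/*
 * Packing example for the loop above: nirqs = 4 * (xlen / 32), so an RV64
 * hart packs eight IPRIO_IRQ_BITS-wide priority fields into each even
 * iselect register (odd iselect numbers are rejected), while an RV32 hart
 * packs four per register.
 */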
1619 static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
1620 target_ulong new_val, target_ulong wr_mask)
1622 bool virt;
1623 uint8_t *iprio;
1624 int ret = -EINVAL;
1625 target_ulong priv, isel, vgein;
1627 /* Translate CSR number for VS-mode */
1628 csrno = aia_xlate_vs_csrno(env, csrno);
1630 /* Decode register details from CSR number */
1631 virt = false;
1632 switch (csrno) {
1633 case CSR_MIREG:
1634 iprio = env->miprio;
1635 isel = env->miselect;
1636 priv = PRV_M;
1637 break;
1638 case CSR_SIREG:
1639 iprio = env->siprio;
1640 isel = env->siselect;
1641 priv = PRV_S;
1642 break;
1643 case CSR_VSIREG:
1644 iprio = env->hviprio;
1645 isel = env->vsiselect;
1646 priv = PRV_S;
1647 virt = true;
1648 break;
1649 default:
1650 goto done;
1653 /* Find the selected guest interrupt file */
1654 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
1656 if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) {
1657 /* Local interrupt priority registers not available for VS-mode */
1658 if (!virt) {
1659 ret = rmw_iprio(riscv_cpu_mxl_bits(env),
1660 isel, iprio, val, new_val, wr_mask,
1661 (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT);
1663 } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) {
1664 /* IMSIC registers only available when machine implements it. */
1665 if (env->aia_ireg_rmw_fn[priv]) {
1666 /* Selected guest interrupt file should not be zero */
1667 if (virt && (!vgein || env->geilen < vgein)) {
1668 goto done;
1670 /* Call machine specific IMSIC register emulation */
1671 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
1672 AIA_MAKE_IREG(isel, priv, virt, vgein,
1673 riscv_cpu_mxl_bits(env)),
1674 val, new_val, wr_mask);
1678 done:
1679 if (ret) {
1680 return (riscv_cpu_virt_enabled(env) && virt) ?
1681 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
1683 return RISCV_EXCP_NONE;
1686 static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val,
1687 target_ulong new_val, target_ulong wr_mask)
1689 bool virt;
1690 int ret = -EINVAL;
1691 target_ulong priv, vgein;
1693 /* Translate CSR number for VS-mode */
1694 csrno = aia_xlate_vs_csrno(env, csrno);
1696 /* Decode register details from CSR number */
1697 virt = false;
1698 switch (csrno) {
1699 case CSR_MTOPEI:
1700 priv = PRV_M;
1701 break;
1702 case CSR_STOPEI:
1703 priv = PRV_S;
1704 break;
1705 case CSR_VSTOPEI:
1706 priv = PRV_S;
1707 virt = true;
1708 break;
1709 default:
1710 goto done;
1713 /* IMSIC CSRs only available when machine implements IMSIC. */
1714 if (!env->aia_ireg_rmw_fn[priv]) {
1715 goto done;
1718 /* Find the selected guest interrupt file */
1719 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
1721 /* Selected guest interrupt file should be valid */
1722 if (virt && (!vgein || env->geilen < vgein)) {
1723 goto done;
1726 /* Call machine specific IMSIC register emulation for TOPEI */
1727 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
1728 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein,
1729 riscv_cpu_mxl_bits(env)),
1730 val, new_val, wr_mask);
1732 done:
1733 if (ret) {
1734 return (riscv_cpu_virt_enabled(env) && virt) ?
1735 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
1737 return RISCV_EXCP_NONE;
1740 static RISCVException read_mtvec(CPURISCVState *env, int csrno,
1741 target_ulong *val)
1743 *val = env->mtvec;
1744 return RISCV_EXCP_NONE;
1747 static RISCVException write_mtvec(CPURISCVState *env, int csrno,
1748 target_ulong val)
1750     /* bits [1:0] encode mode; 0 = direct, 1 = vectored, values >= 2 are reserved */
1751 if ((val & 3) < 2) {
1752 env->mtvec = val;
1753 } else {
1754 qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n");
1756 return RISCV_EXCP_NONE;
1759 static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
1760 target_ulong *val)
1762 *val = env->mcountinhibit;
1763 return RISCV_EXCP_NONE;
1766 static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
1767 target_ulong val)
1769 int cidx;
1770 PMUCTRState *counter;
1772 env->mcountinhibit = val;
1774     /* Restart any counter whose inhibit bit is no longer set */
1775 for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
1776 if (!get_field(env->mcountinhibit, BIT(cidx))) {
1777 counter = &env->pmu_ctrs[cidx];
1778 counter->started = true;
1782 return RISCV_EXCP_NONE;
1785 static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
1786 target_ulong *val)
1788 *val = env->mcounteren;
1789 return RISCV_EXCP_NONE;
1792 static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
1793 target_ulong val)
1795 env->mcounteren = val;
1796 return RISCV_EXCP_NONE;
1799 /* Machine Trap Handling */
1800 static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
1801 Int128 *val)
1803 *val = int128_make128(env->mscratch, env->mscratchh);
1804 return RISCV_EXCP_NONE;
1807 static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
1808 Int128 val)
1810 env->mscratch = int128_getlo(val);
1811 env->mscratchh = int128_gethi(val);
1812 return RISCV_EXCP_NONE;
1815 static RISCVException read_mscratch(CPURISCVState *env, int csrno,
1816 target_ulong *val)
1818 *val = env->mscratch;
1819 return RISCV_EXCP_NONE;
1822 static RISCVException write_mscratch(CPURISCVState *env, int csrno,
1823 target_ulong val)
1825 env->mscratch = val;
1826 return RISCV_EXCP_NONE;
1829 static RISCVException read_mepc(CPURISCVState *env, int csrno,
1830 target_ulong *val)
1832 *val = env->mepc;
1833 return RISCV_EXCP_NONE;
1836 static RISCVException write_mepc(CPURISCVState *env, int csrno,
1837 target_ulong val)
1839 env->mepc = val;
1840 return RISCV_EXCP_NONE;
1843 static RISCVException read_mcause(CPURISCVState *env, int csrno,
1844 target_ulong *val)
1846 *val = env->mcause;
1847 return RISCV_EXCP_NONE;
1850 static RISCVException write_mcause(CPURISCVState *env, int csrno,
1851 target_ulong val)
1853 env->mcause = val;
1854 return RISCV_EXCP_NONE;
1857 static RISCVException read_mtval(CPURISCVState *env, int csrno,
1858 target_ulong *val)
1860 *val = env->mtval;
1861 return RISCV_EXCP_NONE;
1864 static RISCVException write_mtval(CPURISCVState *env, int csrno,
1865 target_ulong val)
1867 env->mtval = val;
1868 return RISCV_EXCP_NONE;
1871 /* Execution environment configuration setup */
1872 static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
1873 target_ulong *val)
1875 *val = env->menvcfg;
1876 return RISCV_EXCP_NONE;
1879 static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
1880 target_ulong val)
1882 uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE;
1884 if (riscv_cpu_mxl(env) == MXL_RV64) {
1885 mask |= MENVCFG_PBMTE | MENVCFG_STCE;
1887 env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
1889 return RISCV_EXCP_NONE;
1892 static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
1893 target_ulong *val)
1895 *val = env->menvcfg >> 32;
1896 return RISCV_EXCP_NONE;
1899 static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
1900 target_ulong val)
1902 uint64_t mask = MENVCFG_PBMTE | MENVCFG_STCE;
1903 uint64_t valh = (uint64_t)val << 32;
1905 env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
1907 return RISCV_EXCP_NONE;
1910 static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
1911 target_ulong *val)
1913 RISCVException ret;
1915 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
1916 if (ret != RISCV_EXCP_NONE) {
1917 return ret;
1920 *val = env->senvcfg;
1921 return RISCV_EXCP_NONE;
1924 static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
1925 target_ulong val)
1927 uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
1928 RISCVException ret;
1930 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
1931 if (ret != RISCV_EXCP_NONE) {
1932 return ret;
1935 env->senvcfg = (env->senvcfg & ~mask) | (val & mask);
1936 return RISCV_EXCP_NONE;
1939 static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
1940 target_ulong *val)
1942 RISCVException ret;
1944 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
1945 if (ret != RISCV_EXCP_NONE) {
1946 return ret;
1949 *val = env->henvcfg;
1950 return RISCV_EXCP_NONE;
1953 static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
1954 target_ulong val)
1956 uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;
1957 RISCVException ret;
1959 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
1960 if (ret != RISCV_EXCP_NONE) {
1961 return ret;
1964 if (riscv_cpu_mxl(env) == MXL_RV64) {
1965 mask |= HENVCFG_PBMTE | HENVCFG_STCE;
1968 env->henvcfg = (env->henvcfg & ~mask) | (val & mask);
1970 return RISCV_EXCP_NONE;
1973 static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
1974 target_ulong *val)
1976 RISCVException ret;
1978 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
1979 if (ret != RISCV_EXCP_NONE) {
1980 return ret;
1983 *val = env->henvcfg >> 32;
1984 return RISCV_EXCP_NONE;
1987 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
1988 target_ulong val)
1990 uint64_t mask = HENVCFG_PBMTE | HENVCFG_STCE;
1991 uint64_t valh = (uint64_t)val << 32;
1992 RISCVException ret;
1994 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
1995 if (ret != RISCV_EXCP_NONE) {
1996 return ret;
1999 env->henvcfg = (env->henvcfg & ~mask) | (valh & mask);
2000 return RISCV_EXCP_NONE;
2003 static RISCVException read_mstateen(CPURISCVState *env, int csrno,
2004 target_ulong *val)
2006 *val = env->mstateen[csrno - CSR_MSTATEEN0];
2008 return RISCV_EXCP_NONE;
2011 static RISCVException write_mstateen(CPURISCVState *env, int csrno,
2012 uint64_t wr_mask, target_ulong new_val)
2014 uint64_t *reg;
2016 reg = &env->mstateen[csrno - CSR_MSTATEEN0];
2017 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2019 return RISCV_EXCP_NONE;
2022 static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
2023 target_ulong new_val)
2025 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2027 return write_mstateen(env, csrno, wr_mask, new_val);
2030 static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno,
2031 target_ulong new_val)
2033 return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2036 static RISCVException read_mstateenh(CPURISCVState *env, int csrno,
2037 target_ulong *val)
2039 *val = env->mstateen[csrno - CSR_MSTATEEN0H] >> 32;
2041 return RISCV_EXCP_NONE;
2044 static RISCVException write_mstateenh(CPURISCVState *env, int csrno,
2045 uint64_t wr_mask, target_ulong new_val)
2047 uint64_t *reg, val;
2049 reg = &env->mstateen[csrno - CSR_MSTATEEN0H];
2050 val = (uint64_t)new_val << 32;
2051 val |= *reg & 0xFFFFFFFF;
2052 *reg = (*reg & ~wr_mask) | (val & wr_mask);
2054 return RISCV_EXCP_NONE;
2057 static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
2058 target_ulong new_val)
2060 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2062 return write_mstateenh(env, csrno, wr_mask, new_val);
2065 static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno,
2066 target_ulong new_val)
2068 return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
2071 static RISCVException read_hstateen(CPURISCVState *env, int csrno,
2072 target_ulong *val)
2074 int index = csrno - CSR_HSTATEEN0;
2076 *val = env->hstateen[index] & env->mstateen[index];
2078 return RISCV_EXCP_NONE;
2081 static RISCVException write_hstateen(CPURISCVState *env, int csrno,
2082 uint64_t mask, target_ulong new_val)
2084 int index = csrno - CSR_HSTATEEN0;
2085 uint64_t *reg, wr_mask;
2087 reg = &env->hstateen[index];
2088 wr_mask = env->mstateen[index] & mask;
2089 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2091 return RISCV_EXCP_NONE;
2094 static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
2095 target_ulong new_val)
2097 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2099 return write_hstateen(env, csrno, wr_mask, new_val);
2102 static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno,
2103 target_ulong new_val)
2105 return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2108 static RISCVException read_hstateenh(CPURISCVState *env, int csrno,
2109 target_ulong *val)
2111 int index = csrno - CSR_HSTATEEN0H;
2113 *val = (env->hstateen[index] >> 32) & (env->mstateen[index] >> 32);
2115 return RISCV_EXCP_NONE;
2118 static RISCVException write_hstateenh(CPURISCVState *env, int csrno,
2119 uint64_t mask, target_ulong new_val)
2121 int index = csrno - CSR_HSTATEEN0H;
2122 uint64_t *reg, wr_mask, val;
2124 reg = &env->hstateen[index];
2125 val = (uint64_t)new_val << 32;
2126 val |= *reg & 0xFFFFFFFF;
2127 wr_mask = env->mstateen[index] & mask;
2128 *reg = (*reg & ~wr_mask) | (val & wr_mask);
2130 return RISCV_EXCP_NONE;
2133 static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
2134 target_ulong new_val)
2136 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2138 return write_hstateenh(env, csrno, wr_mask, new_val);
2141 static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno,
2142 target_ulong new_val)
2144 return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
2147 static RISCVException read_sstateen(CPURISCVState *env, int csrno,
2148 target_ulong *val)
2150 bool virt = riscv_cpu_virt_enabled(env);
2151 int index = csrno - CSR_SSTATEEN0;
2153 *val = env->sstateen[index] & env->mstateen[index];
2154 if (virt) {
2155 *val &= env->hstateen[index];
2158 return RISCV_EXCP_NONE;
2161 static RISCVException write_sstateen(CPURISCVState *env, int csrno,
2162 uint64_t mask, target_ulong new_val)
2164 bool virt = riscv_cpu_virt_enabled(env);
2165 int index = csrno - CSR_SSTATEEN0;
2166 uint64_t wr_mask;
2167 uint64_t *reg;
2169 wr_mask = env->mstateen[index] & mask;
2170 if (virt) {
2171 wr_mask &= env->hstateen[index];
2174 reg = &env->sstateen[index];
2175 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2177 return RISCV_EXCP_NONE;
2180 static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
2181 target_ulong new_val)
2183 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2185 return write_sstateen(env, csrno, wr_mask, new_val);
2188 static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno,
2189 target_ulong new_val)
2191 return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2194 static RISCVException rmw_mip64(CPURISCVState *env, int csrno,
2195 uint64_t *ret_val,
2196 uint64_t new_val, uint64_t wr_mask)
2198 RISCVCPU *cpu = env_archcpu(env);
2199 uint64_t old_mip, mask = wr_mask & delegable_ints;
2200 uint32_t gin;
2202 if (mask & MIP_SEIP) {
2203 env->software_seip = new_val & MIP_SEIP;
2204 new_val |= env->external_seip * MIP_SEIP;
2207 if (cpu->cfg.ext_sstc && (env->priv == PRV_M) &&
2208 get_field(env->menvcfg, MENVCFG_STCE)) {
2209         /* the sstc extension forbids STIP & VSTIP from being writable in mip */
2210 mask = mask & ~(MIP_STIP | MIP_VSTIP);
2213 if (mask) {
2214 old_mip = riscv_cpu_update_mip(cpu, mask, (new_val & mask));
2215 } else {
2216 old_mip = env->mip;
2219 if (csrno != CSR_HVIP) {
2220 gin = get_field(env->hstatus, HSTATUS_VGEIN);
2221 old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0;
2222 old_mip |= env->vstime_irq ? MIP_VSTIP : 0;
2225 if (ret_val) {
2226 *ret_val = old_mip;
2229 return RISCV_EXCP_NONE;
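/*
 * Note on SEIP: the value written by software and the level driven by the
 * external interrupt controller are tracked separately (software_seip /
 * external_seip), so clearing MIP_SEIP here only removes the
 * software-injected component; while the external line stays asserted,
 * SEIP still reads back as pending.  Similarly, the VSEIP/VSTIP read-back
 * ORs in the selected guest's hgeip bit and the Sstc vstime_irq state.
 */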
2232 static RISCVException rmw_mip(CPURISCVState *env, int csrno,
2233 target_ulong *ret_val,
2234 target_ulong new_val, target_ulong wr_mask)
2236 uint64_t rval;
2237 RISCVException ret;
2239 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask);
2240 if (ret_val) {
2241 *ret_val = rval;
2244 return ret;
2247 static RISCVException rmw_miph(CPURISCVState *env, int csrno,
2248 target_ulong *ret_val,
2249 target_ulong new_val, target_ulong wr_mask)
2251 uint64_t rval;
2252 RISCVException ret;
2254 ret = rmw_mip64(env, csrno, &rval,
2255 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2256 if (ret_val) {
2257 *ret_val = rval >> 32;
2260 return ret;
2263 /* Supervisor Trap Setup */
2264 static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
2265 Int128 *val)
2267 uint64_t mask = sstatus_v1_10_mask;
2268 uint64_t sstatus = env->mstatus & mask;
2269 if (env->xl != MXL_RV32 || env->debugger) {
2270 mask |= SSTATUS64_UXL;
2273 *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
2274 return RISCV_EXCP_NONE;
2277 static RISCVException read_sstatus(CPURISCVState *env, int csrno,
2278 target_ulong *val)
2280 target_ulong mask = (sstatus_v1_10_mask);
2281 if (env->xl != MXL_RV32 || env->debugger) {
2282 mask |= SSTATUS64_UXL;
2284 /* TODO: Use SXL not MXL. */
2285 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
2286 return RISCV_EXCP_NONE;
2289 static RISCVException write_sstatus(CPURISCVState *env, int csrno,
2290 target_ulong val)
2292 target_ulong mask = (sstatus_v1_10_mask);
2294 if (env->xl != MXL_RV32 || env->debugger) {
2295 if ((val & SSTATUS64_UXL) != 0) {
2296 mask |= SSTATUS64_UXL;
2299 target_ulong newval = (env->mstatus & ~mask) | (val & mask);
2300 return write_mstatus(env, CSR_MSTATUS, newval);
2303 static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
2304 uint64_t *ret_val,
2305 uint64_t new_val, uint64_t wr_mask)
2307 RISCVException ret;
2308 uint64_t rval, vsbits, mask = env->hideleg & VS_MODE_INTERRUPTS;
2310 /* Bring VS-level bits to correct position */
2311 vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
2312 new_val &= ~(VS_MODE_INTERRUPTS >> 1);
2313 new_val |= vsbits << 1;
2314 vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
2315 wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
2316 wr_mask |= vsbits << 1;
2318 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & mask);
2319 if (ret_val) {
2320 rval &= mask;
2321 vsbits = rval & VS_MODE_INTERRUPTS;
2322 rval &= ~VS_MODE_INTERRUPTS;
2323 *ret_val = rval | (vsbits >> 1);
2326 return ret;
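/*
 * The VS-level interrupt bits sit one position above their S-level
 * counterparts in mip/mie (e.g. MIP_VSSIP == MIP_SSIP << 1), while a guest
 * running with V=1 expects to see them at the S-level positions in
 * vsie/vsip. Hence the "<< 1" / ">> 1" shuffling above: guest-visible bits
 * are shifted up before touching mie/mip and shifted back down before being
 * returned.
 */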
2329 static RISCVException rmw_vsie(CPURISCVState *env, int csrno,
2330 target_ulong *ret_val,
2331 target_ulong new_val, target_ulong wr_mask)
2333 uint64_t rval;
2334 RISCVException ret;
2336 ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask);
2337 if (ret_val) {
2338 *ret_val = rval;
2341 return ret;
2344 static RISCVException rmw_vsieh(CPURISCVState *env, int csrno,
2345 target_ulong *ret_val,
2346 target_ulong new_val, target_ulong wr_mask)
2348 uint64_t rval;
2349 RISCVException ret;
2351 ret = rmw_vsie64(env, csrno, &rval,
2352 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2353 if (ret_val) {
2354 *ret_val = rval >> 32;
2357 return ret;
2360 static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
2361 uint64_t *ret_val,
2362 uint64_t new_val, uint64_t wr_mask)
2364 RISCVException ret;
2365 uint64_t mask = env->mideleg & S_MODE_INTERRUPTS;
2367 if (riscv_cpu_virt_enabled(env)) {
2368 if (env->hvictl & HVICTL_VTI) {
2369 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
2371 ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
2372 } else {
2373 ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & mask);
2376 if (ret_val) {
2377 *ret_val &= mask;
2380 return ret;
2383 static RISCVException rmw_sie(CPURISCVState *env, int csrno,
2384 target_ulong *ret_val,
2385 target_ulong new_val, target_ulong wr_mask)
2387 uint64_t rval;
2388 RISCVException ret;
2390 ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask);
2391 if (ret == RISCV_EXCP_NONE && ret_val) {
2392 *ret_val = rval;
2395 return ret;
2398 static RISCVException rmw_sieh(CPURISCVState *env, int csrno,
2399 target_ulong *ret_val,
2400 target_ulong new_val, target_ulong wr_mask)
2402 uint64_t rval;
2403 RISCVException ret;
2405 ret = rmw_sie64(env, csrno, &rval,
2406 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2407 if (ret_val) {
2408 *ret_val = rval >> 32;
2411 return ret;
2414 static RISCVException read_stvec(CPURISCVState *env, int csrno,
2415 target_ulong *val)
2417 *val = env->stvec;
2418 return RISCV_EXCP_NONE;
2421 static RISCVException write_stvec(CPURISCVState *env, int csrno,
2422 target_ulong val)
2424 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
2425 if ((val & 3) < 2) {
2426 env->stvec = val;
2427 } else {
2428 qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n");
2430 return RISCV_EXCP_NONE;
2433 static RISCVException read_scounteren(CPURISCVState *env, int csrno,
2434 target_ulong *val)
2436 *val = env->scounteren;
2437 return RISCV_EXCP_NONE;
2440 static RISCVException write_scounteren(CPURISCVState *env, int csrno,
2441 target_ulong val)
2443 env->scounteren = val;
2444 return RISCV_EXCP_NONE;
2447 /* Supervisor Trap Handling */
2448 static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
2449 Int128 *val)
2451 *val = int128_make128(env->sscratch, env->sscratchh);
2452 return RISCV_EXCP_NONE;
2455 static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
2456 Int128 val)
2458 env->sscratch = int128_getlo(val);
2459 env->sscratchh = int128_gethi(val);
2460 return RISCV_EXCP_NONE;
2463 static RISCVException read_sscratch(CPURISCVState *env, int csrno,
2464 target_ulong *val)
2466 *val = env->sscratch;
2467 return RISCV_EXCP_NONE;
2470 static RISCVException write_sscratch(CPURISCVState *env, int csrno,
2471 target_ulong val)
2473 env->sscratch = val;
2474 return RISCV_EXCP_NONE;
2477 static RISCVException read_sepc(CPURISCVState *env, int csrno,
2478 target_ulong *val)
2480 *val = env->sepc;
2481 return RISCV_EXCP_NONE;
2484 static RISCVException write_sepc(CPURISCVState *env, int csrno,
2485 target_ulong val)
2487 env->sepc = val;
2488 return RISCV_EXCP_NONE;
2491 static RISCVException read_scause(CPURISCVState *env, int csrno,
2492 target_ulong *val)
2494 *val = env->scause;
2495 return RISCV_EXCP_NONE;
2498 static RISCVException write_scause(CPURISCVState *env, int csrno,
2499 target_ulong val)
2501 env->scause = val;
2502 return RISCV_EXCP_NONE;
2505 static RISCVException read_stval(CPURISCVState *env, int csrno,
2506 target_ulong *val)
2508 *val = env->stval;
2509 return RISCV_EXCP_NONE;
2512 static RISCVException write_stval(CPURISCVState *env, int csrno,
2513 target_ulong val)
2515 env->stval = val;
2516 return RISCV_EXCP_NONE;
2519 static RISCVException rmw_vsip64(CPURISCVState *env, int csrno,
2520 uint64_t *ret_val,
2521 uint64_t new_val, uint64_t wr_mask)
2523 RISCVException ret;
2524 uint64_t rval, vsbits, mask = env->hideleg & vsip_writable_mask;
2526 /* Bring VS-level bits to correct position */
2527 vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
2528 new_val &= ~(VS_MODE_INTERRUPTS >> 1);
2529 new_val |= vsbits << 1;
2530 vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
2531 wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
2532 wr_mask |= vsbits << 1;
2534 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask & mask);
2535 if (ret_val) {
2536 rval &= mask;
2537 vsbits = rval & VS_MODE_INTERRUPTS;
2538 rval &= ~VS_MODE_INTERRUPTS;
2539 *ret_val = rval | (vsbits >> 1);
2542 return ret;
2545 static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
2546 target_ulong *ret_val,
2547 target_ulong new_val, target_ulong wr_mask)
2549 uint64_t rval;
2550 RISCVException ret;
2552 ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask);
2553 if (ret_val) {
2554 *ret_val = rval;
2557 return ret;
2560 static RISCVException rmw_vsiph(CPURISCVState *env, int csrno,
2561 target_ulong *ret_val,
2562 target_ulong new_val, target_ulong wr_mask)
2564 uint64_t rval;
2565 RISCVException ret;
2567 ret = rmw_vsip64(env, csrno, &rval,
2568 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2569 if (ret_val) {
2570 *ret_val = rval >> 32;
2573 return ret;
2576 static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
2577 uint64_t *ret_val,
2578 uint64_t new_val, uint64_t wr_mask)
2580 RISCVException ret;
2581 uint64_t mask = env->mideleg & sip_writable_mask;
2583 if (riscv_cpu_virt_enabled(env)) {
2584 if (env->hvictl & HVICTL_VTI) {
2585 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
2587 ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
2588 } else {
2589 ret = rmw_mip64(env, csrno, ret_val, new_val, wr_mask & mask);
2592 if (ret_val) {
2593 *ret_val &= env->mideleg & S_MODE_INTERRUPTS;
2596 return ret;
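/*
 * When V=1 an access to sip is really an access to vsip, so it is forwarded
 * to rmw_vsip64() above; if hvictl.VTI is set, the hypervisor has taken over
 * interrupt injection for the guest and the access raises a virtual
 * instruction fault instead.
 */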
2599 static RISCVException rmw_sip(CPURISCVState *env, int csrno,
2600 target_ulong *ret_val,
2601 target_ulong new_val, target_ulong wr_mask)
2603 uint64_t rval;
2604 RISCVException ret;
2606 ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask);
2607 if (ret_val) {
2608 *ret_val = rval;
2611 return ret;
2614 static RISCVException rmw_siph(CPURISCVState *env, int csrno,
2615 target_ulong *ret_val,
2616 target_ulong new_val, target_ulong wr_mask)
2618 uint64_t rval;
2619 RISCVException ret;
2621 ret = rmw_sip64(env, csrno, &rval,
2622 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2623 if (ret_val) {
2624 *ret_val = rval >> 32;
2627 return ret;
2630 /* Supervisor Protection and Translation */
2631 static RISCVException read_satp(CPURISCVState *env, int csrno,
2632 target_ulong *val)
2634 if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
2635 *val = 0;
2636 return RISCV_EXCP_NONE;
2639 if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
2640 return RISCV_EXCP_ILLEGAL_INST;
2641 } else {
2642 *val = env->satp;
2645 return RISCV_EXCP_NONE;
2648 static RISCVException write_satp(CPURISCVState *env, int csrno,
2649 target_ulong val)
2651 target_ulong vm, mask;
2653 if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
2654 return RISCV_EXCP_NONE;
2657 if (riscv_cpu_mxl(env) == MXL_RV32) {
2658 vm = validate_vm(env, get_field(val, SATP32_MODE));
2659 mask = (val ^ env->satp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
2660 } else {
2661 vm = validate_vm(env, get_field(val, SATP64_MODE));
2662 mask = (val ^ env->satp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
2665 if (vm && mask) {
2666 if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
2667 return RISCV_EXCP_ILLEGAL_INST;
2668 } else {
2670 * The ISA defines SATP.MODE=Bare as "no translation", but we still
2671 * pass these through QEMU's TLB emulation as it improves
2672 * performance. Flushing the TLB on SATP writes with paging
2673 * enabled avoids leaking those invalid cached mappings.
2675 tlb_flush(env_cpu(env));
2676 env->satp = val;
2679 return RISCV_EXCP_NONE;
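/*
 * For reference, the RV64 satp layout handled above is MODE in bits 63:60,
 * ASID in bits 59:44 and PPN in bits 43:0, so e.g. a guest enabling Sv39
 * with a root page table at physical address pt_pa (pt_pa being an
 * illustrative name) would write roughly:
 *
 *     satp = (8ULL << 60) | (pt_pa >> 12);   // MODE=Sv39, ASID=0
 */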
2682 static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val)
2684 int irq, ret;
2685 target_ulong topei;
2686 uint64_t vseip, vsgein;
2687 uint32_t iid, iprio, hviid, hviprio, gein;
2688 uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS];
2690 gein = get_field(env->hstatus, HSTATUS_VGEIN);
2691 hviid = get_field(env->hvictl, HVICTL_IID);
2692 hviprio = get_field(env->hvictl, HVICTL_IPRIO);
2694 if (gein) {
2695 vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
2696 vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP;
2697 if (gein <= env->geilen && vseip) {
2698 siid[scount] = IRQ_S_EXT;
2699 siprio[scount] = IPRIO_MMAXIPRIO + 1;
2700 if (env->aia_ireg_rmw_fn[PRV_S]) {
2702 * Call machine specific IMSIC register emulation for
2703 * reading TOPEI.
2705 ret = env->aia_ireg_rmw_fn[PRV_S](
2706 env->aia_ireg_rmw_fn_arg[PRV_S],
2707 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein,
2708 riscv_cpu_mxl_bits(env)),
2709 &topei, 0, 0);
2710 if (!ret && topei) {
2711 siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK;
2714 scount++;
2716 } else {
2717 if (hviid == IRQ_S_EXT && hviprio) {
2718 siid[scount] = IRQ_S_EXT;
2719 siprio[scount] = hviprio;
2720 scount++;
2724 if (env->hvictl & HVICTL_VTI) {
2725 if (hviid != IRQ_S_EXT) {
2726 siid[scount] = hviid;
2727 siprio[scount] = hviprio;
2728 scount++;
2730 } else {
2731 irq = riscv_cpu_vsirq_pending(env);
2732 if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) {
2733 siid[scount] = irq;
2734 siprio[scount] = env->hviprio[irq];
2735 scount++;
2739 iid = 0;
2740 iprio = UINT_MAX;
2741 for (s = 0; s < scount; s++) {
2742 if (siprio[s] < iprio) {
2743 iid = siid[s];
2744 iprio = siprio[s];
2748 if (iid) {
2749 if (env->hvictl & HVICTL_IPRIOM) {
2750 if (iprio > IPRIO_MMAXIPRIO) {
2751 iprio = IPRIO_MMAXIPRIO;
2753 if (!iprio) {
2754 if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) {
2755 iprio = IPRIO_MMAXIPRIO;
2758 } else {
2759 iprio = 1;
2761 } else {
2762 iprio = 0;
2765 *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT;
2766 *val |= iprio;
2767 return RISCV_EXCP_NONE;
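/*
 * In short, read_vstopi() collects at most two candidates: the virtual
 * external interrupt (via the IMSIC guest file selected by hstatus.VGEIN,
 * or synthesised from hvictl when VGEIN is 0) and one non-external VS-level
 * interrupt (taken from hvictl when hvictl.VTI is set, otherwise from
 * riscv_cpu_vsirq_pending()). The candidate with the lowest priority number
 * wins and is reported as IID/IPRIO, in the same format as stopi and mtopi.
 */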
2770 static int read_stopi(CPURISCVState *env, int csrno, target_ulong *val)
2772 int irq;
2773 uint8_t iprio;
2775 if (riscv_cpu_virt_enabled(env)) {
2776 return read_vstopi(env, CSR_VSTOPI, val);
2779 irq = riscv_cpu_sirq_pending(env);
2780 if (irq <= 0 || irq > 63) {
2781 *val = 0;
2782 } else {
2783 iprio = env->siprio[irq];
2784 if (!iprio) {
2785 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) {
2786 iprio = IPRIO_MMAXIPRIO;
2789 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
2790 *val |= iprio;
2793 return RISCV_EXCP_NONE;
2796 /* Hypervisor Extensions */
2797 static RISCVException read_hstatus(CPURISCVState *env, int csrno,
2798 target_ulong *val)
2800 *val = env->hstatus;
2801 if (riscv_cpu_mxl(env) != MXL_RV32) {
2802 /* We only support 64-bit VSXL */
2803 *val = set_field(*val, HSTATUS_VSXL, 2);
2805 /* We only support little endian */
2806 *val = set_field(*val, HSTATUS_VSBE, 0);
2807 return RISCV_EXCP_NONE;
2810 static RISCVException write_hstatus(CPURISCVState *env, int csrno,
2811 target_ulong val)
2813 env->hstatus = val;
2814 if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
2815 qemu_log_mask(LOG_UNIMP, "QEMU does not support mixed HSXLEN options.\n");
2817 if (get_field(val, HSTATUS_VSBE) != 0) {
2818 qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests.\n");
2820 return RISCV_EXCP_NONE;
2823 static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
2824 target_ulong *val)
2826 *val = env->hedeleg;
2827 return RISCV_EXCP_NONE;
2830 static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
2831 target_ulong val)
2833 env->hedeleg = val & vs_delegable_excps;
2834 return RISCV_EXCP_NONE;
2837 static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno,
2838 uint64_t *ret_val,
2839 uint64_t new_val, uint64_t wr_mask)
2841 uint64_t mask = wr_mask & vs_delegable_ints;
2843 if (ret_val) {
2844 *ret_val = env->hideleg & vs_delegable_ints;
2847 env->hideleg = (env->hideleg & ~mask) | (new_val & mask);
2848 return RISCV_EXCP_NONE;
2851 static RISCVException rmw_hideleg(CPURISCVState *env, int csrno,
2852 target_ulong *ret_val,
2853 target_ulong new_val, target_ulong wr_mask)
2855 uint64_t rval;
2856 RISCVException ret;
2858 ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask);
2859 if (ret_val) {
2860 *ret_val = rval;
2863 return ret;
2866 static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno,
2867 target_ulong *ret_val,
2868 target_ulong new_val, target_ulong wr_mask)
2870 uint64_t rval;
2871 RISCVException ret;
2873 ret = rmw_hideleg64(env, csrno, &rval,
2874 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2875 if (ret_val) {
2876 *ret_val = rval >> 32;
2879 return ret;
2882 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
2883 uint64_t *ret_val,
2884 uint64_t new_val, uint64_t wr_mask)
2886 RISCVException ret;
2888 ret = rmw_mip64(env, csrno, ret_val, new_val,
2889 wr_mask & hvip_writable_mask);
2890 if (ret_val) {
2891 *ret_val &= VS_MODE_INTERRUPTS;
2894 return ret;
2897 static RISCVException rmw_hvip(CPURISCVState *env, int csrno,
2898 target_ulong *ret_val,
2899 target_ulong new_val, target_ulong wr_mask)
2901 uint64_t rval;
2902 RISCVException ret;
2904 ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask);
2905 if (ret_val) {
2906 *ret_val = rval;
2909 return ret;
2912 static RISCVException rmw_hviph(CPURISCVState *env, int csrno,
2913 target_ulong *ret_val,
2914 target_ulong new_val, target_ulong wr_mask)
2916 uint64_t rval;
2917 RISCVException ret;
2919 ret = rmw_hvip64(env, csrno, &rval,
2920 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2921 if (ret_val) {
2922 *ret_val = rval >> 32;
2925 return ret;
2928 static RISCVException rmw_hip(CPURISCVState *env, int csrno,
2929 target_ulong *ret_value,
2930 target_ulong new_value, target_ulong write_mask)
2932 int ret = rmw_mip(env, csrno, ret_value, new_value,
2933 write_mask & hip_writable_mask);
2935 if (ret_value) {
2936 *ret_value &= HS_MODE_INTERRUPTS;
2938 return ret;
2941 static RISCVException rmw_hie(CPURISCVState *env, int csrno,
2942 target_ulong *ret_val,
2943 target_ulong new_val, target_ulong wr_mask)
2945 uint64_t rval;
2946 RISCVException ret;
2948 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS);
2949 if (ret_val) {
2950 *ret_val = rval & HS_MODE_INTERRUPTS;
2953 return ret;
2956 static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
2957 target_ulong *val)
2959 *val = env->hcounteren;
2960 return RISCV_EXCP_NONE;
2963 static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
2964 target_ulong val)
2966 env->hcounteren = val;
2967 return RISCV_EXCP_NONE;
2970 static RISCVException read_hgeie(CPURISCVState *env, int csrno,
2971 target_ulong *val)
2973 if (val) {
2974 *val = env->hgeie;
2976 return RISCV_EXCP_NONE;
2979 static RISCVException write_hgeie(CPURISCVState *env, int csrno,
2980 target_ulong val)
2982 /* Only bits GEILEN:1 are implemented; bit 0 is never implemented */
2983 val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
2984 env->hgeie = val;
2985 /* Update mip.SGEIP bit */
2986 riscv_cpu_update_mip(env_archcpu(env), MIP_SGEIP,
2987 BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
2988 return RISCV_EXCP_NONE;
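/*
 * Only bits [GEILEN:1] of hgeie are writable, so with e.g. geilen == 2 the
 * mask computed above is ((1 << 2) - 1) << 1 == 0b110, keeping guest
 * external interrupt enables 1 and 2 and forcing everything else (including
 * bit 0) to zero before mip.SGEIP is recomputed.
 */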
2991 static RISCVException read_htval(CPURISCVState *env, int csrno,
2992 target_ulong *val)
2994 *val = env->htval;
2995 return RISCV_EXCP_NONE;
2998 static RISCVException write_htval(CPURISCVState *env, int csrno,
2999 target_ulong val)
3001 env->htval = val;
3002 return RISCV_EXCP_NONE;
3005 static RISCVException read_htinst(CPURISCVState *env, int csrno,
3006 target_ulong *val)
3008 *val = env->htinst;
3009 return RISCV_EXCP_NONE;
3012 static RISCVException write_htinst(CPURISCVState *env, int csrno,
3013 target_ulong val)
3015 return RISCV_EXCP_NONE;
3018 static RISCVException read_hgeip(CPURISCVState *env, int csrno,
3019 target_ulong *val)
3021 if (val) {
3022 *val = env->hgeip;
3024 return RISCV_EXCP_NONE;
3027 static RISCVException read_hgatp(CPURISCVState *env, int csrno,
3028 target_ulong *val)
3030 *val = env->hgatp;
3031 return RISCV_EXCP_NONE;
3034 static RISCVException write_hgatp(CPURISCVState *env, int csrno,
3035 target_ulong val)
3037 env->hgatp = val;
3038 return RISCV_EXCP_NONE;
3041 static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
3042 target_ulong *val)
3044 if (!env->rdtime_fn) {
3045 return RISCV_EXCP_ILLEGAL_INST;
3048 *val = env->htimedelta;
3049 return RISCV_EXCP_NONE;
3052 static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
3053 target_ulong val)
3055 if (!env->rdtime_fn) {
3056 return RISCV_EXCP_ILLEGAL_INST;
3059 if (riscv_cpu_mxl(env) == MXL_RV32) {
3060 env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
3061 } else {
3062 env->htimedelta = val;
3064 return RISCV_EXCP_NONE;
3067 static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
3068 target_ulong *val)
3070 if (!env->rdtime_fn) {
3071 return RISCV_EXCP_ILLEGAL_INST;
3074 *val = env->htimedelta >> 32;
3075 return RISCV_EXCP_NONE;
3078 static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
3079 target_ulong val)
3081 if (!env->rdtime_fn) {
3082 return RISCV_EXCP_ILLEGAL_INST;
3085 env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);
3086 return RISCV_EXCP_NONE;
3089 static int read_hvictl(CPURISCVState *env, int csrno, target_ulong *val)
3091 *val = env->hvictl;
3092 return RISCV_EXCP_NONE;
3095 static int write_hvictl(CPURISCVState *env, int csrno, target_ulong val)
3097 env->hvictl = val & HVICTL_VALID_MASK;
3098 return RISCV_EXCP_NONE;
3101 static int read_hvipriox(CPURISCVState *env, int first_index,
3102 uint8_t *iprio, target_ulong *val)
3104 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
3106 /* First index has to be a multiple of the number of IRQs per register */
3107 if (first_index % num_irqs) {
3108 return (riscv_cpu_virt_enabled(env)) ?
3109 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
3112 /* Fill up the return value */
3113 *val = 0;
3114 for (i = 0; i < num_irqs; i++) {
3115 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
3116 continue;
3118 if (rdzero) {
3119 continue;
3121 *val |= ((target_ulong)iprio[irq]) << (i * 8);
3124 return RISCV_EXCP_NONE;
3127 static int write_hvipriox(CPURISCVState *env, int first_index,
3128 uint8_t *iprio, target_ulong val)
3130 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
3132 /* First index has to be a multiple of the number of IRQs per register */
3133 if (first_index % num_irqs) {
3134 return (riscv_cpu_virt_enabled(env)) ?
3135 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
3138 /* Fill up the priority array */
3139 for (i = 0; i < num_irqs; i++) {
3140 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
3141 continue;
3143 if (rdzero) {
3144 iprio[irq] = 0;
3145 } else {
3146 iprio[irq] = (val >> (i * 8)) & 0xff;
3150 return RISCV_EXCP_NONE;
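/*
 * Each hviprioX register packs one 8-bit priority per interrupt, i.e. 4
 * entries on RV32 and 8 on RV64 (num_irqs above). For example, on RV64
 * hviprio1 covers priority indexes 0-7 and hviprio2 starts at index 8,
 * which is why the accessors below pass first_index values of 0, 4, 8
 * and 12.
 */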
3153 static int read_hviprio1(CPURISCVState *env, int csrno, target_ulong *val)
3155 return read_hvipriox(env, 0, env->hviprio, val);
3158 static int write_hviprio1(CPURISCVState *env, int csrno, target_ulong val)
3160 return write_hvipriox(env, 0, env->hviprio, val);
3163 static int read_hviprio1h(CPURISCVState *env, int csrno, target_ulong *val)
3165 return read_hvipriox(env, 4, env->hviprio, val);
3168 static int write_hviprio1h(CPURISCVState *env, int csrno, target_ulong val)
3170 return write_hvipriox(env, 4, env->hviprio, val);
3173 static int read_hviprio2(CPURISCVState *env, int csrno, target_ulong *val)
3175 return read_hvipriox(env, 8, env->hviprio, val);
3178 static int write_hviprio2(CPURISCVState *env, int csrno, target_ulong val)
3180 return write_hvipriox(env, 8, env->hviprio, val);
3183 static int read_hviprio2h(CPURISCVState *env, int csrno, target_ulong *val)
3185 return read_hvipriox(env, 12, env->hviprio, val);
3188 static int write_hviprio2h(CPURISCVState *env, int csrno, target_ulong val)
3190 return write_hvipriox(env, 12, env->hviprio, val);
3193 /* Virtual CSR Registers */
3194 static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
3195 target_ulong *val)
3197 *val = env->vsstatus;
3198 return RISCV_EXCP_NONE;
3201 static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
3202 target_ulong val)
3204 uint64_t mask = (target_ulong)-1;
3205 if ((val & VSSTATUS64_UXL) == 0) {
3206 mask &= ~VSSTATUS64_UXL;
3208 env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
3209 return RISCV_EXCP_NONE;
3212 static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val)
3214 *val = env->vstvec;
3215 return RISCV_EXCP_NONE;
3218 static RISCVException write_vstvec(CPURISCVState *env, int csrno,
3219 target_ulong val)
3221 env->vstvec = val;
3222 return RISCV_EXCP_NONE;
3225 static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
3226 target_ulong *val)
3228 *val = env->vsscratch;
3229 return RISCV_EXCP_NONE;
3232 static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
3233 target_ulong val)
3235 env->vsscratch = val;
3236 return RISCV_EXCP_NONE;
3239 static RISCVException read_vsepc(CPURISCVState *env, int csrno,
3240 target_ulong *val)
3242 *val = env->vsepc;
3243 return RISCV_EXCP_NONE;
3246 static RISCVException write_vsepc(CPURISCVState *env, int csrno,
3247 target_ulong val)
3249 env->vsepc = val;
3250 return RISCV_EXCP_NONE;
3253 static RISCVException read_vscause(CPURISCVState *env, int csrno,
3254 target_ulong *val)
3256 *val = env->vscause;
3257 return RISCV_EXCP_NONE;
3260 static RISCVException write_vscause(CPURISCVState *env, int csrno,
3261 target_ulong val)
3263 env->vscause = val;
3264 return RISCV_EXCP_NONE;
3267 static RISCVException read_vstval(CPURISCVState *env, int csrno,
3268 target_ulong *val)
3270 *val = env->vstval;
3271 return RISCV_EXCP_NONE;
3274 static RISCVException write_vstval(CPURISCVState *env, int csrno,
3275 target_ulong val)
3277 env->vstval = val;
3278 return RISCV_EXCP_NONE;
3281 static RISCVException read_vsatp(CPURISCVState *env, int csrno,
3282 target_ulong *val)
3284 *val = env->vsatp;
3285 return RISCV_EXCP_NONE;
3288 static RISCVException write_vsatp(CPURISCVState *env, int csrno,
3289 target_ulong val)
3291 env->vsatp = val;
3292 return RISCV_EXCP_NONE;
3295 static RISCVException read_mtval2(CPURISCVState *env, int csrno,
3296 target_ulong *val)
3298 *val = env->mtval2;
3299 return RISCV_EXCP_NONE;
3302 static RISCVException write_mtval2(CPURISCVState *env, int csrno,
3303 target_ulong val)
3305 env->mtval2 = val;
3306 return RISCV_EXCP_NONE;
3309 static RISCVException read_mtinst(CPURISCVState *env, int csrno,
3310 target_ulong *val)
3312 *val = env->mtinst;
3313 return RISCV_EXCP_NONE;
3316 static RISCVException write_mtinst(CPURISCVState *env, int csrno,
3317 target_ulong val)
3319 env->mtinst = val;
3320 return RISCV_EXCP_NONE;
3323 /* Physical Memory Protection */
3324 static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
3325 target_ulong *val)
3327 *val = mseccfg_csr_read(env);
3328 return RISCV_EXCP_NONE;
3331 static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
3332 target_ulong val)
3334 mseccfg_csr_write(env, val);
3335 return RISCV_EXCP_NONE;
3338 static bool check_pmp_reg_index(CPURISCVState *env, uint32_t reg_index)
3340 /* TODO: RV128 restriction check */
3341 if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
3342 return false;
3344 return true;
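/*
 * On RV64 each pmpcfg register holds eight 8-bit entries, so only the
 * even-numbered pmpcfg CSRs exist (pmpcfg1, pmpcfg3, ... are RV32 only);
 * odd indexes are therefore rejected above.
 */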
3347 static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
3348 target_ulong *val)
3350 uint32_t reg_index = csrno - CSR_PMPCFG0;
3352 if (!check_pmp_reg_index(env, reg_index)) {
3353 return RISCV_EXCP_ILLEGAL_INST;
3355 *val = pmpcfg_csr_read(env, csrno - CSR_PMPCFG0);
3356 return RISCV_EXCP_NONE;
3359 static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
3360 target_ulong val)
3362 uint32_t reg_index = csrno - CSR_PMPCFG0;
3364 if (!check_pmp_reg_index(env, reg_index)) {
3365 return RISCV_EXCP_ILLEGAL_INST;
3367 pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val);
3368 return RISCV_EXCP_NONE;
3371 static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
3372 target_ulong *val)
3374 *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
3375 return RISCV_EXCP_NONE;
3378 static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
3379 target_ulong val)
3381 pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
3382 return RISCV_EXCP_NONE;
3385 static RISCVException read_tselect(CPURISCVState *env, int csrno,
3386 target_ulong *val)
3388 *val = tselect_csr_read(env);
3389 return RISCV_EXCP_NONE;
3392 static RISCVException write_tselect(CPURISCVState *env, int csrno,
3393 target_ulong val)
3395 tselect_csr_write(env, val);
3396 return RISCV_EXCP_NONE;
3399 static RISCVException read_tdata(CPURISCVState *env, int csrno,
3400 target_ulong *val)
3402 /* return 0 in tdata1 to end the trigger enumeration */
3403 if (env->trigger_cur >= RV_MAX_TRIGGERS && csrno == CSR_TDATA1) {
3404 *val = 0;
3405 return RISCV_EXCP_NONE;
3408 if (!tdata_available(env, csrno - CSR_TDATA1)) {
3409 return RISCV_EXCP_ILLEGAL_INST;
3412 *val = tdata_csr_read(env, csrno - CSR_TDATA1);
3413 return RISCV_EXCP_NONE;
3416 static RISCVException write_tdata(CPURISCVState *env, int csrno,
3417 target_ulong val)
3419 if (!tdata_available(env, csrno - CSR_TDATA1)) {
3420 return RISCV_EXCP_ILLEGAL_INST;
3423 tdata_csr_write(env, csrno - CSR_TDATA1, val);
3424 return RISCV_EXCP_NONE;
3427 static RISCVException read_tinfo(CPURISCVState *env, int csrno,
3428 target_ulong *val)
3430 *val = tinfo_csr_read(env);
3431 return RISCV_EXCP_NONE;
3435 * Functions to access Pointer Masking feature registers
3436 * We have to check whether the current privilege level is allowed to
3437 * modify the CSR in the given mode.
3439 static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
3441 int csr_priv = get_field(csrno, 0x300);
3442 int pm_current;
3444 if (env->debugger) {
3445 return false;
3448 * If the privilege levels differ, we are accessing the CSR from a higher
3449 * privilege level, so allow the access.
3451 if (env->priv != csr_priv) {
3452 return false;
3454 switch (env->priv) {
3455 case PRV_M:
3456 pm_current = get_field(env->mmte, M_PM_CURRENT);
3457 break;
3458 case PRV_S:
3459 pm_current = get_field(env->mmte, S_PM_CURRENT);
3460 break;
3461 case PRV_U:
3462 pm_current = get_field(env->mmte, U_PM_CURRENT);
3463 break;
3464 default:
3465 g_assert_not_reached();
3467 /* Same privilege level, so modifying the CSR is only allowed if pm.current==1 */
3468 return !pm_current;
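/*
 * In other words: accesses from a higher privilege level (or from the
 * debugger) are always allowed, while same-level writes to the
 * pointer-masking CSRs are only permitted when the pm.current bit for that
 * level is set in mmte.
 */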
3471 static RISCVException read_mmte(CPURISCVState *env, int csrno,
3472 target_ulong *val)
3474 *val = env->mmte & MMTE_MASK;
3475 return RISCV_EXCP_NONE;
3478 static RISCVException write_mmte(CPURISCVState *env, int csrno,
3479 target_ulong val)
3481 uint64_t mstatus;
3482 target_ulong wpri_val = val & MMTE_MASK;
3484 if (val != wpri_val) {
3485 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
3486 "MMTE: WPRI violation written 0x", val,
3487 "vs expected 0x", wpri_val);
3489 /* for machine mode pm.current is hardwired to 1 */
3490 wpri_val |= MMTE_M_PM_CURRENT;
3492 /* hardwiring pm.instruction bit to 0, since it's not supported yet */
3493 wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
3494 env->mmte = wpri_val | PM_EXT_DIRTY;
3495 riscv_cpu_update_mask(env);
3497 /* Set XS and SD bits, since PM CSRs are dirty */
3498 mstatus = env->mstatus | MSTATUS_XS;
3499 write_mstatus(env, csrno, mstatus);
3500 return RISCV_EXCP_NONE;
3503 static RISCVException read_smte(CPURISCVState *env, int csrno,
3504 target_ulong *val)
3506 *val = env->mmte & SMTE_MASK;
3507 return RISCV_EXCP_NONE;
3510 static RISCVException write_smte(CPURISCVState *env, int csrno,
3511 target_ulong val)
3513 target_ulong wpri_val = val & SMTE_MASK;
3515 if (val != wpri_val) {
3516 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
3517 "SMTE: WPRI violation written 0x", val,
3518 "vs expected 0x", wpri_val);
3521 /* if pm.current==0 we can't modify current PM CSRs */
3522 if (check_pm_current_disabled(env, csrno)) {
3523 return RISCV_EXCP_NONE;
3526 wpri_val |= (env->mmte & ~SMTE_MASK);
3527 write_mmte(env, csrno, wpri_val);
3528 return RISCV_EXCP_NONE;
3531 static RISCVException read_umte(CPURISCVState *env, int csrno,
3532 target_ulong *val)
3534 *val = env->mmte & UMTE_MASK;
3535 return RISCV_EXCP_NONE;
3538 static RISCVException write_umte(CPURISCVState *env, int csrno,
3539 target_ulong val)
3541 target_ulong wpri_val = val & UMTE_MASK;
3543 if (val != wpri_val) {
3544 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
3545 "UMTE: WPRI violation written 0x", val,
3546 "vs expected 0x", wpri_val);
3549 if (check_pm_current_disabled(env, csrno)) {
3550 return RISCV_EXCP_NONE;
3553 wpri_val |= (env->mmte & ~UMTE_MASK);
3554 write_mmte(env, csrno, wpri_val);
3555 return RISCV_EXCP_NONE;
3558 static RISCVException read_mpmmask(CPURISCVState *env, int csrno,
3559 target_ulong *val)
3561 *val = env->mpmmask;
3562 return RISCV_EXCP_NONE;
3565 static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
3566 target_ulong val)
3568 uint64_t mstatus;
3570 env->mpmmask = val;
3571 if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
3572 env->cur_pmmask = val;
3574 env->mmte |= PM_EXT_DIRTY;
3576 /* Set XS and SD bits, since PM CSRs are dirty */
3577 mstatus = env->mstatus | MSTATUS_XS;
3578 write_mstatus(env, csrno, mstatus);
3579 return RISCV_EXCP_NONE;
3582 static RISCVException read_spmmask(CPURISCVState *env, int csrno,
3583 target_ulong *val)
3585 *val = env->spmmask;
3586 return RISCV_EXCP_NONE;
3589 static RISCVException write_spmmask(CPURISCVState *env, int csrno,
3590 target_ulong val)
3592 uint64_t mstatus;
3594 /* if pm.current==0 we can't modify current PM CSRs */
3595 if (check_pm_current_disabled(env, csrno)) {
3596 return RISCV_EXCP_NONE;
3598 env->spmmask = val;
3599 if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
3600 env->cur_pmmask = val;
3602 env->mmte |= PM_EXT_DIRTY;
3604 /* Set XS and SD bits, since PM CSRs are dirty */
3605 mstatus = env->mstatus | MSTATUS_XS;
3606 write_mstatus(env, csrno, mstatus);
3607 return RISCV_EXCP_NONE;
3610 static RISCVException read_upmmask(CPURISCVState *env, int csrno,
3611 target_ulong *val)
3613 *val = env->upmmask;
3614 return RISCV_EXCP_NONE;
3617 static RISCVException write_upmmask(CPURISCVState *env, int csrno,
3618 target_ulong val)
3620 uint64_t mstatus;
3622 /* if pm.current==0 we can't modify current PM CSRs */
3623 if (check_pm_current_disabled(env, csrno)) {
3624 return RISCV_EXCP_NONE;
3626 env->upmmask = val;
3627 if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
3628 env->cur_pmmask = val;
3630 env->mmte |= PM_EXT_DIRTY;
3632 /* Set XS and SD bits, since PM CSRs are dirty */
3633 mstatus = env->mstatus | MSTATUS_XS;
3634 write_mstatus(env, csrno, mstatus);
3635 return RISCV_EXCP_NONE;
3638 static RISCVException read_mpmbase(CPURISCVState *env, int csrno,
3639 target_ulong *val)
3641 *val = env->mpmbase;
3642 return RISCV_EXCP_NONE;
3645 static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
3646 target_ulong val)
3648 uint64_t mstatus;
3650 env->mpmbase = val;
3651 if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
3652 env->cur_pmbase = val;
3654 env->mmte |= PM_EXT_DIRTY;
3656 /* Set XS and SD bits, since PM CSRs are dirty */
3657 mstatus = env->mstatus | MSTATUS_XS;
3658 write_mstatus(env, csrno, mstatus);
3659 return RISCV_EXCP_NONE;
3662 static RISCVException read_spmbase(CPURISCVState *env, int csrno,
3663 target_ulong *val)
3665 *val = env->spmbase;
3666 return RISCV_EXCP_NONE;
3669 static RISCVException write_spmbase(CPURISCVState *env, int csrno,
3670 target_ulong val)
3672 uint64_t mstatus;
3674 /* if pm.current==0 we can't modify current PM CSRs */
3675 if (check_pm_current_disabled(env, csrno)) {
3676 return RISCV_EXCP_NONE;
3678 env->spmbase = val;
3679 if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
3680 env->cur_pmbase = val;
3682 env->mmte |= PM_EXT_DIRTY;
3684 /* Set XS and SD bits, since PM CSRs are dirty */
3685 mstatus = env->mstatus | MSTATUS_XS;
3686 write_mstatus(env, csrno, mstatus);
3687 return RISCV_EXCP_NONE;
3690 static RISCVException read_upmbase(CPURISCVState *env, int csrno,
3691 target_ulong *val)
3693 *val = env->upmbase;
3694 return RISCV_EXCP_NONE;
3697 static RISCVException write_upmbase(CPURISCVState *env, int csrno,
3698 target_ulong val)
3700 uint64_t mstatus;
3702 /* if pm.current==0 we can't modify current PM CSRs */
3703 if (check_pm_current_disabled(env, csrno)) {
3704 return RISCV_EXCP_NONE;
3706 env->upmbase = val;
3707 if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
3708 env->cur_pmbase = val;
3710 env->mmte |= PM_EXT_DIRTY;
3712 /* Set XS and SD bits, since PM CSRs are dirty */
3713 mstatus = env->mstatus | MSTATUS_XS;
3714 write_mstatus(env, csrno, mstatus);
3715 return RISCV_EXCP_NONE;
3718 #endif
3720 /* Crypto Extension */
3721 static RISCVException rmw_seed(CPURISCVState *env, int csrno,
3722 target_ulong *ret_value,
3723 target_ulong new_value,
3724 target_ulong write_mask)
3726 uint16_t random_v;
3727 Error *random_e = NULL;
3728 int random_r;
3729 target_ulong rval;
3731 random_r = qemu_guest_getrandom(&random_v, 2, &random_e);
3732 if (unlikely(random_r < 0)) {
3734 * Failed, for unknown reasons in the crypto subsystem.
3735 * The best we can do is log the reason and return a
3736 * failure indication to the guest. There is no reason
3737 * we know to expect the failure to be transitory, so
3738 * indicate DEAD to avoid having the guest spin on WAIT.
3740 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
3741 __func__, error_get_pretty(random_e));
3742 error_free(random_e);
3743 rval = SEED_OPST_DEAD;
3744 } else {
3745 rval = random_v | SEED_OPST_ES16;
3748 if (ret_value) {
3749 *ret_value = rval;
3752 return RISCV_EXCP_NONE;
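/*
 * The value returned above follows the Zkr seed CSR format: on success the
 * low 16 bits hold fresh entropy and OPST is ES16; on failure OPST is DEAD
 * so the guest does not keep polling for entropy that will never arrive.
 */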
3756 * riscv_csrrw - read and/or update control and status register
3758 * csrr <-> riscv_csrrw(env, csrno, ret_value, 0, 0);
3759 * csrrw <-> riscv_csrrw(env, csrno, ret_value, value, -1);
3760 * csrrs <-> riscv_csrrw(env, csrno, ret_value, -1, value);
3761 * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value);
3764 static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
3765 int csrno,
3766 bool write_mask,
3767 RISCVCPU *cpu)
3769 /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
3770 int read_only = get_field(csrno, 0xC00) == 3;
3771 int csr_min_priv = csr_ops[csrno].min_priv_ver;
3773 /* ensure the CSR extension is enabled. */
3774 if (!cpu->cfg.ext_icsr) {
3775 return RISCV_EXCP_ILLEGAL_INST;
3778 if (env->priv_ver < csr_min_priv) {
3779 return RISCV_EXCP_ILLEGAL_INST;
3782 /* check predicate */
3783 if (!csr_ops[csrno].predicate) {
3784 return RISCV_EXCP_ILLEGAL_INST;
3787 if (write_mask && read_only) {
3788 return RISCV_EXCP_ILLEGAL_INST;
3791 RISCVException ret = csr_ops[csrno].predicate(env, csrno);
3792 if (ret != RISCV_EXCP_NONE) {
3793 return ret;
3796 #if !defined(CONFIG_USER_ONLY)
3797 int csr_priv, effective_priv = env->priv;
3799 if (riscv_has_ext(env, RVH) && env->priv == PRV_S &&
3800 !riscv_cpu_virt_enabled(env)) {
3802 * We are in HS mode. Add 1 to the effective privilege level to
3803 * allow us to access the Hypervisor CSRs.
3805 effective_priv++;
3808 csr_priv = get_field(csrno, 0x300);
3809 if (!env->debugger && (effective_priv < csr_priv)) {
3810 if (csr_priv == (PRV_S + 1) && riscv_cpu_virt_enabled(env)) {
3811 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
3813 return RISCV_EXCP_ILLEGAL_INST;
3815 #endif
3816 return RISCV_EXCP_NONE;
3819 static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
3820 target_ulong *ret_value,
3821 target_ulong new_value,
3822 target_ulong write_mask)
3824 RISCVException ret;
3825 target_ulong old_value;
3827 /* execute combined read/write operation if it exists */
3828 if (csr_ops[csrno].op) {
3829 return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
3832 /* if no accessor exists then return failure */
3833 if (!csr_ops[csrno].read) {
3834 return RISCV_EXCP_ILLEGAL_INST;
3836 /* read old value */
3837 ret = csr_ops[csrno].read(env, csrno, &old_value);
3838 if (ret != RISCV_EXCP_NONE) {
3839 return ret;
3842 /* write value if writable and write mask set, otherwise drop writes */
3843 if (write_mask) {
3844 new_value = (old_value & ~write_mask) | (new_value & write_mask);
3845 if (csr_ops[csrno].write) {
3846 ret = csr_ops[csrno].write(env, csrno, new_value);
3847 if (ret != RISCV_EXCP_NONE) {
3848 return ret;
3853 /* return old value */
3854 if (ret_value) {
3855 *ret_value = old_value;
3858 return RISCV_EXCP_NONE;
3861 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
3862 target_ulong *ret_value,
3863 target_ulong new_value, target_ulong write_mask)
3865 RISCVCPU *cpu = env_archcpu(env);
3867 RISCVException ret = riscv_csrrw_check(env, csrno, write_mask, cpu);
3868 if (ret != RISCV_EXCP_NONE) {
3869 return ret;
3872 return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
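/*
 * Per the mapping documented above, e.g. a csrrs-style update only needs the
 * bits to set as the write mask and all-ones as the new value, while csrrc
 * passes zero as the new value:
 *
 *     riscv_csrrw(env, CSR_SSTATUS, &old, (target_ulong)-1, bits);  // set
 *     riscv_csrrw(env, CSR_SSTATUS, &old, 0, bits);                 // clear
 */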
3875 static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
3876 Int128 *ret_value,
3877 Int128 new_value,
3878 Int128 write_mask)
3880 RISCVException ret;
3881 Int128 old_value;
3883 /* read old value */
3884 ret = csr_ops[csrno].read128(env, csrno, &old_value);
3885 if (ret != RISCV_EXCP_NONE) {
3886 return ret;
3889 /* write value if writable and write mask set, otherwise drop writes */
3890 if (int128_nz(write_mask)) {
3891 new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
3892 int128_and(new_value, write_mask));
3893 if (csr_ops[csrno].write128) {
3894 ret = csr_ops[csrno].write128(env, csrno, new_value);
3895 if (ret != RISCV_EXCP_NONE) {
3896 return ret;
3898 } else if (csr_ops[csrno].write) {
3899 /* avoids having to write wrappers for all registers */
3900 ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
3901 if (ret != RISCV_EXCP_NONE) {
3902 return ret;
3907 /* return old value */
3908 if (ret_value) {
3909 *ret_value = old_value;
3912 return RISCV_EXCP_NONE;
3915 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
3916 Int128 *ret_value,
3917 Int128 new_value, Int128 write_mask)
3919 RISCVException ret;
3920 RISCVCPU *cpu = env_archcpu(env);
3922 ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask), cpu);
3923 if (ret != RISCV_EXCP_NONE) {
3924 return ret;
3927 if (csr_ops[csrno].read128) {
3928 return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
3932 * Fall back to the 64-bit version for now if no 128-bit alternative is
3933 * defined at all.
3934 * Note that some CSRs do not need to extend to MXLEN (the upper 64 bits
3935 * are not significant); for those, this fallback handles the accesses correctly.
3937 target_ulong old_value;
3938 ret = riscv_csrrw_do64(env, csrno, &old_value,
3939 int128_getlo(new_value),
3940 int128_getlo(write_mask));
3941 if (ret == RISCV_EXCP_NONE && ret_value) {
3942 *ret_value = int128_make64(old_value);
3944 return ret;
3948 * Debugger support. If not in user mode, set env->debugger before the
3949 * riscv_csrrw call and clear it after the call.
3951 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
3952 target_ulong *ret_value,
3953 target_ulong new_value,
3954 target_ulong write_mask)
3956 RISCVException ret;
3957 #if !defined(CONFIG_USER_ONLY)
3958 env->debugger = true;
3959 #endif
3960 ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
3961 #if !defined(CONFIG_USER_ONLY)
3962 env->debugger = false;
3963 #endif
3964 return ret;
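/*
 * This is how, for instance, the gdbstub reads CSRs: a zero write mask turns
 * the access into a plain read, while env->debugger relaxes predicate checks
 * (counter enables, pointer-masking pm.current, etc.) that would otherwise
 * fault the access.
 */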
3967 /* Control and Status Register function table */
3968 riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
3969 /* User Floating-Point CSRs */
3970 [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
3971 [CSR_FRM] = { "frm", fs, read_frm, write_frm },
3972 [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr },
3973 /* Vector CSRs */
3974 [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart,
3975 .min_priv_ver = PRIV_VERSION_1_12_0 },
3976 [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat,
3977 .min_priv_ver = PRIV_VERSION_1_12_0 },
3978 [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm,
3979 .min_priv_ver = PRIV_VERSION_1_12_0 },
3980 [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr,
3981 .min_priv_ver = PRIV_VERSION_1_12_0 },
3982 [CSR_VL] = { "vl", vs, read_vl,
3983 .min_priv_ver = PRIV_VERSION_1_12_0 },
3984 [CSR_VTYPE] = { "vtype", vs, read_vtype,
3985 .min_priv_ver = PRIV_VERSION_1_12_0 },
3986 [CSR_VLENB] = { "vlenb", vs, read_vlenb,
3987 .min_priv_ver = PRIV_VERSION_1_12_0 },
3988 /* User Timers and Counters */
3989 [CSR_CYCLE] = { "cycle", ctr, read_hpmcounter },
3990 [CSR_INSTRET] = { "instret", ctr, read_hpmcounter },
3991 [CSR_CYCLEH] = { "cycleh", ctr32, read_hpmcounterh },
3992 [CSR_INSTRETH] = { "instreth", ctr32, read_hpmcounterh },
3995 * In privileged mode, the monitor has to emulate the TIME CSRs only if the
3996 * rdtime callback is not provided by machine/platform emulation.
3998 [CSR_TIME] = { "time", ctr, read_time },
3999 [CSR_TIMEH] = { "timeh", ctr32, read_timeh },
4001 /* Crypto Extension */
4002 [CSR_SEED] = { "seed", seed, NULL, NULL, rmw_seed },
4004 #if !defined(CONFIG_USER_ONLY)
4005 /* Machine Timers and Counters */
4006 [CSR_MCYCLE] = { "mcycle", any, read_hpmcounter,
4007 write_mhpmcounter },
4008 [CSR_MINSTRET] = { "minstret", any, read_hpmcounter,
4009 write_mhpmcounter },
4010 [CSR_MCYCLEH] = { "mcycleh", any32, read_hpmcounterh,
4011 write_mhpmcounterh },
4012 [CSR_MINSTRETH] = { "minstreth", any32, read_hpmcounterh,
4013 write_mhpmcounterh },
4015 /* Machine Information Registers */
4016 [CSR_MVENDORID] = { "mvendorid", any, read_mvendorid },
4017 [CSR_MARCHID] = { "marchid", any, read_marchid },
4018 [CSR_MIMPID] = { "mimpid", any, read_mimpid },
4019 [CSR_MHARTID] = { "mhartid", any, read_mhartid },
4021 [CSR_MCONFIGPTR] = { "mconfigptr", any, read_zero,
4022 .min_priv_ver = PRIV_VERSION_1_12_0 },
4023 /* Machine Trap Setup */
4024 [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus,
4025 NULL, read_mstatus_i128 },
4026 [CSR_MISA] = { "misa", any, read_misa, write_misa,
4027 NULL, read_misa_i128 },
4028 [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg },
4029 [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg },
4030 [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie },
4031 [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec },
4032 [CSR_MCOUNTEREN] = { "mcounteren", umode, read_mcounteren,
4033 write_mcounteren },
4035 [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush,
4036 write_mstatush },
4038 /* Machine Trap Handling */
4039 [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch,
4040 NULL, read_mscratch_i128, write_mscratch_i128 },
4041 [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc },
4042 [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause },
4043 [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval },
4044 [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
4046 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */
4047 [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect },
4048 [CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg },
4050 /* Machine-Level Interrupts (AIA) */
4051 [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
4052 [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },
4054 /* Virtual Interrupts for Supervisor Level (AIA) */
4055 [CSR_MVIEN] = { "mvien", aia_any, read_zero, write_ignore },
4056 [CSR_MVIP] = { "mvip", aia_any, read_zero, write_ignore },
4058 /* Machine-Level High-Half CSRs (AIA) */
4059 [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh },
4060 [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh },
4061 [CSR_MVIENH] = { "mvienh", aia_any32, read_zero, write_ignore },
4062 [CSR_MVIPH] = { "mviph", aia_any32, read_zero, write_ignore },
4063 [CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph },
4065 /* Execution environment configuration */
4066 [CSR_MENVCFG] = { "menvcfg", umode, read_menvcfg, write_menvcfg,
4067 .min_priv_ver = PRIV_VERSION_1_12_0 },
4068 [CSR_MENVCFGH] = { "menvcfgh", umode32, read_menvcfgh, write_menvcfgh,
4069 .min_priv_ver = PRIV_VERSION_1_12_0 },
4070 [CSR_SENVCFG] = { "senvcfg", smode, read_senvcfg, write_senvcfg,
4071 .min_priv_ver = PRIV_VERSION_1_12_0 },
4072 [CSR_HENVCFG] = { "henvcfg", hmode, read_henvcfg, write_henvcfg,
4073 .min_priv_ver = PRIV_VERSION_1_12_0 },
4074 [CSR_HENVCFGH] = { "henvcfgh", hmode32, read_henvcfgh, write_henvcfgh,
4075 .min_priv_ver = PRIV_VERSION_1_12_0 },
4077 /* Smstateen extension CSRs */
4078 [CSR_MSTATEEN0] = { "mstateen0", mstateen, read_mstateen, write_mstateen0,
4079 .min_priv_ver = PRIV_VERSION_1_12_0 },
4080 [CSR_MSTATEEN0H] = { "mstateen0h", mstateen, read_mstateenh,
4081 write_mstateen0h,
4082 .min_priv_ver = PRIV_VERSION_1_12_0 },
4083 [CSR_MSTATEEN1] = { "mstateen1", mstateen, read_mstateen,
4084 write_mstateen_1_3,
4085 .min_priv_ver = PRIV_VERSION_1_12_0 },
4086 [CSR_MSTATEEN1H] = { "mstateen1h", mstateen, read_mstateenh,
4087 write_mstateenh_1_3,
4088 .min_priv_ver = PRIV_VERSION_1_12_0 },
4089 [CSR_MSTATEEN2] = { "mstateen2", mstateen, read_mstateen,
4090 write_mstateen_1_3,
4091 .min_priv_ver = PRIV_VERSION_1_12_0 },
4092 [CSR_MSTATEEN2H] = { "mstateen2h", mstateen, read_mstateenh,
4093 write_mstateenh_1_3,
4094 .min_priv_ver = PRIV_VERSION_1_12_0 },
4095 [CSR_MSTATEEN3] = { "mstateen3", mstateen, read_mstateen,
4096 write_mstateen_1_3,
4097 .min_priv_ver = PRIV_VERSION_1_12_0 },
4098 [CSR_MSTATEEN3H] = { "mstateen3h", mstateen, read_mstateenh,
4099 write_mstateenh_1_3,
4100 .min_priv_ver = PRIV_VERSION_1_12_0 },
4101 [CSR_HSTATEEN0] = { "hstateen0", hstateen, read_hstateen, write_hstateen0,
4102 .min_priv_ver = PRIV_VERSION_1_12_0 },
4103 [CSR_HSTATEEN0H] = { "hstateen0h", hstateenh, read_hstateenh,
4104 write_hstateen0h,
4105 .min_priv_ver = PRIV_VERSION_1_12_0 },
4106 [CSR_HSTATEEN1] = { "hstateen1", hstateen, read_hstateen,
4107 write_hstateen_1_3,
4108 .min_priv_ver = PRIV_VERSION_1_12_0 },
4109 [CSR_HSTATEEN1H] = { "hstateen1h", hstateenh, read_hstateenh,
4110 write_hstateenh_1_3,
4111 .min_priv_ver = PRIV_VERSION_1_12_0 },
4112 [CSR_HSTATEEN2] = { "hstateen2", hstateen, read_hstateen,
4113 write_hstateen_1_3,
4114 .min_priv_ver = PRIV_VERSION_1_12_0 },
4115 [CSR_HSTATEEN2H] = { "hstateen2h", hstateenh, read_hstateenh,
4116 write_hstateenh_1_3,
4117 .min_priv_ver = PRIV_VERSION_1_12_0 },
4118 [CSR_HSTATEEN3] = { "hstateen3", hstateen, read_hstateen,
4119 write_hstateen_1_3,
4120 .min_priv_ver = PRIV_VERSION_1_12_0 },
4121 [CSR_HSTATEEN3H] = { "hstateen3h", hstateenh, read_hstateenh,
4122 write_hstateenh_1_3,
4123 .min_priv_ver = PRIV_VERSION_1_12_0 },
4124 [CSR_SSTATEEN0] = { "sstateen0", sstateen, read_sstateen, write_sstateen0,
4125 .min_priv_ver = PRIV_VERSION_1_12_0 },
4126 [CSR_SSTATEEN1] = { "sstateen1", sstateen, read_sstateen,
4127 write_sstateen_1_3,
4128 .min_priv_ver = PRIV_VERSION_1_12_0 },
4129 [CSR_SSTATEEN2] = { "sstateen2", sstateen, read_sstateen,
4130 write_sstateen_1_3,
4131 .min_priv_ver = PRIV_VERSION_1_12_0 },
4132 [CSR_SSTATEEN3] = { "sstateen3", sstateen, read_sstateen,
4133 write_sstateen_1_3,
4134 .min_priv_ver = PRIV_VERSION_1_12_0 },
4136 /* Supervisor Trap Setup */
4137 [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus,
4138 NULL, read_sstatus_i128 },
4139 [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie },
4140 [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec },
4141 [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren,
4142 write_scounteren },
4144 /* Supervisor Trap Handling */
4145 [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch,
4146 NULL, read_sscratch_i128, write_sscratch_i128 },
4147 [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc },
4148 [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause },
4149 [CSR_STVAL] = { "stval", smode, read_stval, write_stval },
4150 [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip },
4151 [CSR_STIMECMP] = { "stimecmp", sstc, read_stimecmp, write_stimecmp,
4152 .min_priv_ver = PRIV_VERSION_1_12_0 },
4153 [CSR_STIMECMPH] = { "stimecmph", sstc_32, read_stimecmph, write_stimecmph,
4154 .min_priv_ver = PRIV_VERSION_1_12_0 },
4155 [CSR_VSTIMECMP] = { "vstimecmp", sstc, read_vstimecmp,
4156 write_vstimecmp,
4157 .min_priv_ver = PRIV_VERSION_1_12_0 },
4158 [CSR_VSTIMECMPH] = { "vstimecmph", sstc_32, read_vstimecmph,
4159 write_vstimecmph,
4160 .min_priv_ver = PRIV_VERSION_1_12_0 },
4162 /* Supervisor Protection and Translation */
4163 [CSR_SATP] = { "satp", smode, read_satp, write_satp },
4165 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
4166 [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect },
4167 [CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg },
4169 /* Supervisor-Level Interrupts (AIA) */
4170 [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
4171 [CSR_STOPI] = { "stopi", aia_smode, read_stopi },
4173 /* Supervisor-Level High-Half CSRs (AIA) */
4174 [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh },
4175 [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph },
4177 [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus,
4178 .min_priv_ver = PRIV_VERSION_1_12_0 },
4179 [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg,
4180 .min_priv_ver = PRIV_VERSION_1_12_0 },
4181 [CSR_HIDELEG] = { "hideleg", hmode, NULL, NULL, rmw_hideleg,
4182 .min_priv_ver = PRIV_VERSION_1_12_0 },
4183 [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip,
4184 .min_priv_ver = PRIV_VERSION_1_12_0 },
4185 [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip,
4186 .min_priv_ver = PRIV_VERSION_1_12_0 },
4187 [CSR_HIE] = { "hie", hmode, NULL, NULL, rmw_hie,
4188 .min_priv_ver = PRIV_VERSION_1_12_0 },
4189 [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren,
4190 write_hcounteren,
4191 .min_priv_ver = PRIV_VERSION_1_12_0 },
4192 [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie,
4193 .min_priv_ver = PRIV_VERSION_1_12_0 },
4194 [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval,
4195 .min_priv_ver = PRIV_VERSION_1_12_0 },
4196 [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst,
4197 .min_priv_ver = PRIV_VERSION_1_12_0 },
4198 [CSR_HGEIP] = { "hgeip", hmode, read_hgeip,
4199 .min_priv_ver = PRIV_VERSION_1_12_0 },
4200 [CSR_HGATP] = { "hgatp", hmode, read_hgatp, write_hgatp,
4201 .min_priv_ver = PRIV_VERSION_1_12_0 },
4202 [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta,
4203 write_htimedelta,
4204 .min_priv_ver = PRIV_VERSION_1_12_0 },
4205 [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah,
4206 write_htimedeltah,
4207 .min_priv_ver = PRIV_VERSION_1_12_0 },
4209 [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus,
4210 write_vsstatus,
4211 .min_priv_ver = PRIV_VERSION_1_12_0 },
4212 [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip,
4213 .min_priv_ver = PRIV_VERSION_1_12_0 },
4214 [CSR_VSIE] = { "vsie", hmode, NULL, NULL, rmw_vsie,
4215 .min_priv_ver = PRIV_VERSION_1_12_0 },
4216 [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec,
4217 .min_priv_ver = PRIV_VERSION_1_12_0 },
4218 [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch,
4219 write_vsscratch,
4220 .min_priv_ver = PRIV_VERSION_1_12_0 },
4221 [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc,
4222 .min_priv_ver = PRIV_VERSION_1_12_0 },
4223 [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause,
4224 .min_priv_ver = PRIV_VERSION_1_12_0 },
4225 [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval,
4226 .min_priv_ver = PRIV_VERSION_1_12_0 },
4227 [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp,
4228 .min_priv_ver = PRIV_VERSION_1_12_0 },
4230 [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2,
4231 .min_priv_ver = PRIV_VERSION_1_12_0 },
4232 [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst,
4233 .min_priv_ver = PRIV_VERSION_1_12_0 },
4235 /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
4236 [CSR_HVIEN] = { "hvien", aia_hmode, read_zero, write_ignore },
4237 [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl,
4238 write_hvictl },
4239 [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1,
4240 write_hviprio1 },
4241 [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2,
4242 write_hviprio2 },
4245 * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
4247 [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL,
4248 rmw_xiselect },
4249 [CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg },
4251 /* VS-Level Interrupts (H-extension with AIA) */
4252 [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
4253 [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi },
4255 /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
4256 [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL,
4257 rmw_hidelegh },
4258 [CSR_HVIENH] = { "hvienh", aia_hmode32, read_zero,
4259 write_ignore },
4260 [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph },
4261 [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h,
4262 write_hviprio1h },
4263 [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h,
4264 write_hviprio2h },
4265 [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh },
4266 [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph },
4268 /* Physical Memory Protection */
4269 [CSR_MSECCFG] = { "mseccfg", epmp, read_mseccfg, write_mseccfg,
4270 .min_priv_ver = PRIV_VERSION_1_11_0 },
4271 [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg },
4272 [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg },
4273 [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg },
4274 [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg },
4275 [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr },
4276 [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr },
4277 [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr },
4278 [CSR_PMPADDR3] = { "pmpaddr3", pmp, read_pmpaddr, write_pmpaddr },
4279 [CSR_PMPADDR4] = { "pmpaddr4", pmp, read_pmpaddr, write_pmpaddr },
4280 [CSR_PMPADDR5] = { "pmpaddr5", pmp, read_pmpaddr, write_pmpaddr },
4281 [CSR_PMPADDR6] = { "pmpaddr6", pmp, read_pmpaddr, write_pmpaddr },
4282 [CSR_PMPADDR7] = { "pmpaddr7", pmp, read_pmpaddr, write_pmpaddr },
4283 [CSR_PMPADDR8] = { "pmpaddr8", pmp, read_pmpaddr, write_pmpaddr },
4284 [CSR_PMPADDR9] = { "pmpaddr9", pmp, read_pmpaddr, write_pmpaddr },
4285 [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr },
4286 [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
4287 [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
4288 [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
4289 [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
4290 [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
4292 /* Debug CSRs */
4293 [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect },
4294 [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata },
4295 [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata },
4296 [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata },
4297 [CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },
4299 /* User Pointer Masking */
4300 [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte },
4301 [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask,
4302 write_upmmask },
4303 [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase,
4304 write_upmbase },
4305 /* Machine Pointer Masking */
4306 [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte },
4307 [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask,
4308 write_mpmmask },
4309 [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase,
4310 write_mpmbase },
4311 /* Supervisor Pointer Masking */
4312 [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte },
4313 [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask,
4314 write_spmmask },
4315 [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase,
4316 write_spmbase },
4318 /* Performance Counters */
4319 [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
4320 [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter },
4321 [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_hpmcounter },
4322 [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_hpmcounter },
4323 [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_hpmcounter },
4324 [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_hpmcounter },
4325 [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_hpmcounter },
4326 [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_hpmcounter },
4327 [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_hpmcounter },
4328 [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_hpmcounter },
4329 [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_hpmcounter },
4330 [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_hpmcounter },
4331 [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_hpmcounter },
4332 [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_hpmcounter },
4333 [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_hpmcounter },
4334 [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_hpmcounter },
4335 [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_hpmcounter },
4336 [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_hpmcounter },
4337 [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_hpmcounter },
4338 [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_hpmcounter },
4339 [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_hpmcounter },
4340 [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_hpmcounter },
4341 [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_hpmcounter },
4342 [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_hpmcounter },
4343 [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_hpmcounter },
4344 [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_hpmcounter },
4345 [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_hpmcounter },
4346 [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_hpmcounter },
4347 [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_hpmcounter },
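/*
 * The machine-level counterparts are writable from M-mode; the mctr
 * predicate additionally rejects counters beyond the number the hart
 * implements (the cfg.pmu_num limit in this QEMU version).
 */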
4349 [CSR_MHPMCOUNTER3] = { "mhpmcounter3", mctr, read_hpmcounter,
4350 write_mhpmcounter },
4351 [CSR_MHPMCOUNTER4] = { "mhpmcounter4", mctr, read_hpmcounter,
4352 write_mhpmcounter },
4353 [CSR_MHPMCOUNTER5] = { "mhpmcounter5", mctr, read_hpmcounter,
4354 write_mhpmcounter },
4355 [CSR_MHPMCOUNTER6] = { "mhpmcounter6", mctr, read_hpmcounter,
4356 write_mhpmcounter },
4357 [CSR_MHPMCOUNTER7] = { "mhpmcounter7", mctr, read_hpmcounter,
4358 write_mhpmcounter },
4359 [CSR_MHPMCOUNTER8] = { "mhpmcounter8", mctr, read_hpmcounter,
4360 write_mhpmcounter },
4361 [CSR_MHPMCOUNTER9] = { "mhpmcounter9", mctr, read_hpmcounter,
4362 write_mhpmcounter },
4363 [CSR_MHPMCOUNTER10] = { "mhpmcounter10", mctr, read_hpmcounter,
4364 write_mhpmcounter },
4365 [CSR_MHPMCOUNTER11] = { "mhpmcounter11", mctr, read_hpmcounter,
4366 write_mhpmcounter },
4367 [CSR_MHPMCOUNTER12] = { "mhpmcounter12", mctr, read_hpmcounter,
4368 write_mhpmcounter },
4369 [CSR_MHPMCOUNTER13] = { "mhpmcounter13", mctr, read_hpmcounter,
4370 write_mhpmcounter },
4371 [CSR_MHPMCOUNTER14] = { "mhpmcounter14", mctr, read_hpmcounter,
4372 write_mhpmcounter },
4373 [CSR_MHPMCOUNTER15] = { "mhpmcounter15", mctr, read_hpmcounter,
4374 write_mhpmcounter },
4375 [CSR_MHPMCOUNTER16] = { "mhpmcounter16", mctr, read_hpmcounter,
4376 write_mhpmcounter },
4377 [CSR_MHPMCOUNTER17] = { "mhpmcounter17", mctr, read_hpmcounter,
4378 write_mhpmcounter },
4379 [CSR_MHPMCOUNTER18] = { "mhpmcounter18", mctr, read_hpmcounter,
4380 write_mhpmcounter },
4381 [CSR_MHPMCOUNTER19] = { "mhpmcounter19", mctr, read_hpmcounter,
4382 write_mhpmcounter },
4383 [CSR_MHPMCOUNTER20] = { "mhpmcounter20", mctr, read_hpmcounter,
4384 write_mhpmcounter },
4385 [CSR_MHPMCOUNTER21] = { "mhpmcounter21", mctr, read_hpmcounter,
4386 write_mhpmcounter },
4387 [CSR_MHPMCOUNTER22] = { "mhpmcounter22", mctr, read_hpmcounter,
4388 write_mhpmcounter },
4389 [CSR_MHPMCOUNTER23] = { "mhpmcounter23", mctr, read_hpmcounter,
4390 write_mhpmcounter },
4391 [CSR_MHPMCOUNTER24] = { "mhpmcounter24", mctr, read_hpmcounter,
4392 write_mhpmcounter },
4393 [CSR_MHPMCOUNTER25] = { "mhpmcounter25", mctr, read_hpmcounter,
4394 write_mhpmcounter },
4395 [CSR_MHPMCOUNTER26] = { "mhpmcounter26", mctr, read_hpmcounter,
4396 write_mhpmcounter },
4397 [CSR_MHPMCOUNTER27] = { "mhpmcounter27", mctr, read_hpmcounter,
4398 write_mhpmcounter },
4399 [CSR_MHPMCOUNTER28] = { "mhpmcounter28", mctr, read_hpmcounter,
4400 write_mhpmcounter },
4401 [CSR_MHPMCOUNTER29] = { "mhpmcounter29", mctr, read_hpmcounter,
4402 write_mhpmcounter },
4403 [CSR_MHPMCOUNTER30] = { "mhpmcounter30", mctr, read_hpmcounter,
4404 write_mhpmcounter },
4405 [CSR_MHPMCOUNTER31] = { "mhpmcounter31", mctr, read_hpmcounter,
4406 write_mhpmcounter },
4408 [CSR_MCOUNTINHIBIT] = { "mcountinhibit", any, read_mcountinhibit,
4409 write_mcountinhibit,
4410 .min_priv_ver = PRIV_VERSION_1_11_0 },
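/*
 * mhpmevent3..31 select the event counted by the corresponding
 * mhpmcounter; the event encodings are platform-defined and a value of
 * zero means the counter counts nothing.
 */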
4412 [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent,
4413 write_mhpmevent },
4414 [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent,
4415 write_mhpmevent },
4416 [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_mhpmevent,
4417 write_mhpmevent },
4418 [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_mhpmevent,
4419 write_mhpmevent },
4420 [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_mhpmevent,
4421 write_mhpmevent },
4422 [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_mhpmevent,
4423 write_mhpmevent },
4424 [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_mhpmevent,
4425 write_mhpmevent },
4426 [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_mhpmevent,
4427 write_mhpmevent },
4428 [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_mhpmevent,
4429 write_mhpmevent },
4430 [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_mhpmevent,
4431 write_mhpmevent },
4432 [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_mhpmevent,
4433 write_mhpmevent },
4434 [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_mhpmevent,
4435 write_mhpmevent },
4436 [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_mhpmevent,
4437 write_mhpmevent },
4438 [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_mhpmevent,
4439 write_mhpmevent },
4440 [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_mhpmevent,
4441 write_mhpmevent },
4442 [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_mhpmevent,
4443 write_mhpmevent },
4444 [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_mhpmevent,
4445 write_mhpmevent },
4446 [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_mhpmevent,
4447 write_mhpmevent },
4448 [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_mhpmevent,
4449 write_mhpmevent },
4450 [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_mhpmevent,
4451 write_mhpmevent },
4452 [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_mhpmevent,
4453 write_mhpmevent },
4454 [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_mhpmevent,
4455 write_mhpmevent },
4456 [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_mhpmevent,
4457 write_mhpmevent },
4458 [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_mhpmevent,
4459 write_mhpmevent },
4460 [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_mhpmevent,
4461 write_mhpmevent },
4462 [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_mhpmevent,
4463 write_mhpmevent },
4464 [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_mhpmevent,
4465 write_mhpmevent },
4466 [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_mhpmevent,
4467 write_mhpmevent },
4468 [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent,
4469 write_mhpmevent },
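/*
 * With Sscofpmf on RV32, the mhpmevent high halves carry the upper event
 * bits, including the overflow (OF) and per-privilege inhibit bits.
 */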
4471 [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf, read_mhpmeventh,
4472 write_mhpmeventh,
4473 .min_priv_ver = PRIV_VERSION_1_12_0 },
4474 [CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf, read_mhpmeventh,
4475 write_mhpmeventh,
4476 .min_priv_ver = PRIV_VERSION_1_12_0 },
4477 [CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf, read_mhpmeventh,
4478 write_mhpmeventh,
4479 .min_priv_ver = PRIV_VERSION_1_12_0 },
4480 [CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf, read_mhpmeventh,
4481 write_mhpmeventh,
4482 .min_priv_ver = PRIV_VERSION_1_12_0 },
4483 [CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf, read_mhpmeventh,
4484 write_mhpmeventh,
4485 .min_priv_ver = PRIV_VERSION_1_12_0 },
4486 [CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf, read_mhpmeventh,
4487 write_mhpmeventh,
4488 .min_priv_ver = PRIV_VERSION_1_12_0 },
4489 [CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf, read_mhpmeventh,
4490 write_mhpmeventh,
4491 .min_priv_ver = PRIV_VERSION_1_12_0 },
4492 [CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf, read_mhpmeventh,
4493 write_mhpmeventh,
4494 .min_priv_ver = PRIV_VERSION_1_12_0 },
4495 [CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf, read_mhpmeventh,
4496 write_mhpmeventh,
4497 .min_priv_ver = PRIV_VERSION_1_12_0 },
4498 [CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf, read_mhpmeventh,
4499 write_mhpmeventh,
4500 .min_priv_ver = PRIV_VERSION_1_12_0 },
4501 [CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf, read_mhpmeventh,
4502 write_mhpmeventh,
4503 .min_priv_ver = PRIV_VERSION_1_12_0 },
4504 [CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf, read_mhpmeventh,
4505 write_mhpmeventh,
4506 .min_priv_ver = PRIV_VERSION_1_12_0 },
4507 [CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf, read_mhpmeventh,
4508 write_mhpmeventh,
4509 .min_priv_ver = PRIV_VERSION_1_12_0 },
4510 [CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf, read_mhpmeventh,
4511 write_mhpmeventh,
4512 .min_priv_ver = PRIV_VERSION_1_12_0 },
4513 [CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf, read_mhpmeventh,
4514 write_mhpmeventh,
4515 .min_priv_ver = PRIV_VERSION_1_12_0 },
4516 [CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf, read_mhpmeventh,
4517 write_mhpmeventh,
4518 .min_priv_ver = PRIV_VERSION_1_12_0 },
4519 [CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf, read_mhpmeventh,
4520 write_mhpmeventh,
4521 .min_priv_ver = PRIV_VERSION_1_12_0 },
4522 [CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf, read_mhpmeventh,
4523 write_mhpmeventh,
4524 .min_priv_ver = PRIV_VERSION_1_12_0 },
4525 [CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf, read_mhpmeventh,
4526 write_mhpmeventh,
4527 .min_priv_ver = PRIV_VERSION_1_12_0 },
4528 [CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf, read_mhpmeventh,
4529 write_mhpmeventh,
4530 .min_priv_ver = PRIV_VERSION_1_12_0 },
4531 [CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf, read_mhpmeventh,
4532 write_mhpmeventh,
4533 .min_priv_ver = PRIV_VERSION_1_12_0 },
4534 [CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf, read_mhpmeventh,
4535 write_mhpmeventh,
4536 .min_priv_ver = PRIV_VERSION_1_12_0 },
4537 [CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf, read_mhpmeventh,
4538 write_mhpmeventh,
4539 .min_priv_ver = PRIV_VERSION_1_12_0 },
4540 [CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf, read_mhpmeventh,
4541 write_mhpmeventh,
4542 .min_priv_ver = PRIV_VERSION_1_12_0 },
4543 [CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf, read_mhpmeventh,
4544 write_mhpmeventh,
4545 .min_priv_ver = PRIV_VERSION_1_12_0 },
4546 [CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf, read_mhpmeventh,
4547 write_mhpmeventh,
4548 .min_priv_ver = PRIV_VERSION_1_12_0 },
4549 [CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf, read_mhpmeventh,
4550 write_mhpmeventh,
4551 .min_priv_ver = PRIV_VERSION_1_12_0 },
4552 [CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf, read_mhpmeventh,
4553 write_mhpmeventh,
4554 .min_priv_ver = PRIV_VERSION_1_12_0 },
4555 [CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf, read_mhpmeventh,
4556 write_mhpmeventh,
4557 .min_priv_ver = PRIV_VERSION_1_12_0 },
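/*
 * RV32-only high halves of the counters above; ctr32 and mctr32 simply
 * add an XLEN == 32 check on top of the corresponding base predicates.
 */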
4559 [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_hpmcounterh },
4560 [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_hpmcounterh },
4561 [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_hpmcounterh },
4562 [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_hpmcounterh },
4563 [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_hpmcounterh },
4564 [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_hpmcounterh },
4565 [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_hpmcounterh },
4566 [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_hpmcounterh },
4567 [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_hpmcounterh },
4568 [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_hpmcounterh },
4569 [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_hpmcounterh },
4570 [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_hpmcounterh },
4571 [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_hpmcounterh },
4572 [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_hpmcounterh },
4573 [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_hpmcounterh },
4574 [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_hpmcounterh },
4575 [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_hpmcounterh },
4576 [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_hpmcounterh },
4577 [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_hpmcounterh },
4578 [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_hpmcounterh },
4579 [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_hpmcounterh },
4580 [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_hpmcounterh },
4581 [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_hpmcounterh },
4582 [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_hpmcounterh },
4583 [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_hpmcounterh },
4584 [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_hpmcounterh },
4585 [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_hpmcounterh },
4586 [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_hpmcounterh },
4587 [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_hpmcounterh },
4589 [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", mctr32, read_hpmcounterh,
4590 write_mhpmcounterh },
4591 [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", mctr32, read_hpmcounterh,
4592 write_mhpmcounterh },
4593 [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", mctr32, read_hpmcounterh,
4594 write_mhpmcounterh },
4595 [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", mctr32, read_hpmcounterh,
4596 write_mhpmcounterh },
4597 [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", mctr32, read_hpmcounterh,
4598 write_mhpmcounterh },
4599 [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", mctr32, read_hpmcounterh,
4600 write_mhpmcounterh },
4601 [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", mctr32, read_hpmcounterh,
4602 write_mhpmcounterh },
4603 [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", mctr32, read_hpmcounterh,
4604 write_mhpmcounterh },
4605 [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", mctr32, read_hpmcounterh,
4606 write_mhpmcounterh },
4607 [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", mctr32, read_hpmcounterh,
4608 write_mhpmcounterh },
4609 [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", mctr32, read_hpmcounterh,
4610 write_mhpmcounterh },
4611 [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", mctr32, read_hpmcounterh,
4612 write_mhpmcounterh },
4613 [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", mctr32, read_hpmcounterh,
4614 write_mhpmcounterh },
4615 [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", mctr32, read_hpmcounterh,
4616 write_mhpmcounterh },
4617 [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", mctr32, read_hpmcounterh,
4618 write_mhpmcounterh },
4619 [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", mctr32, read_hpmcounterh,
4620 write_mhpmcounterh },
4621 [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", mctr32, read_hpmcounterh,
4622 write_mhpmcounterh },
4623 [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", mctr32, read_hpmcounterh,
4624 write_mhpmcounterh },
4625 [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", mctr32, read_hpmcounterh,
4626 write_mhpmcounterh },
4627 [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", mctr32, read_hpmcounterh,
4628 write_mhpmcounterh },
4629 [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", mctr32, read_hpmcounterh,
4630 write_mhpmcounterh },
4631 [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", mctr32, read_hpmcounterh,
4632 write_mhpmcounterh },
4633 [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", mctr32, read_hpmcounterh,
4634 write_mhpmcounterh },
4635 [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", mctr32, read_hpmcounterh,
4636 write_mhpmcounterh },
4637 [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", mctr32, read_hpmcounterh,
4638 write_mhpmcounterh },
4639 [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", mctr32, read_hpmcounterh,
4640 write_mhpmcounterh },
4641 [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", mctr32, read_hpmcounterh,
4642 write_mhpmcounterh },
4643 [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", mctr32, read_hpmcounterh,
4644 write_mhpmcounterh },
4645 [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", mctr32, read_hpmcounterh,
4646 write_mhpmcounterh },
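/*
 * scountovf gives S-mode a read-only view of the Sscofpmf per-counter
 * overflow (OF) bits.
 */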
4647 [CSR_SCOUNTOVF] = { "scountovf", sscofpmf, read_scountovf,
4648 .min_priv_ver = PRIV_VERSION_1_12_0 },
4650 #endif /* !CONFIG_USER_ONLY */