target/riscv/csr.c (qemu/kevin.git, blob 62e6c4acbda9697f8b805d5b2902aa0e45cfefaf)
1 /*
2 * RISC-V Control and Status Registers.
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/timer.h"
23 #include "cpu.h"
24 #include "pmu.h"
25 #include "time_helper.h"
26 #include "qemu/main-loop.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/cpu-timers.h"
29 #include "qemu/guest-random.h"
30 #include "qapi/error.h"
32 /* CSR function table public API */
33 void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
35 *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
38 void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
40 csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
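/*
 * Usage sketch (illustrative only, not part of this file): machine or CPU
 * model code can swap in a custom handler for a CSR at init time via the
 * two accessors above.  The read_my_mhartid()/my_board_patch_csrs() names
 * below are hypothetical and shown only to demonstrate the API; only the
 * riscv_csr_operations .read field is touched here.
 *
 *     static RISCVException read_my_mhartid(CPURISCVState *env, int csrno,
 *                                           target_ulong *val)
 *     {
 *         *val = 0x42;            // fixed hart ID for a custom board
 *         return RISCV_EXCP_NONE;
 *     }
 *
 *     void my_board_patch_csrs(void)
 *     {
 *         riscv_csr_operations ops;
 *
 *         riscv_get_csr_ops(CSR_MHARTID, &ops);
 *         ops.read = read_my_mhartid;   // keep predicate/write as-is
 *         riscv_set_csr_ops(CSR_MHARTID, &ops);
 *     }
 */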
43 /* Predicates */
44 #if !defined(CONFIG_USER_ONLY)
45 static RISCVException smstateen_acc_ok(CPURISCVState *env, int index,
46 uint64_t bit)
48 bool virt = riscv_cpu_virt_enabled(env);
49 CPUState *cs = env_cpu(env);
50 RISCVCPU *cpu = RISCV_CPU(cs);
52 if (env->priv == PRV_M || !cpu->cfg.ext_smstateen) {
53 return RISCV_EXCP_NONE;
56 if (!(env->mstateen[index] & bit)) {
57 return RISCV_EXCP_ILLEGAL_INST;
60 if (virt) {
61 if (!(env->hstateen[index] & bit)) {
62 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
65 if (env->priv == PRV_U && !(env->sstateen[index] & bit)) {
66 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
70 if (env->priv == PRV_U && riscv_has_ext(env, RVS)) {
71 if (!(env->sstateen[index] & bit)) {
72 return RISCV_EXCP_ILLEGAL_INST;
76 return RISCV_EXCP_NONE;
78 #endif
80 static RISCVException fs(CPURISCVState *env, int csrno)
82 #if !defined(CONFIG_USER_ONLY)
83 if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
84 !RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
85 return RISCV_EXCP_ILLEGAL_INST;
87 #endif
88 return RISCV_EXCP_NONE;
91 static RISCVException vs(CPURISCVState *env, int csrno)
93 CPUState *cs = env_cpu(env);
94 RISCVCPU *cpu = RISCV_CPU(cs);
96 if (env->misa_ext & RVV ||
97 cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
98 #if !defined(CONFIG_USER_ONLY)
99 if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
100 return RISCV_EXCP_ILLEGAL_INST;
102 #endif
103 return RISCV_EXCP_NONE;
105 return RISCV_EXCP_ILLEGAL_INST;
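/*
 * Access predicate for the user-level counter CSRs (cycle, time, instret,
 * hpmcounter3..31 and their RV32 *h halves).  The checks below are, in
 * order: the counter must be implemented by the PMU (cycle/time/instret
 * skip that check), mcounteren must grant access below M-mode, and when
 * virtualized hcounteren (plus scounteren for VU) must grant it; otherwise
 * scounteren gates U-mode accesses when S-mode is present.
 */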
108 static RISCVException ctr(CPURISCVState *env, int csrno)
110 #if !defined(CONFIG_USER_ONLY)
111 CPUState *cs = env_cpu(env);
112 RISCVCPU *cpu = RISCV_CPU(cs);
113 int ctr_index;
114 target_ulong ctr_mask;
115 int base_csrno = CSR_CYCLE;
116 bool rv32 = riscv_cpu_mxl(env) == MXL_RV32 ? true : false;
118 if (rv32 && csrno >= CSR_CYCLEH) {
119 /* Offset for RV32 hpmcounternh counters */
120 base_csrno += 0x80;
122 ctr_index = csrno - base_csrno;
123 ctr_mask = BIT(ctr_index);
125 if ((csrno >= CSR_CYCLE && csrno <= CSR_INSTRET) ||
126 (csrno >= CSR_CYCLEH && csrno <= CSR_INSTRETH)) {
127 goto skip_ext_pmu_check;
130 if (!(cpu->pmu_avail_ctrs & ctr_mask)) {
131 /* No counter is enabled in PMU or the counter is out of range */
132 return RISCV_EXCP_ILLEGAL_INST;
135 skip_ext_pmu_check:
137 if (env->priv < PRV_M && !get_field(env->mcounteren, ctr_mask)) {
138 return RISCV_EXCP_ILLEGAL_INST;
141 if (riscv_cpu_virt_enabled(env)) {
142 if (!get_field(env->hcounteren, ctr_mask) ||
143 (env->priv == PRV_U && !get_field(env->scounteren, ctr_mask))) {
144 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
148 if (riscv_has_ext(env, RVS) && env->priv == PRV_U &&
149 !get_field(env->scounteren, ctr_mask)) {
150 return RISCV_EXCP_ILLEGAL_INST;
153 #endif
154 return RISCV_EXCP_NONE;
157 static RISCVException ctr32(CPURISCVState *env, int csrno)
159 if (riscv_cpu_mxl(env) != MXL_RV32) {
160 return RISCV_EXCP_ILLEGAL_INST;
163 return ctr(env, csrno);
166 #if !defined(CONFIG_USER_ONLY)
167 static RISCVException mctr(CPURISCVState *env, int csrno)
169 CPUState *cs = env_cpu(env);
170 RISCVCPU *cpu = RISCV_CPU(cs);
171 int ctr_index;
172 int base_csrno = CSR_MHPMCOUNTER3;
174 if ((riscv_cpu_mxl(env) == MXL_RV32) && csrno >= CSR_MCYCLEH) {
175 /* Offset for RV32 mhpmcounternh counters */
176 base_csrno += 0x80;
178 ctr_index = csrno - base_csrno;
179 if (!cpu->cfg.pmu_num || ctr_index >= cpu->cfg.pmu_num) {
180             /* The PMU is not enabled or counter is out of range */
181 return RISCV_EXCP_ILLEGAL_INST;
184 return RISCV_EXCP_NONE;
187 static RISCVException mctr32(CPURISCVState *env, int csrno)
189 if (riscv_cpu_mxl(env) != MXL_RV32) {
190 return RISCV_EXCP_ILLEGAL_INST;
193 return mctr(env, csrno);
196 static RISCVException sscofpmf(CPURISCVState *env, int csrno)
198 CPUState *cs = env_cpu(env);
199 RISCVCPU *cpu = RISCV_CPU(cs);
201 if (!cpu->cfg.ext_sscofpmf) {
202 return RISCV_EXCP_ILLEGAL_INST;
205 return RISCV_EXCP_NONE;
208 static RISCVException any(CPURISCVState *env, int csrno)
210 return RISCV_EXCP_NONE;
213 static RISCVException any32(CPURISCVState *env, int csrno)
215 if (riscv_cpu_mxl(env) != MXL_RV32) {
216 return RISCV_EXCP_ILLEGAL_INST;
219 return any(env, csrno);
223 static int aia_any(CPURISCVState *env, int csrno)
225 RISCVCPU *cpu = env_archcpu(env);
227 if (!cpu->cfg.ext_smaia) {
228 return RISCV_EXCP_ILLEGAL_INST;
231 return any(env, csrno);
234 static int aia_any32(CPURISCVState *env, int csrno)
236 RISCVCPU *cpu = env_archcpu(env);
238 if (!cpu->cfg.ext_smaia) {
239 return RISCV_EXCP_ILLEGAL_INST;
242 return any32(env, csrno);
245 static RISCVException smode(CPURISCVState *env, int csrno)
247 if (riscv_has_ext(env, RVS)) {
248 return RISCV_EXCP_NONE;
251 return RISCV_EXCP_ILLEGAL_INST;
254 static int smode32(CPURISCVState *env, int csrno)
256 if (riscv_cpu_mxl(env) != MXL_RV32) {
257 return RISCV_EXCP_ILLEGAL_INST;
260 return smode(env, csrno);
263 static int aia_smode(CPURISCVState *env, int csrno)
265 RISCVCPU *cpu = env_archcpu(env);
267 if (!cpu->cfg.ext_ssaia) {
268 return RISCV_EXCP_ILLEGAL_INST;
271 return smode(env, csrno);
274 static int aia_smode32(CPURISCVState *env, int csrno)
276 RISCVCPU *cpu = env_archcpu(env);
278 if (!cpu->cfg.ext_ssaia) {
279 return RISCV_EXCP_ILLEGAL_INST;
282 return smode32(env, csrno);
285 static RISCVException hmode(CPURISCVState *env, int csrno)
287 if (riscv_has_ext(env, RVH)) {
288 return RISCV_EXCP_NONE;
291 return RISCV_EXCP_ILLEGAL_INST;
294 static RISCVException hmode32(CPURISCVState *env, int csrno)
296 if (riscv_cpu_mxl(env) != MXL_RV32) {
297 return RISCV_EXCP_ILLEGAL_INST;
300 return hmode(env, csrno);
304 static RISCVException umode(CPURISCVState *env, int csrno)
306 if (riscv_has_ext(env, RVU)) {
307 return RISCV_EXCP_NONE;
310 return RISCV_EXCP_ILLEGAL_INST;
313 static RISCVException umode32(CPURISCVState *env, int csrno)
315 if (riscv_cpu_mxl(env) != MXL_RV32) {
316 return RISCV_EXCP_ILLEGAL_INST;
319 return umode(env, csrno);
322 static RISCVException mstateen(CPURISCVState *env, int csrno)
324 CPUState *cs = env_cpu(env);
325 RISCVCPU *cpu = RISCV_CPU(cs);
327 if (!cpu->cfg.ext_smstateen) {
328 return RISCV_EXCP_ILLEGAL_INST;
331 return any(env, csrno);
334 static RISCVException hstateen_pred(CPURISCVState *env, int csrno, int base)
336 CPUState *cs = env_cpu(env);
337 RISCVCPU *cpu = RISCV_CPU(cs);
339 if (!cpu->cfg.ext_smstateen) {
340 return RISCV_EXCP_ILLEGAL_INST;
343 if (env->priv < PRV_M) {
344 if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEEN)) {
345 return RISCV_EXCP_ILLEGAL_INST;
349 return hmode(env, csrno);
352 static RISCVException hstateen(CPURISCVState *env, int csrno)
354 return hstateen_pred(env, csrno, CSR_HSTATEEN0);
357 static RISCVException hstateenh(CPURISCVState *env, int csrno)
359 return hstateen_pred(env, csrno, CSR_HSTATEEN0H);
362 static RISCVException sstateen(CPURISCVState *env, int csrno)
364 bool virt = riscv_cpu_virt_enabled(env);
365 int index = csrno - CSR_SSTATEEN0;
366 CPUState *cs = env_cpu(env);
367 RISCVCPU *cpu = RISCV_CPU(cs);
369 if (!cpu->cfg.ext_smstateen) {
370 return RISCV_EXCP_ILLEGAL_INST;
373 if (env->priv < PRV_M) {
374 if (!(env->mstateen[index] & SMSTATEEN_STATEEN)) {
375 return RISCV_EXCP_ILLEGAL_INST;
378 if (virt) {
379 if (!(env->hstateen[index] & SMSTATEEN_STATEEN)) {
380 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
385 return smode(env, csrno);
388 /* Checks if PointerMasking registers could be accessed */
389 static RISCVException pointer_masking(CPURISCVState *env, int csrno)
391 /* Check if j-ext is present */
392 if (riscv_has_ext(env, RVJ)) {
393 return RISCV_EXCP_NONE;
395 return RISCV_EXCP_ILLEGAL_INST;
398 static int aia_hmode(CPURISCVState *env, int csrno)
400 RISCVCPU *cpu = env_archcpu(env);
402 if (!cpu->cfg.ext_ssaia) {
403 return RISCV_EXCP_ILLEGAL_INST;
406 return hmode(env, csrno);
409 static int aia_hmode32(CPURISCVState *env, int csrno)
411 RISCVCPU *cpu = env_archcpu(env);
413 if (!cpu->cfg.ext_ssaia) {
414 return RISCV_EXCP_ILLEGAL_INST;
417 return hmode32(env, csrno);
420 static RISCVException pmp(CPURISCVState *env, int csrno)
422 if (riscv_feature(env, RISCV_FEATURE_PMP)) {
423 return RISCV_EXCP_NONE;
426 return RISCV_EXCP_ILLEGAL_INST;
429 static RISCVException epmp(CPURISCVState *env, int csrno)
431 if (env->priv == PRV_M && riscv_feature(env, RISCV_FEATURE_EPMP)) {
432 return RISCV_EXCP_NONE;
435 return RISCV_EXCP_ILLEGAL_INST;
438 static RISCVException debug(CPURISCVState *env, int csrno)
440 if (riscv_feature(env, RISCV_FEATURE_DEBUG)) {
441 return RISCV_EXCP_NONE;
444 return RISCV_EXCP_ILLEGAL_INST;
446 #endif
448 static RISCVException seed(CPURISCVState *env, int csrno)
450 RISCVCPU *cpu = env_archcpu(env);
452 if (!cpu->cfg.ext_zkr) {
453 return RISCV_EXCP_ILLEGAL_INST;
456 #if !defined(CONFIG_USER_ONLY)
458 * With a CSR read-write instruction:
459 * 1) The seed CSR is always available in machine mode as normal.
460 * 2) Attempted access to seed from virtual modes VS and VU always raises
461      *    an exception (virtual instruction exception only if mseccfg.sseed=1).
462 * 3) Without the corresponding access control bit set to 1, any attempted
463 * access to seed from U, S or HS modes will raise an illegal instruction
464 * exception.
466 if (env->priv == PRV_M) {
467 return RISCV_EXCP_NONE;
468 } else if (riscv_cpu_virt_enabled(env)) {
469 if (env->mseccfg & MSECCFG_SSEED) {
470 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
471 } else {
472 return RISCV_EXCP_ILLEGAL_INST;
474 } else {
475 if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) {
476 return RISCV_EXCP_NONE;
477 } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) {
478 return RISCV_EXCP_NONE;
479 } else {
480 return RISCV_EXCP_ILLEGAL_INST;
483 #else
484 return RISCV_EXCP_NONE;
485 #endif
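/*
 * Summary of the seed access rules implemented above (system emulation):
 *
 *   M-mode            -> always allowed
 *   VS/VU (virt on)   -> virtual-instruction fault if mseccfg.SSEED is set,
 *                        otherwise illegal-instruction
 *   (H)S-mode         -> allowed only if mseccfg.SSEED is set
 *   U-mode            -> allowed only if mseccfg.USEED is set
 */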
488 /* User Floating-Point CSRs */
489 static RISCVException read_fflags(CPURISCVState *env, int csrno,
490 target_ulong *val)
492 *val = riscv_cpu_get_fflags(env);
493 return RISCV_EXCP_NONE;
496 static RISCVException write_fflags(CPURISCVState *env, int csrno,
497 target_ulong val)
499 #if !defined(CONFIG_USER_ONLY)
500 if (riscv_has_ext(env, RVF)) {
501 env->mstatus |= MSTATUS_FS;
503 #endif
504 riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
505 return RISCV_EXCP_NONE;
508 static RISCVException read_frm(CPURISCVState *env, int csrno,
509 target_ulong *val)
511 *val = env->frm;
512 return RISCV_EXCP_NONE;
515 static RISCVException write_frm(CPURISCVState *env, int csrno,
516 target_ulong val)
518 #if !defined(CONFIG_USER_ONLY)
519 if (riscv_has_ext(env, RVF)) {
520 env->mstatus |= MSTATUS_FS;
522 #endif
523 env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
524 return RISCV_EXCP_NONE;
527 static RISCVException read_fcsr(CPURISCVState *env, int csrno,
528 target_ulong *val)
530 *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
531 | (env->frm << FSR_RD_SHIFT);
532 return RISCV_EXCP_NONE;
535 static RISCVException write_fcsr(CPURISCVState *env, int csrno,
536 target_ulong val)
538 #if !defined(CONFIG_USER_ONLY)
539 if (riscv_has_ext(env, RVF)) {
540 env->mstatus |= MSTATUS_FS;
542 #endif
543 env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
544 riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
545 return RISCV_EXCP_NONE;
548 static RISCVException read_vtype(CPURISCVState *env, int csrno,
549 target_ulong *val)
551 uint64_t vill;
552 switch (env->xl) {
553 case MXL_RV32:
554 vill = (uint32_t)env->vill << 31;
555 break;
556 case MXL_RV64:
557 vill = (uint64_t)env->vill << 63;
558 break;
559 default:
560 g_assert_not_reached();
562 *val = (target_ulong)vill | env->vtype;
563 return RISCV_EXCP_NONE;
566 static RISCVException read_vl(CPURISCVState *env, int csrno,
567 target_ulong *val)
569 *val = env->vl;
570 return RISCV_EXCP_NONE;
573 static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val)
575 *val = env_archcpu(env)->cfg.vlen >> 3;
576 return RISCV_EXCP_NONE;
579 static RISCVException read_vxrm(CPURISCVState *env, int csrno,
580 target_ulong *val)
582 *val = env->vxrm;
583 return RISCV_EXCP_NONE;
586 static RISCVException write_vxrm(CPURISCVState *env, int csrno,
587 target_ulong val)
589 #if !defined(CONFIG_USER_ONLY)
590 env->mstatus |= MSTATUS_VS;
591 #endif
592 env->vxrm = val;
593 return RISCV_EXCP_NONE;
596 static RISCVException read_vxsat(CPURISCVState *env, int csrno,
597 target_ulong *val)
599 *val = env->vxsat;
600 return RISCV_EXCP_NONE;
603 static RISCVException write_vxsat(CPURISCVState *env, int csrno,
604 target_ulong val)
606 #if !defined(CONFIG_USER_ONLY)
607 env->mstatus |= MSTATUS_VS;
608 #endif
609 env->vxsat = val;
610 return RISCV_EXCP_NONE;
613 static RISCVException read_vstart(CPURISCVState *env, int csrno,
614 target_ulong *val)
616 *val = env->vstart;
617 return RISCV_EXCP_NONE;
620 static RISCVException write_vstart(CPURISCVState *env, int csrno,
621 target_ulong val)
623 #if !defined(CONFIG_USER_ONLY)
624 env->mstatus |= MSTATUS_VS;
625 #endif
627 * The vstart CSR is defined to have only enough writable bits
628 * to hold the largest element index, i.e. lg2(VLEN) bits.
630 env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen));
631 return RISCV_EXCP_NONE;
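/*
 * Worked example: with cfg.vlen = 128, ctzl(128) = 7, so the mask above
 * keeps bits [6:0] and vstart can hold element indices 0..127.
 */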
634 static int read_vcsr(CPURISCVState *env, int csrno, target_ulong *val)
636 *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
637 return RISCV_EXCP_NONE;
640 static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val)
642 #if !defined(CONFIG_USER_ONLY)
643 env->mstatus |= MSTATUS_VS;
644 #endif
645 env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
646 env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
647 return RISCV_EXCP_NONE;
650 /* User Timers and Counters */
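/*
 * get_ticks() backs the cycle/instret style counters: it returns the
 * deterministic icount value when icount is enabled and the host ticks
 * otherwise (always host ticks for user-only builds), with shift=true
 * selecting the upper 32 bits for the RV32 *h counterparts.
 */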
651 static target_ulong get_ticks(bool shift)
653 int64_t val;
654 target_ulong result;
656 #if !defined(CONFIG_USER_ONLY)
657 if (icount_enabled()) {
658 val = icount_get();
659 } else {
660 val = cpu_get_host_ticks();
662 #else
663 val = cpu_get_host_ticks();
664 #endif
666 if (shift) {
667 result = val >> 32;
668 } else {
669 result = val;
672 return result;
675 #if defined(CONFIG_USER_ONLY)
676 static RISCVException read_time(CPURISCVState *env, int csrno,
677 target_ulong *val)
679 *val = cpu_get_host_ticks();
680 return RISCV_EXCP_NONE;
683 static RISCVException read_timeh(CPURISCVState *env, int csrno,
684 target_ulong *val)
686 *val = cpu_get_host_ticks() >> 32;
687 return RISCV_EXCP_NONE;
690 static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
692 *val = get_ticks(false);
693 return RISCV_EXCP_NONE;
696 static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
698 *val = get_ticks(true);
699 return RISCV_EXCP_NONE;
702 #else /* CONFIG_USER_ONLY */
704 static int read_mhpmevent(CPURISCVState *env, int csrno, target_ulong *val)
706 int evt_index = csrno - CSR_MCOUNTINHIBIT;
708 *val = env->mhpmevent_val[evt_index];
710 return RISCV_EXCP_NONE;
713 static int write_mhpmevent(CPURISCVState *env, int csrno, target_ulong val)
715 int evt_index = csrno - CSR_MCOUNTINHIBIT;
716 uint64_t mhpmevt_val = val;
718 env->mhpmevent_val[evt_index] = val;
720 if (riscv_cpu_mxl(env) == MXL_RV32) {
721 mhpmevt_val = mhpmevt_val |
722 ((uint64_t)env->mhpmeventh_val[evt_index] << 32);
724 riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
726 return RISCV_EXCP_NONE;
729 static int read_mhpmeventh(CPURISCVState *env, int csrno, target_ulong *val)
731 int evt_index = csrno - CSR_MHPMEVENT3H + 3;
733 *val = env->mhpmeventh_val[evt_index];
735 return RISCV_EXCP_NONE;
738 static int write_mhpmeventh(CPURISCVState *env, int csrno, target_ulong val)
740 int evt_index = csrno - CSR_MHPMEVENT3H + 3;
741 uint64_t mhpmevth_val = val;
742 uint64_t mhpmevt_val = env->mhpmevent_val[evt_index];
744 mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32);
745 env->mhpmeventh_val[evt_index] = val;
747 riscv_pmu_update_event_map(env, mhpmevt_val, evt_index);
749 return RISCV_EXCP_NONE;
752 static int write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val)
754 int ctr_idx = csrno - CSR_MCYCLE;
755 PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
756 uint64_t mhpmctr_val = val;
758 counter->mhpmcounter_val = val;
759 if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
760 riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
761 counter->mhpmcounter_prev = get_ticks(false);
762 if (ctr_idx > 2) {
763 if (riscv_cpu_mxl(env) == MXL_RV32) {
764 mhpmctr_val = mhpmctr_val |
765 ((uint64_t)counter->mhpmcounterh_val << 32);
767 riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
769 } else {
770 /* Other counters can keep incrementing from the given value */
771 counter->mhpmcounter_prev = val;
774 return RISCV_EXCP_NONE;
777 static int write_mhpmcounterh(CPURISCVState *env, int csrno, target_ulong val)
779 int ctr_idx = csrno - CSR_MCYCLEH;
780 PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
781 uint64_t mhpmctr_val = counter->mhpmcounter_val;
782 uint64_t mhpmctrh_val = val;
784 counter->mhpmcounterh_val = val;
785 mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
786 if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
787 riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
788 counter->mhpmcounterh_prev = get_ticks(true);
789 if (ctr_idx > 2) {
790 riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
792 } else {
793 counter->mhpmcounterh_prev = val;
796 return RISCV_EXCP_NONE;
799 static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
800 bool upper_half, uint32_t ctr_idx)
802 PMUCTRState counter = env->pmu_ctrs[ctr_idx];
803 target_ulong ctr_prev = upper_half ? counter.mhpmcounterh_prev :
804 counter.mhpmcounter_prev;
805 target_ulong ctr_val = upper_half ? counter.mhpmcounterh_val :
806 counter.mhpmcounter_val;
808 if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
810 * Counter should not increment if inhibit bit is set. We can't really
811 * stop the icount counting. Just return the counter value written by
812 * the supervisor to indicate that counter was not incremented.
814 if (!counter.started) {
815 *val = ctr_val;
816 return RISCV_EXCP_NONE;
817 } else {
818 /* Mark that the counter has been stopped */
819 counter.started = false;
824 * The kernel computes the perf delta by subtracting the current value from
825 * the value it initialized previously (ctr_val).
827 if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
828 riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
829 *val = get_ticks(upper_half) - ctr_prev + ctr_val;
830 } else {
831 *val = ctr_val;
834 return RISCV_EXCP_NONE;
837 static int read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val)
839 uint16_t ctr_index;
841 if (csrno >= CSR_MCYCLE && csrno <= CSR_MHPMCOUNTER31) {
842 ctr_index = csrno - CSR_MCYCLE;
843 } else if (csrno >= CSR_CYCLE && csrno <= CSR_HPMCOUNTER31) {
844 ctr_index = csrno - CSR_CYCLE;
845 } else {
846 return RISCV_EXCP_ILLEGAL_INST;
849 return riscv_pmu_read_ctr(env, val, false, ctr_index);
852 static int read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val)
854 uint16_t ctr_index;
856 if (csrno >= CSR_MCYCLEH && csrno <= CSR_MHPMCOUNTER31H) {
857 ctr_index = csrno - CSR_MCYCLEH;
858 } else if (csrno >= CSR_CYCLEH && csrno <= CSR_HPMCOUNTER31H) {
859 ctr_index = csrno - CSR_CYCLEH;
860 } else {
861 return RISCV_EXCP_ILLEGAL_INST;
864 return riscv_pmu_read_ctr(env, val, true, ctr_index);
867 static int read_scountovf(CPURISCVState *env, int csrno, target_ulong *val)
869 int mhpmevt_start = CSR_MHPMEVENT3 - CSR_MCOUNTINHIBIT;
870 int i;
871 *val = 0;
872 target_ulong *mhpm_evt_val;
873 uint64_t of_bit_mask;
875 if (riscv_cpu_mxl(env) == MXL_RV32) {
876 mhpm_evt_val = env->mhpmeventh_val;
877 of_bit_mask = MHPMEVENTH_BIT_OF;
878 } else {
879 mhpm_evt_val = env->mhpmevent_val;
880 of_bit_mask = MHPMEVENT_BIT_OF;
883 for (i = mhpmevt_start; i < RV_MAX_MHPMEVENTS; i++) {
884 if ((get_field(env->mcounteren, BIT(i))) &&
885 (mhpm_evt_val[i] & of_bit_mask)) {
886 *val |= BIT(i);
890 return RISCV_EXCP_NONE;
893 static RISCVException read_time(CPURISCVState *env, int csrno,
894 target_ulong *val)
896 uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;
898 if (!env->rdtime_fn) {
899 return RISCV_EXCP_ILLEGAL_INST;
902 *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
903 return RISCV_EXCP_NONE;
906 static RISCVException read_timeh(CPURISCVState *env, int csrno,
907 target_ulong *val)
909 uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;
911 if (!env->rdtime_fn) {
912 return RISCV_EXCP_ILLEGAL_INST;
915 *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
916 return RISCV_EXCP_NONE;
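/*
 * Predicate for the Sstc stimecmp/vstimecmp CSRs.  Access requires the
 * Sstc extension and a platform rdtime function; M-mode is then always
 * allowed, while lower privilege levels additionally need mcounteren.TM
 * and menvcfg.STCE (and, when virtualized, hcounteren.TM and henvcfg.STCE,
 * with failures reported as a virtual-instruction fault).
 */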
919 static RISCVException sstc(CPURISCVState *env, int csrno)
921 CPUState *cs = env_cpu(env);
922 RISCVCPU *cpu = RISCV_CPU(cs);
923 bool hmode_check = false;
925 if (!cpu->cfg.ext_sstc || !env->rdtime_fn) {
926 return RISCV_EXCP_ILLEGAL_INST;
929 if (env->priv == PRV_M) {
930 return RISCV_EXCP_NONE;
934      * No need for a separate function for rv32, as menvcfg stores both menvcfg
935      * and menvcfgh for RV32.
937 if (!(get_field(env->mcounteren, COUNTEREN_TM) &&
938 get_field(env->menvcfg, MENVCFG_STCE))) {
939 return RISCV_EXCP_ILLEGAL_INST;
942 if (riscv_cpu_virt_enabled(env)) {
943 if (!(get_field(env->hcounteren, COUNTEREN_TM) &&
944 get_field(env->henvcfg, HENVCFG_STCE))) {
945 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
949 if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) {
950 hmode_check = true;
953 return hmode_check ? hmode(env, csrno) : smode(env, csrno);
956 static RISCVException sstc_32(CPURISCVState *env, int csrno)
958 if (riscv_cpu_mxl(env) != MXL_RV32) {
959 return RISCV_EXCP_ILLEGAL_INST;
962 return sstc(env, csrno);
965 static RISCVException read_vstimecmp(CPURISCVState *env, int csrno,
966 target_ulong *val)
968 *val = env->vstimecmp;
970 return RISCV_EXCP_NONE;
973 static RISCVException read_vstimecmph(CPURISCVState *env, int csrno,
974 target_ulong *val)
976 *val = env->vstimecmp >> 32;
978 return RISCV_EXCP_NONE;
981 static RISCVException write_vstimecmp(CPURISCVState *env, int csrno,
982 target_ulong val)
984 RISCVCPU *cpu = env_archcpu(env);
986 if (riscv_cpu_mxl(env) == MXL_RV32) {
987 env->vstimecmp = deposit64(env->vstimecmp, 0, 32, (uint64_t)val);
988 } else {
989 env->vstimecmp = val;
992 riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp,
993 env->htimedelta, MIP_VSTIP);
995 return RISCV_EXCP_NONE;
998 static RISCVException write_vstimecmph(CPURISCVState *env, int csrno,
999 target_ulong val)
1001 RISCVCPU *cpu = env_archcpu(env);
1003 env->vstimecmp = deposit64(env->vstimecmp, 32, 32, (uint64_t)val);
1004 riscv_timer_write_timecmp(cpu, env->vstimer, env->vstimecmp,
1005 env->htimedelta, MIP_VSTIP);
1007 return RISCV_EXCP_NONE;
1010 static RISCVException read_stimecmp(CPURISCVState *env, int csrno,
1011 target_ulong *val)
1013 if (riscv_cpu_virt_enabled(env)) {
1014 *val = env->vstimecmp;
1015 } else {
1016 *val = env->stimecmp;
1019 return RISCV_EXCP_NONE;
1022 static RISCVException read_stimecmph(CPURISCVState *env, int csrno,
1023 target_ulong *val)
1025 if (riscv_cpu_virt_enabled(env)) {
1026 *val = env->vstimecmp >> 32;
1027 } else {
1028 *val = env->stimecmp >> 32;
1031 return RISCV_EXCP_NONE;
1034 static RISCVException write_stimecmp(CPURISCVState *env, int csrno,
1035 target_ulong val)
1037 RISCVCPU *cpu = env_archcpu(env);
1039 if (riscv_cpu_virt_enabled(env)) {
1040 if (env->hvictl & HVICTL_VTI) {
1041 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1043 return write_vstimecmp(env, csrno, val);
1046 if (riscv_cpu_mxl(env) == MXL_RV32) {
1047 env->stimecmp = deposit64(env->stimecmp, 0, 32, (uint64_t)val);
1048 } else {
1049 env->stimecmp = val;
1052 riscv_timer_write_timecmp(cpu, env->stimer, env->stimecmp, 0, MIP_STIP);
1054 return RISCV_EXCP_NONE;
1057 static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
1058 target_ulong val)
1060 RISCVCPU *cpu = env_archcpu(env);
1062 if (riscv_cpu_virt_enabled(env)) {
1063 if (env->hvictl & HVICTL_VTI) {
1064 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1066 return write_vstimecmph(env, csrno, val);
1069 env->stimecmp = deposit64(env->stimecmp, 32, 32, (uint64_t)val);
1070 riscv_timer_write_timecmp(cpu, env->stimer, env->stimecmp, 0, MIP_STIP);
1072 return RISCV_EXCP_NONE;
1075 /* Machine constants */
1077 #define M_MODE_INTERRUPTS ((uint64_t)(MIP_MSIP | MIP_MTIP | MIP_MEIP))
1078 #define S_MODE_INTERRUPTS ((uint64_t)(MIP_SSIP | MIP_STIP | MIP_SEIP | \
1079 MIP_LCOFIP))
1080 #define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP))
1081 #define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS))
1083 #define VSTOPI_NUM_SRCS 5
1085 static const uint64_t delegable_ints = S_MODE_INTERRUPTS |
1086 VS_MODE_INTERRUPTS;
1087 static const uint64_t vs_delegable_ints = VS_MODE_INTERRUPTS;
1088 static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
1089 HS_MODE_INTERRUPTS;
1090 #define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
1091 (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
1092 (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
1093 (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
1094 (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
1095 (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
1096 (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
1097 (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
1098 (1ULL << (RISCV_EXCP_U_ECALL)) | \
1099 (1ULL << (RISCV_EXCP_S_ECALL)) | \
1100 (1ULL << (RISCV_EXCP_VS_ECALL)) | \
1101 (1ULL << (RISCV_EXCP_M_ECALL)) | \
1102 (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
1103 (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
1104 (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
1105 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
1106 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
1107 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
1108 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
1109 static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
1110 ~((1ULL << (RISCV_EXCP_S_ECALL)) |
1111 (1ULL << (RISCV_EXCP_VS_ECALL)) |
1112 (1ULL << (RISCV_EXCP_M_ECALL)) |
1113 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
1114 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
1115 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
1116 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
1117 static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
1118 SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
1119 SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
1120 static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP |
1121 SIP_LCOFIP;
1122 static const target_ulong hip_writable_mask = MIP_VSSIP;
1123 static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
1124 static const target_ulong vsip_writable_mask = MIP_VSSIP;
1126 static const char valid_vm_1_10_32[16] = {
1127 [VM_1_10_MBARE] = 1,
1128 [VM_1_10_SV32] = 1
1131 static const char valid_vm_1_10_64[16] = {
1132 [VM_1_10_MBARE] = 1,
1133 [VM_1_10_SV39] = 1,
1134 [VM_1_10_SV48] = 1,
1135 [VM_1_10_SV57] = 1
1138 /* Machine Information Registers */
1139 static RISCVException read_zero(CPURISCVState *env, int csrno,
1140 target_ulong *val)
1142 *val = 0;
1143 return RISCV_EXCP_NONE;
1146 static RISCVException write_ignore(CPURISCVState *env, int csrno,
1147 target_ulong val)
1149 return RISCV_EXCP_NONE;
1152 static RISCVException read_mvendorid(CPURISCVState *env, int csrno,
1153 target_ulong *val)
1155 CPUState *cs = env_cpu(env);
1156 RISCVCPU *cpu = RISCV_CPU(cs);
1158 *val = cpu->cfg.mvendorid;
1159 return RISCV_EXCP_NONE;
1162 static RISCVException read_marchid(CPURISCVState *env, int csrno,
1163 target_ulong *val)
1165 CPUState *cs = env_cpu(env);
1166 RISCVCPU *cpu = RISCV_CPU(cs);
1168 *val = cpu->cfg.marchid;
1169 return RISCV_EXCP_NONE;
1172 static RISCVException read_mimpid(CPURISCVState *env, int csrno,
1173 target_ulong *val)
1175 CPUState *cs = env_cpu(env);
1176 RISCVCPU *cpu = RISCV_CPU(cs);
1178 *val = cpu->cfg.mimpid;
1179 return RISCV_EXCP_NONE;
1182 static RISCVException read_mhartid(CPURISCVState *env, int csrno,
1183 target_ulong *val)
1185 *val = env->mhartid;
1186 return RISCV_EXCP_NONE;
1189 /* Machine Trap Setup */
1191 /* We do not store SD explicitly, only compute it on demand. */
1192 static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
1194 if ((status & MSTATUS_FS) == MSTATUS_FS ||
1195 (status & MSTATUS_VS) == MSTATUS_VS ||
1196 (status & MSTATUS_XS) == MSTATUS_XS) {
1197 switch (xl) {
1198 case MXL_RV32:
1199 return status | MSTATUS32_SD;
1200 case MXL_RV64:
1201 return status | MSTATUS64_SD;
1202 case MXL_RV128:
1203 return MSTATUSH128_SD;
1204 default:
1205 g_assert_not_reached();
1208 return status;
1211 static RISCVException read_mstatus(CPURISCVState *env, int csrno,
1212 target_ulong *val)
1214 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
1215 return RISCV_EXCP_NONE;
1218 static int validate_vm(CPURISCVState *env, target_ulong vm)
1220 if (riscv_cpu_mxl(env) == MXL_RV32) {
1221 return valid_vm_1_10_32[vm & 0xf];
1222 } else {
1223 return valid_vm_1_10_64[vm & 0xf];
1227 static RISCVException write_mstatus(CPURISCVState *env, int csrno,
1228 target_ulong val)
1230 uint64_t mstatus = env->mstatus;
1231 uint64_t mask = 0;
1232 RISCVMXL xl = riscv_cpu_mxl(env);
1234 /* flush tlb on mstatus fields that affect VM */
1235 if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV |
1236 MSTATUS_MPRV | MSTATUS_SUM)) {
1237 tlb_flush(env_cpu(env));
1239 mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
1240 MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
1241 MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
1242 MSTATUS_TW | MSTATUS_VS;
1244 if (riscv_has_ext(env, RVF)) {
1245 mask |= MSTATUS_FS;
1248 if (xl != MXL_RV32 || env->debugger) {
1250 * RV32: MPV and GVA are not in mstatus. The current plan is to
1251 * add them to mstatush. For now, we just don't support it.
1253 mask |= MSTATUS_MPV | MSTATUS_GVA;
1254 if ((val & MSTATUS64_UXL) != 0) {
1255 mask |= MSTATUS64_UXL;
1259 mstatus = (mstatus & ~mask) | (val & mask);
1261 if (xl > MXL_RV32) {
1262 /* SXL field is for now read only */
1263 mstatus = set_field(mstatus, MSTATUS64_SXL, xl);
1265 env->mstatus = mstatus;
1266 env->xl = cpu_recompute_xl(env);
1268 return RISCV_EXCP_NONE;
1271 static RISCVException read_mstatush(CPURISCVState *env, int csrno,
1272 target_ulong *val)
1274 *val = env->mstatus >> 32;
1275 return RISCV_EXCP_NONE;
1278 static RISCVException write_mstatush(CPURISCVState *env, int csrno,
1279 target_ulong val)
1281 uint64_t valh = (uint64_t)val << 32;
1282 uint64_t mask = MSTATUS_MPV | MSTATUS_GVA;
1284 if ((valh ^ env->mstatus) & (MSTATUS_MPV)) {
1285 tlb_flush(env_cpu(env));
1288 env->mstatus = (env->mstatus & ~mask) | (valh & mask);
1290 return RISCV_EXCP_NONE;
1293 static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
1294 Int128 *val)
1296 *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128, env->mstatus));
1297 return RISCV_EXCP_NONE;
1300 static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
1301 Int128 *val)
1303 *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
1304 return RISCV_EXCP_NONE;
1307 static RISCVException read_misa(CPURISCVState *env, int csrno,
1308 target_ulong *val)
1310 target_ulong misa;
1312 switch (env->misa_mxl) {
1313 case MXL_RV32:
1314 misa = (target_ulong)MXL_RV32 << 30;
1315 break;
1316 #ifdef TARGET_RISCV64
1317 case MXL_RV64:
1318 misa = (target_ulong)MXL_RV64 << 62;
1319 break;
1320 #endif
1321 default:
1322 g_assert_not_reached();
1325 *val = misa | env->misa_ext;
1326 return RISCV_EXCP_NONE;
1329 static RISCVException write_misa(CPURISCVState *env, int csrno,
1330 target_ulong val)
1332 if (!riscv_feature(env, RISCV_FEATURE_MISA)) {
1333 /* drop write to misa */
1334 return RISCV_EXCP_NONE;
1337 /* 'I' or 'E' must be present */
1338 if (!(val & (RVI | RVE))) {
1339 /* It is not, drop write to misa */
1340 return RISCV_EXCP_NONE;
1343 /* 'E' excludes all other extensions */
1344 if (val & RVE) {
1345 /* when we support 'E' we can do "val = RVE;" however
1346 * for now we just drop writes if 'E' is present.
1348 return RISCV_EXCP_NONE;
1352 * misa.MXL writes are not supported by QEMU.
1353 * Drop writes to those bits.
1356 /* Mask extensions that are not supported by this hart */
1357 val &= env->misa_ext_mask;
1359 /* Mask extensions that are not supported by QEMU */
1360 val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU | RVV);
1362 /* 'D' depends on 'F', so clear 'D' if 'F' is not present */
1363 if ((val & RVD) && !(val & RVF)) {
1364 val &= ~RVD;
1367 /* Suppress 'C' if next instruction is not aligned
1368 * TODO: this should check next_pc
1370 if ((val & RVC) && (GETPC() & ~3) != 0) {
1371 val &= ~RVC;
1374 /* If nothing changed, do nothing. */
1375 if (val == env->misa_ext) {
1376 return RISCV_EXCP_NONE;
1379 if (!(val & RVF)) {
1380 env->mstatus &= ~MSTATUS_FS;
1383 /* flush translation cache */
1384 tb_flush(env_cpu(env));
1385 env->misa_ext = val;
1386 env->xl = riscv_cpu_mxl(env);
1387 return RISCV_EXCP_NONE;
1390 static RISCVException read_medeleg(CPURISCVState *env, int csrno,
1391 target_ulong *val)
1393 *val = env->medeleg;
1394 return RISCV_EXCP_NONE;
1397 static RISCVException write_medeleg(CPURISCVState *env, int csrno,
1398 target_ulong val)
1400 env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
1401 return RISCV_EXCP_NONE;
1404 static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno,
1405 uint64_t *ret_val,
1406 uint64_t new_val, uint64_t wr_mask)
1408 uint64_t mask = wr_mask & delegable_ints;
1410 if (ret_val) {
1411 *ret_val = env->mideleg;
1414 env->mideleg = (env->mideleg & ~mask) | (new_val & mask);
1416 if (riscv_has_ext(env, RVH)) {
1417 env->mideleg |= HS_MODE_INTERRUPTS;
1420 return RISCV_EXCP_NONE;
1423 static RISCVException rmw_mideleg(CPURISCVState *env, int csrno,
1424 target_ulong *ret_val,
1425 target_ulong new_val, target_ulong wr_mask)
1427 uint64_t rval;
1428 RISCVException ret;
1430 ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask);
1431 if (ret_val) {
1432 *ret_val = rval;
1435 return ret;
1438 static RISCVException rmw_midelegh(CPURISCVState *env, int csrno,
1439 target_ulong *ret_val,
1440 target_ulong new_val,
1441 target_ulong wr_mask)
1443 uint64_t rval;
1444 RISCVException ret;
1446 ret = rmw_mideleg64(env, csrno, &rval,
1447 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1448 if (ret_val) {
1449 *ret_val = rval >> 32;
1452 return ret;
1455 static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
1456 uint64_t *ret_val,
1457 uint64_t new_val, uint64_t wr_mask)
1459 uint64_t mask = wr_mask & all_ints;
1461 if (ret_val) {
1462 *ret_val = env->mie;
1465 env->mie = (env->mie & ~mask) | (new_val & mask);
1467 if (!riscv_has_ext(env, RVH)) {
1468 env->mie &= ~((uint64_t)MIP_SGEIP);
1471 return RISCV_EXCP_NONE;
1474 static RISCVException rmw_mie(CPURISCVState *env, int csrno,
1475 target_ulong *ret_val,
1476 target_ulong new_val, target_ulong wr_mask)
1478 uint64_t rval;
1479 RISCVException ret;
1481 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask);
1482 if (ret_val) {
1483 *ret_val = rval;
1486 return ret;
1489 static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
1490 target_ulong *ret_val,
1491 target_ulong new_val, target_ulong wr_mask)
1493 uint64_t rval;
1494 RISCVException ret;
1496 ret = rmw_mie64(env, csrno, &rval,
1497 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1498 if (ret_val) {
1499 *ret_val = rval >> 32;
1502 return ret;
1505 static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val)
1507 int irq;
1508 uint8_t iprio;
1510 irq = riscv_cpu_mirq_pending(env);
1511 if (irq <= 0 || irq > 63) {
1512 *val = 0;
1513 } else {
1514 iprio = env->miprio[irq];
1515 if (!iprio) {
1516 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) {
1517 iprio = IPRIO_MMAXIPRIO;
1520 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
1521 *val |= iprio;
1524 return RISCV_EXCP_NONE;
1527 static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
1529 if (!riscv_cpu_virt_enabled(env)) {
1530 return csrno;
1533 switch (csrno) {
1534 case CSR_SISELECT:
1535 return CSR_VSISELECT;
1536 case CSR_SIREG:
1537 return CSR_VSIREG;
1538 case CSR_STOPEI:
1539 return CSR_VSTOPEI;
1540 default:
1541 return csrno;
1545 static int rmw_xiselect(CPURISCVState *env, int csrno, target_ulong *val,
1546 target_ulong new_val, target_ulong wr_mask)
1548 target_ulong *iselect;
1550 /* Translate CSR number for VS-mode */
1551 csrno = aia_xlate_vs_csrno(env, csrno);
1553 /* Find the iselect CSR based on CSR number */
1554 switch (csrno) {
1555 case CSR_MISELECT:
1556 iselect = &env->miselect;
1557 break;
1558 case CSR_SISELECT:
1559 iselect = &env->siselect;
1560 break;
1561 case CSR_VSISELECT:
1562 iselect = &env->vsiselect;
1563 break;
1564 default:
1565 return RISCV_EXCP_ILLEGAL_INST;
1568 if (val) {
1569 *val = *iselect;
1572 wr_mask &= ISELECT_MASK;
1573 if (wr_mask) {
1574 *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
1577 return RISCV_EXCP_NONE;
1580 static int rmw_iprio(target_ulong xlen,
1581 target_ulong iselect, uint8_t *iprio,
1582 target_ulong *val, target_ulong new_val,
1583 target_ulong wr_mask, int ext_irq_no)
1585 int i, firq, nirqs;
1586 target_ulong old_val;
1588 if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) {
1589 return -EINVAL;
1591 if (xlen != 32 && iselect & 0x1) {
1592 return -EINVAL;
1595 nirqs = 4 * (xlen / 32);
1596 firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs);
1598 old_val = 0;
1599 for (i = 0; i < nirqs; i++) {
1600 old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i);
1603 if (val) {
1604 *val = old_val;
1607 if (wr_mask) {
1608 new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
1609 for (i = 0; i < nirqs; i++) {
1611              * M-level and S-level external IRQ priority is always read-only
1612 * zero. This means default priority order is always preferred
1613 * for M-level and S-level external IRQs.
1615 if ((firq + i) == ext_irq_no) {
1616 continue;
1618 iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff;
1622 return 0;
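/*
 * Layout note for rmw_iprio(): priorities are one byte per interrupt and
 * an xlen-wide iprio register packs xlen/8 of them, i.e.
 * nirqs = 4 * (xlen / 32).  For xlen = 64 the odd iselect values are
 * rejected above and ISELECT_IPRIO0 maps to IRQs 0..7, ISELECT_IPRIO2 to
 * IRQs 8..15, and so on; for xlen = 32 each iselect value covers four IRQs.
 */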
1625 static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
1626 target_ulong new_val, target_ulong wr_mask)
1628 bool virt;
1629 uint8_t *iprio;
1630 int ret = -EINVAL;
1631 target_ulong priv, isel, vgein;
1633 /* Translate CSR number for VS-mode */
1634 csrno = aia_xlate_vs_csrno(env, csrno);
1636 /* Decode register details from CSR number */
1637 virt = false;
1638 switch (csrno) {
1639 case CSR_MIREG:
1640 iprio = env->miprio;
1641 isel = env->miselect;
1642 priv = PRV_M;
1643 break;
1644 case CSR_SIREG:
1645 iprio = env->siprio;
1646 isel = env->siselect;
1647 priv = PRV_S;
1648 break;
1649 case CSR_VSIREG:
1650 iprio = env->hviprio;
1651 isel = env->vsiselect;
1652 priv = PRV_S;
1653 virt = true;
1654 break;
1655 default:
1656 goto done;
1659 /* Find the selected guest interrupt file */
1660 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
1662 if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) {
1663 /* Local interrupt priority registers not available for VS-mode */
1664 if (!virt) {
1665 ret = rmw_iprio(riscv_cpu_mxl_bits(env),
1666 isel, iprio, val, new_val, wr_mask,
1667 (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT);
1669 } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) {
1670 /* IMSIC registers only available when machine implements it. */
1671 if (env->aia_ireg_rmw_fn[priv]) {
1672 /* Selected guest interrupt file should not be zero */
1673 if (virt && (!vgein || env->geilen < vgein)) {
1674 goto done;
1676 /* Call machine specific IMSIC register emulation */
1677 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
1678 AIA_MAKE_IREG(isel, priv, virt, vgein,
1679 riscv_cpu_mxl_bits(env)),
1680 val, new_val, wr_mask);
1684 done:
1685 if (ret) {
1686 return (riscv_cpu_virt_enabled(env) && virt) ?
1687 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
1689 return RISCV_EXCP_NONE;
1692 static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val,
1693 target_ulong new_val, target_ulong wr_mask)
1695 bool virt;
1696 int ret = -EINVAL;
1697 target_ulong priv, vgein;
1699 /* Translate CSR number for VS-mode */
1700 csrno = aia_xlate_vs_csrno(env, csrno);
1702 /* Decode register details from CSR number */
1703 virt = false;
1704 switch (csrno) {
1705 case CSR_MTOPEI:
1706 priv = PRV_M;
1707 break;
1708 case CSR_STOPEI:
1709 priv = PRV_S;
1710 break;
1711 case CSR_VSTOPEI:
1712 priv = PRV_S;
1713 virt = true;
1714 break;
1715 default:
1716 goto done;
1719 /* IMSIC CSRs only available when machine implements IMSIC. */
1720 if (!env->aia_ireg_rmw_fn[priv]) {
1721 goto done;
1724 /* Find the selected guest interrupt file */
1725 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
1727 /* Selected guest interrupt file should be valid */
1728 if (virt && (!vgein || env->geilen < vgein)) {
1729 goto done;
1732 /* Call machine specific IMSIC register emulation for TOPEI */
1733 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
1734 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein,
1735 riscv_cpu_mxl_bits(env)),
1736 val, new_val, wr_mask);
1738 done:
1739 if (ret) {
1740 return (riscv_cpu_virt_enabled(env) && virt) ?
1741 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
1743 return RISCV_EXCP_NONE;
1746 static RISCVException read_mtvec(CPURISCVState *env, int csrno,
1747 target_ulong *val)
1749 *val = env->mtvec;
1750 return RISCV_EXCP_NONE;
1753 static RISCVException write_mtvec(CPURISCVState *env, int csrno,
1754 target_ulong val)
1756     /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
1757 if ((val & 3) < 2) {
1758 env->mtvec = val;
1759 } else {
1760 qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n");
1762 return RISCV_EXCP_NONE;
1765 static RISCVException read_mcountinhibit(CPURISCVState *env, int csrno,
1766 target_ulong *val)
1768 *val = env->mcountinhibit;
1769 return RISCV_EXCP_NONE;
1772 static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
1773 target_ulong val)
1775 int cidx;
1776 PMUCTRState *counter;
1778 env->mcountinhibit = val;
1780 /* Check if any other counter is also monitoring cycles/instructions */
1781 for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
1782 if (!get_field(env->mcountinhibit, BIT(cidx))) {
1783 counter = &env->pmu_ctrs[cidx];
1784 counter->started = true;
1788 return RISCV_EXCP_NONE;
1791 static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
1792 target_ulong *val)
1794 *val = env->mcounteren;
1795 return RISCV_EXCP_NONE;
1798 static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
1799 target_ulong val)
1801 env->mcounteren = val;
1802 return RISCV_EXCP_NONE;
1805 /* Machine Trap Handling */
1806 static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
1807 Int128 *val)
1809 *val = int128_make128(env->mscratch, env->mscratchh);
1810 return RISCV_EXCP_NONE;
1813 static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
1814 Int128 val)
1816 env->mscratch = int128_getlo(val);
1817 env->mscratchh = int128_gethi(val);
1818 return RISCV_EXCP_NONE;
1821 static RISCVException read_mscratch(CPURISCVState *env, int csrno,
1822 target_ulong *val)
1824 *val = env->mscratch;
1825 return RISCV_EXCP_NONE;
1828 static RISCVException write_mscratch(CPURISCVState *env, int csrno,
1829 target_ulong val)
1831 env->mscratch = val;
1832 return RISCV_EXCP_NONE;
1835 static RISCVException read_mepc(CPURISCVState *env, int csrno,
1836 target_ulong *val)
1838 *val = env->mepc;
1839 return RISCV_EXCP_NONE;
1842 static RISCVException write_mepc(CPURISCVState *env, int csrno,
1843 target_ulong val)
1845 env->mepc = val;
1846 return RISCV_EXCP_NONE;
1849 static RISCVException read_mcause(CPURISCVState *env, int csrno,
1850 target_ulong *val)
1852 *val = env->mcause;
1853 return RISCV_EXCP_NONE;
1856 static RISCVException write_mcause(CPURISCVState *env, int csrno,
1857 target_ulong val)
1859 env->mcause = val;
1860 return RISCV_EXCP_NONE;
1863 static RISCVException read_mtval(CPURISCVState *env, int csrno,
1864 target_ulong *val)
1866 *val = env->mtval;
1867 return RISCV_EXCP_NONE;
1870 static RISCVException write_mtval(CPURISCVState *env, int csrno,
1871 target_ulong val)
1873 env->mtval = val;
1874 return RISCV_EXCP_NONE;
1877 /* Execution environment configuration setup */
1878 static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
1879 target_ulong *val)
1881 *val = env->menvcfg;
1882 return RISCV_EXCP_NONE;
1885 static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
1886 target_ulong val)
1888 uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE;
1890 if (riscv_cpu_mxl(env) == MXL_RV64) {
1891 mask |= MENVCFG_PBMTE | MENVCFG_STCE;
1893 env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
1895 return RISCV_EXCP_NONE;
1898 static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
1899 target_ulong *val)
1901 *val = env->menvcfg >> 32;
1902 return RISCV_EXCP_NONE;
1905 static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
1906 target_ulong val)
1908 uint64_t mask = MENVCFG_PBMTE | MENVCFG_STCE;
1909 uint64_t valh = (uint64_t)val << 32;
1911 env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
1913 return RISCV_EXCP_NONE;
1916 static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
1917 target_ulong *val)
1919 RISCVException ret;
1921 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
1922 if (ret != RISCV_EXCP_NONE) {
1923 return ret;
1926 *val = env->senvcfg;
1927 return RISCV_EXCP_NONE;
1930 static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
1931 target_ulong val)
1933 uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
1934 RISCVException ret;
1936 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
1937 if (ret != RISCV_EXCP_NONE) {
1938 return ret;
1941 env->senvcfg = (env->senvcfg & ~mask) | (val & mask);
1942 return RISCV_EXCP_NONE;
1945 static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
1946 target_ulong *val)
1948 RISCVException ret;
1950 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
1951 if (ret != RISCV_EXCP_NONE) {
1952 return ret;
1955 *val = env->henvcfg;
1956 return RISCV_EXCP_NONE;
1959 static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
1960 target_ulong val)
1962 uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;
1963 RISCVException ret;
1965 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
1966 if (ret != RISCV_EXCP_NONE) {
1967 return ret;
1970 if (riscv_cpu_mxl(env) == MXL_RV64) {
1971 mask |= HENVCFG_PBMTE | HENVCFG_STCE;
1974 env->henvcfg = (env->henvcfg & ~mask) | (val & mask);
1976 return RISCV_EXCP_NONE;
1979 static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
1980 target_ulong *val)
1982 RISCVException ret;
1984 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
1985 if (ret != RISCV_EXCP_NONE) {
1986 return ret;
1989 *val = env->henvcfg >> 32;
1990 return RISCV_EXCP_NONE;
1993 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
1994 target_ulong val)
1996 uint64_t mask = HENVCFG_PBMTE | HENVCFG_STCE;
1997 uint64_t valh = (uint64_t)val << 32;
1998 RISCVException ret;
2000 ret = smstateen_acc_ok(env, 0, SMSTATEEN0_HSENVCFG);
2001 if (ret != RISCV_EXCP_NONE) {
2002 return ret;
2005 env->henvcfg = (env->henvcfg & ~mask) | (valh & mask);
2006 return RISCV_EXCP_NONE;
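/*
 * Smstateen state-enable registers.  mstateen[0..3] are the M-mode master
 * copies; the hstateen reads/writes below are filtered through the matching
 * mstateen bits, and sstateen is filtered through mstateen (plus hstateen
 * when running virtualized), so a lower-level register can never enable
 * state that a higher level has disabled.
 */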
2009 static RISCVException read_mstateen(CPURISCVState *env, int csrno,
2010 target_ulong *val)
2012 *val = env->mstateen[csrno - CSR_MSTATEEN0];
2014 return RISCV_EXCP_NONE;
2017 static RISCVException write_mstateen(CPURISCVState *env, int csrno,
2018 uint64_t wr_mask, target_ulong new_val)
2020 uint64_t *reg;
2022 reg = &env->mstateen[csrno - CSR_MSTATEEN0];
2023 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2025 return RISCV_EXCP_NONE;
2028 static RISCVException write_mstateen0(CPURISCVState *env, int csrno,
2029 target_ulong new_val)
2031 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2033 return write_mstateen(env, csrno, wr_mask, new_val);
2036 static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno,
2037 target_ulong new_val)
2039 return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2042 static RISCVException read_mstateenh(CPURISCVState *env, int csrno,
2043 target_ulong *val)
2045 *val = env->mstateen[csrno - CSR_MSTATEEN0H] >> 32;
2047 return RISCV_EXCP_NONE;
2050 static RISCVException write_mstateenh(CPURISCVState *env, int csrno,
2051 uint64_t wr_mask, target_ulong new_val)
2053 uint64_t *reg, val;
2055 reg = &env->mstateen[csrno - CSR_MSTATEEN0H];
2056 val = (uint64_t)new_val << 32;
2057 val |= *reg & 0xFFFFFFFF;
2058 *reg = (*reg & ~wr_mask) | (val & wr_mask);
2060 return RISCV_EXCP_NONE;
2063 static RISCVException write_mstateen0h(CPURISCVState *env, int csrno,
2064 target_ulong new_val)
2066 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2068 return write_mstateenh(env, csrno, wr_mask, new_val);
2071 static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno,
2072 target_ulong new_val)
2074 return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
2077 static RISCVException read_hstateen(CPURISCVState *env, int csrno,
2078 target_ulong *val)
2080 int index = csrno - CSR_HSTATEEN0;
2082 *val = env->hstateen[index] & env->mstateen[index];
2084 return RISCV_EXCP_NONE;
2087 static RISCVException write_hstateen(CPURISCVState *env, int csrno,
2088 uint64_t mask, target_ulong new_val)
2090 int index = csrno - CSR_HSTATEEN0;
2091 uint64_t *reg, wr_mask;
2093 reg = &env->hstateen[index];
2094 wr_mask = env->mstateen[index] & mask;
2095 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2097 return RISCV_EXCP_NONE;
2100 static RISCVException write_hstateen0(CPURISCVState *env, int csrno,
2101 target_ulong new_val)
2103 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2105 return write_hstateen(env, csrno, wr_mask, new_val);
2108 static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno,
2109 target_ulong new_val)
2111 return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2114 static RISCVException read_hstateenh(CPURISCVState *env, int csrno,
2115 target_ulong *val)
2117 int index = csrno - CSR_HSTATEEN0H;
2119 *val = (env->hstateen[index] >> 32) & (env->mstateen[index] >> 32);
2121 return RISCV_EXCP_NONE;
2124 static RISCVException write_hstateenh(CPURISCVState *env, int csrno,
2125 uint64_t mask, target_ulong new_val)
2127 int index = csrno - CSR_HSTATEEN0H;
2128 uint64_t *reg, wr_mask, val;
2130 reg = &env->hstateen[index];
2131 val = (uint64_t)new_val << 32;
2132 val |= *reg & 0xFFFFFFFF;
2133 wr_mask = env->mstateen[index] & mask;
2134 *reg = (*reg & ~wr_mask) | (val & wr_mask);
2136 return RISCV_EXCP_NONE;
2139 static RISCVException write_hstateen0h(CPURISCVState *env, int csrno,
2140 target_ulong new_val)
2142 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2144 return write_hstateenh(env, csrno, wr_mask, new_val);
2147 static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno,
2148 target_ulong new_val)
2150 return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val);
2153 static RISCVException read_sstateen(CPURISCVState *env, int csrno,
2154 target_ulong *val)
2156 bool virt = riscv_cpu_virt_enabled(env);
2157 int index = csrno - CSR_SSTATEEN0;
2159 *val = env->sstateen[index] & env->mstateen[index];
2160 if (virt) {
2161 *val &= env->hstateen[index];
2164 return RISCV_EXCP_NONE;
2167 static RISCVException write_sstateen(CPURISCVState *env, int csrno,
2168 uint64_t mask, target_ulong new_val)
2170 bool virt = riscv_cpu_virt_enabled(env);
2171 int index = csrno - CSR_SSTATEEN0;
2172 uint64_t wr_mask;
2173 uint64_t *reg;
2175 wr_mask = env->mstateen[index] & mask;
2176 if (virt) {
2177 wr_mask &= env->hstateen[index];
2180 reg = &env->sstateen[index];
2181 *reg = (*reg & ~wr_mask) | (new_val & wr_mask);
2183 return RISCV_EXCP_NONE;
2186 static RISCVException write_sstateen0(CPURISCVState *env, int csrno,
2187 target_ulong new_val)
2189 uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG;
2191 return write_sstateen(env, csrno, wr_mask, new_val);
2194 static RISCVException write_sstateen_1_3(CPURISCVState *env, int csrno,
2195 target_ulong new_val)
2197 return write_sstateen(env, csrno, SMSTATEEN_STATEEN, new_val);
2200 static RISCVException rmw_mip64(CPURISCVState *env, int csrno,
2201 uint64_t *ret_val,
2202 uint64_t new_val, uint64_t wr_mask)
2204 RISCVCPU *cpu = env_archcpu(env);
2205 uint64_t old_mip, mask = wr_mask & delegable_ints;
2206 uint32_t gin;
2208 if (mask & MIP_SEIP) {
2209 env->software_seip = new_val & MIP_SEIP;
2210 new_val |= env->external_seip * MIP_SEIP;
2213 if (cpu->cfg.ext_sstc && (env->priv == PRV_M) &&
2214 get_field(env->menvcfg, MENVCFG_STCE)) {
2215 /* sstc extension forbids STIP & VSTIP to be writeable in mip */
2216 mask = mask & ~(MIP_STIP | MIP_VSTIP);
2219 if (mask) {
2220 old_mip = riscv_cpu_update_mip(cpu, mask, (new_val & mask));
2221 } else {
2222 old_mip = env->mip;
2225 if (csrno != CSR_HVIP) {
2226 gin = get_field(env->hstatus, HSTATUS_VGEIN);
2227 old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0;
2228 old_mip |= env->vstime_irq ? MIP_VSTIP : 0;
2231 if (ret_val) {
2232 *ret_val = old_mip;
2235 return RISCV_EXCP_NONE;
2238 static RISCVException rmw_mip(CPURISCVState *env, int csrno,
2239 target_ulong *ret_val,
2240 target_ulong new_val, target_ulong wr_mask)
2242 uint64_t rval;
2243 RISCVException ret;
2245 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask);
2246 if (ret_val) {
2247 *ret_val = rval;
2250 return ret;
2253 static RISCVException rmw_miph(CPURISCVState *env, int csrno,
2254 target_ulong *ret_val,
2255 target_ulong new_val, target_ulong wr_mask)
2257 uint64_t rval;
2258 RISCVException ret;
2260 ret = rmw_mip64(env, csrno, &rval,
2261 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2262 if (ret_val) {
2263 *ret_val = rval >> 32;
2266 return ret;
2269 /* Supervisor Trap Setup */
2270 static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
2271 Int128 *val)
2273 uint64_t mask = sstatus_v1_10_mask;
2274 uint64_t sstatus = env->mstatus & mask;
2275 if (env->xl != MXL_RV32 || env->debugger) {
2276 mask |= SSTATUS64_UXL;
2279 *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
2280 return RISCV_EXCP_NONE;
2283 static RISCVException read_sstatus(CPURISCVState *env, int csrno,
2284 target_ulong *val)
2286 target_ulong mask = (sstatus_v1_10_mask);
2287 if (env->xl != MXL_RV32 || env->debugger) {
2288 mask |= SSTATUS64_UXL;
2290 /* TODO: Use SXL not MXL. */
2291 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
2292 return RISCV_EXCP_NONE;
2295 static RISCVException write_sstatus(CPURISCVState *env, int csrno,
2296 target_ulong val)
2298 target_ulong mask = (sstatus_v1_10_mask);
2300 if (env->xl != MXL_RV32 || env->debugger) {
2301 if ((val & SSTATUS64_UXL) != 0) {
2302 mask |= SSTATUS64_UXL;
2305 target_ulong newval = (env->mstatus & ~mask) | (val & mask);
2306 return write_mstatus(env, CSR_MSTATUS, newval);
2309 static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
2310 uint64_t *ret_val,
2311 uint64_t new_val, uint64_t wr_mask)
2313 RISCVException ret;
2314 uint64_t rval, mask = env->hideleg & VS_MODE_INTERRUPTS;
2316 /* Bring VS-level bits to correct position */
2317 new_val = (new_val & (VS_MODE_INTERRUPTS >> 1)) << 1;
2318 wr_mask = (wr_mask & (VS_MODE_INTERRUPTS >> 1)) << 1;
2320 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & mask);
2321 if (ret_val) {
2322 *ret_val = (rval & mask) >> 1;
2325 return ret;
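/*
 * Editor's illustration (not part of the original source): guest
 * vsie/vsip accesses use the S-level bit positions, while the machine
 * keeps the VS-level bits one position higher in mie/mip.  For example,
 * a guest setting SEIP (bit 9) in vsie is translated by the shifts in
 * rmw_vsie64() above into VSEIP (bit 10) of mie:
 *
 *     (new_val & (VS_MODE_INTERRUPTS >> 1)) << 1 == 1 << 10
 *
 * and the value read back is shifted down again with ">> 1", so the
 * guest still observes bit 9.
 */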
2328 static RISCVException rmw_vsie(CPURISCVState *env, int csrno,
2329 target_ulong *ret_val,
2330 target_ulong new_val, target_ulong wr_mask)
2332 uint64_t rval;
2333 RISCVException ret;
2335 ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask);
2336 if (ret_val) {
2337 *ret_val = rval;
2340 return ret;
2343 static RISCVException rmw_vsieh(CPURISCVState *env, int csrno,
2344 target_ulong *ret_val,
2345 target_ulong new_val, target_ulong wr_mask)
2347 uint64_t rval;
2348 RISCVException ret;
2350 ret = rmw_vsie64(env, csrno, &rval,
2351 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2352 if (ret_val) {
2353 *ret_val = rval >> 32;
2356 return ret;
2359 static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
2360 uint64_t *ret_val,
2361 uint64_t new_val, uint64_t wr_mask)
2363 RISCVException ret;
2364 uint64_t mask = env->mideleg & S_MODE_INTERRUPTS;
2366 if (riscv_cpu_virt_enabled(env)) {
2367 if (env->hvictl & HVICTL_VTI) {
2368 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
2370 ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
2371 } else {
2372 ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & mask);
2375 if (ret_val) {
2376 *ret_val &= mask;
2379 return ret;
2382 static RISCVException rmw_sie(CPURISCVState *env, int csrno,
2383 target_ulong *ret_val,
2384 target_ulong new_val, target_ulong wr_mask)
2386 uint64_t rval;
2387 RISCVException ret;
2389 ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask);
2390 if (ret == RISCV_EXCP_NONE && ret_val) {
2391 *ret_val = rval;
2394 return ret;
2397 static RISCVException rmw_sieh(CPURISCVState *env, int csrno,
2398 target_ulong *ret_val,
2399 target_ulong new_val, target_ulong wr_mask)
2401 uint64_t rval;
2402 RISCVException ret;
2404 ret = rmw_sie64(env, csrno, &rval,
2405 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2406 if (ret_val) {
2407 *ret_val = rval >> 32;
2410 return ret;
2413 static RISCVException read_stvec(CPURISCVState *env, int csrno,
2414 target_ulong *val)
2416 *val = env->stvec;
2417 return RISCV_EXCP_NONE;
2420 static RISCVException write_stvec(CPURISCVState *env, int csrno,
2421 target_ulong val)
2423 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, modes >= 2 reserved */
2424 if ((val & 3) < 2) {
2425 env->stvec = val;
2426 } else {
2427 qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n");
2429 return RISCV_EXCP_NONE;
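/*
 * Editor's illustration (not part of the original source): with the
 * encoding accepted above, a supervisor typically programs stvec as
 *
 *     stvec = trap_base | 0;   direct: every trap enters at trap_base
 *     stvec = trap_base | 1;   vectored: interrupt cause i enters at
 *                              trap_base + 4 * i
 *
 * where "trap_base" is a hypothetical 4-byte-aligned handler address.
 * Values with a reserved mode (>= 2) are logged and dropped by
 * write_stvec() above.
 */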
2432 static RISCVException read_scounteren(CPURISCVState *env, int csrno,
2433 target_ulong *val)
2435 *val = env->scounteren;
2436 return RISCV_EXCP_NONE;
2439 static RISCVException write_scounteren(CPURISCVState *env, int csrno,
2440 target_ulong val)
2442 env->scounteren = val;
2443 return RISCV_EXCP_NONE;
2446 /* Supervisor Trap Handling */
2447 static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
2448 Int128 *val)
2450 *val = int128_make128(env->sscratch, env->sscratchh);
2451 return RISCV_EXCP_NONE;
2454 static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
2455 Int128 val)
2457 env->sscratch = int128_getlo(val);
2458 env->sscratchh = int128_gethi(val);
2459 return RISCV_EXCP_NONE;
2462 static RISCVException read_sscratch(CPURISCVState *env, int csrno,
2463 target_ulong *val)
2465 *val = env->sscratch;
2466 return RISCV_EXCP_NONE;
2469 static RISCVException write_sscratch(CPURISCVState *env, int csrno,
2470 target_ulong val)
2472 env->sscratch = val;
2473 return RISCV_EXCP_NONE;
2476 static RISCVException read_sepc(CPURISCVState *env, int csrno,
2477 target_ulong *val)
2479 *val = env->sepc;
2480 return RISCV_EXCP_NONE;
2483 static RISCVException write_sepc(CPURISCVState *env, int csrno,
2484 target_ulong val)
2486 env->sepc = val;
2487 return RISCV_EXCP_NONE;
2490 static RISCVException read_scause(CPURISCVState *env, int csrno,
2491 target_ulong *val)
2493 *val = env->scause;
2494 return RISCV_EXCP_NONE;
2497 static RISCVException write_scause(CPURISCVState *env, int csrno,
2498 target_ulong val)
2500 env->scause = val;
2501 return RISCV_EXCP_NONE;
2504 static RISCVException read_stval(CPURISCVState *env, int csrno,
2505 target_ulong *val)
2507 *val = env->stval;
2508 return RISCV_EXCP_NONE;
2511 static RISCVException write_stval(CPURISCVState *env, int csrno,
2512 target_ulong val)
2514 env->stval = val;
2515 return RISCV_EXCP_NONE;
2518 static RISCVException rmw_vsip64(CPURISCVState *env, int csrno,
2519 uint64_t *ret_val,
2520 uint64_t new_val, uint64_t wr_mask)
2522 RISCVException ret;
2523 uint64_t rval, mask = env->hideleg & VS_MODE_INTERRUPTS;
2525 /* Bring the VS-level bits to the correct position */
2526 new_val = (new_val & (VS_MODE_INTERRUPTS >> 1)) << 1;
2527 wr_mask = (wr_mask & (VS_MODE_INTERRUPTS >> 1)) << 1;
2529 ret = rmw_mip64(env, csrno, &rval, new_val,
2530 wr_mask & mask & vsip_writable_mask);
2531 if (ret_val) {
2532 *ret_val = (rval & mask) >> 1;
2535 return ret;
2538 static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
2539 target_ulong *ret_val,
2540 target_ulong new_val, target_ulong wr_mask)
2542 uint64_t rval;
2543 RISCVException ret;
2545 ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask);
2546 if (ret_val) {
2547 *ret_val = rval;
2550 return ret;
2553 static RISCVException rmw_vsiph(CPURISCVState *env, int csrno,
2554 target_ulong *ret_val,
2555 target_ulong new_val, target_ulong wr_mask)
2557 uint64_t rval;
2558 RISCVException ret;
2560 ret = rmw_vsip64(env, csrno, &rval,
2561 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2562 if (ret_val) {
2563 *ret_val = rval >> 32;
2566 return ret;
2569 static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
2570 uint64_t *ret_val,
2571 uint64_t new_val, uint64_t wr_mask)
2573 RISCVException ret;
2574 uint64_t mask = env->mideleg & sip_writable_mask;
2576 if (riscv_cpu_virt_enabled(env)) {
2577 if (env->hvictl & HVICTL_VTI) {
2578 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
2580 ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
2581 } else {
2582 ret = rmw_mip64(env, csrno, ret_val, new_val, wr_mask & mask);
2585 if (ret_val) {
2586 *ret_val &= env->mideleg & S_MODE_INTERRUPTS;
2589 return ret;
2592 static RISCVException rmw_sip(CPURISCVState *env, int csrno,
2593 target_ulong *ret_val,
2594 target_ulong new_val, target_ulong wr_mask)
2596 uint64_t rval;
2597 RISCVException ret;
2599 ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask);
2600 if (ret_val) {
2601 *ret_val = rval;
2604 return ret;
2607 static RISCVException rmw_siph(CPURISCVState *env, int csrno,
2608 target_ulong *ret_val,
2609 target_ulong new_val, target_ulong wr_mask)
2611 uint64_t rval;
2612 RISCVException ret;
2614 ret = rmw_sip64(env, csrno, &rval,
2615 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2616 if (ret_val) {
2617 *ret_val = rval >> 32;
2620 return ret;
2623 /* Supervisor Protection and Translation */
2624 static RISCVException read_satp(CPURISCVState *env, int csrno,
2625 target_ulong *val)
2627 if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
2628 *val = 0;
2629 return RISCV_EXCP_NONE;
2632 if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
2633 return RISCV_EXCP_ILLEGAL_INST;
2634 } else {
2635 *val = env->satp;
2638 return RISCV_EXCP_NONE;
2641 static RISCVException write_satp(CPURISCVState *env, int csrno,
2642 target_ulong val)
2644 target_ulong vm, mask;
2646 if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
2647 return RISCV_EXCP_NONE;
2650 if (riscv_cpu_mxl(env) == MXL_RV32) {
2651 vm = validate_vm(env, get_field(val, SATP32_MODE));
2652 mask = (val ^ env->satp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
2653 } else {
2654 vm = validate_vm(env, get_field(val, SATP64_MODE));
2655 mask = (val ^ env->satp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
2658 if (vm && mask) {
2659 if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
2660 return RISCV_EXCP_ILLEGAL_INST;
2661 } else {
2663 * The ISA defines SATP.MODE=Bare as "no translation", but we still
2664 * pass these through QEMU's TLB emulation as it improves
2665 * performance. Flushing the TLB on SATP writes with paging
2666 * enabled avoids leaking those invalid cached mappings.
2668 tlb_flush(env_cpu(env));
2669 env->satp = val;
2672 return RISCV_EXCP_NONE;
2675 static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val)
2677 int irq, ret;
2678 target_ulong topei;
2679 uint64_t vseip, vsgein;
2680 uint32_t iid, iprio, hviid, hviprio, gein;
2681 uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS];
2683 gein = get_field(env->hstatus, HSTATUS_VGEIN);
2684 hviid = get_field(env->hvictl, HVICTL_IID);
2685 hviprio = get_field(env->hvictl, HVICTL_IPRIO);
2687 if (gein) {
2688 vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
2689 vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP;
2690 if (gein <= env->geilen && vseip) {
2691 siid[scount] = IRQ_S_EXT;
2692 siprio[scount] = IPRIO_MMAXIPRIO + 1;
2693 if (env->aia_ireg_rmw_fn[PRV_S]) {
2695 * Call the machine-specific IMSIC register emulation to
2696 * read TOPEI.
2698 ret = env->aia_ireg_rmw_fn[PRV_S](
2699 env->aia_ireg_rmw_fn_arg[PRV_S],
2700 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein,
2701 riscv_cpu_mxl_bits(env)),
2702 &topei, 0, 0);
2703 if (!ret && topei) {
2704 siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK;
2707 scount++;
2709 } else {
2710 if (hviid == IRQ_S_EXT && hviprio) {
2711 siid[scount] = IRQ_S_EXT;
2712 siprio[scount] = hviprio;
2713 scount++;
2717 if (env->hvictl & HVICTL_VTI) {
2718 if (hviid != IRQ_S_EXT) {
2719 siid[scount] = hviid;
2720 siprio[scount] = hviprio;
2721 scount++;
2723 } else {
2724 irq = riscv_cpu_vsirq_pending(env);
2725 if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) {
2726 siid[scount] = irq;
2727 siprio[scount] = env->hviprio[irq];
2728 scount++;
2732 iid = 0;
2733 iprio = UINT_MAX;
2734 for (s = 0; s < scount; s++) {
2735 if (siprio[s] < iprio) {
2736 iid = siid[s];
2737 iprio = siprio[s];
2741 if (iid) {
2742 if (env->hvictl & HVICTL_IPRIOM) {
2743 if (iprio > IPRIO_MMAXIPRIO) {
2744 iprio = IPRIO_MMAXIPRIO;
2746 if (!iprio) {
2747 if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) {
2748 iprio = IPRIO_MMAXIPRIO;
2751 } else {
2752 iprio = 1;
2754 } else {
2755 iprio = 0;
2758 *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT;
2759 *val |= iprio;
2760 return RISCV_EXCP_NONE;
2763 static int read_stopi(CPURISCVState *env, int csrno, target_ulong *val)
2765 int irq;
2766 uint8_t iprio;
2768 if (riscv_cpu_virt_enabled(env)) {
2769 return read_vstopi(env, CSR_VSTOPI, val);
2772 irq = riscv_cpu_sirq_pending(env);
2773 if (irq <= 0 || irq > 63) {
2774 *val = 0;
2775 } else {
2776 iprio = env->siprio[irq];
2777 if (!iprio) {
2778 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) {
2779 iprio = IPRIO_MMAXIPRIO;
2782 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
2783 *val |= iprio;
2786 return RISCV_EXCP_NONE;
2789 /* Hypervisor Extensions */
2790 static RISCVException read_hstatus(CPURISCVState *env, int csrno,
2791 target_ulong *val)
2793 *val = env->hstatus;
2794 if (riscv_cpu_mxl(env) != MXL_RV32) {
2795 /* We only support 64-bit VSXL */
2796 *val = set_field(*val, HSTATUS_VSXL, 2);
2798 /* We only support little endian */
2799 *val = set_field(*val, HSTATUS_VSBE, 0);
2800 return RISCV_EXCP_NONE;
2803 static RISCVException write_hstatus(CPURISCVState *env, int csrno,
2804 target_ulong val)
2806 env->hstatus = val;
2807 if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
2808 qemu_log_mask(LOG_UNIMP, "QEMU does not support mixed HSXLEN options.");
2810 if (get_field(val, HSTATUS_VSBE) != 0) {
2811 qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests.");
2813 return RISCV_EXCP_NONE;
2816 static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
2817 target_ulong *val)
2819 *val = env->hedeleg;
2820 return RISCV_EXCP_NONE;
2823 static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
2824 target_ulong val)
2826 env->hedeleg = val & vs_delegable_excps;
2827 return RISCV_EXCP_NONE;
2830 static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno,
2831 uint64_t *ret_val,
2832 uint64_t new_val, uint64_t wr_mask)
2834 uint64_t mask = wr_mask & vs_delegable_ints;
2836 if (ret_val) {
2837 *ret_val = env->hideleg & vs_delegable_ints;
2840 env->hideleg = (env->hideleg & ~mask) | (new_val & mask);
2841 return RISCV_EXCP_NONE;
2844 static RISCVException rmw_hideleg(CPURISCVState *env, int csrno,
2845 target_ulong *ret_val,
2846 target_ulong new_val, target_ulong wr_mask)
2848 uint64_t rval;
2849 RISCVException ret;
2851 ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask);
2852 if (ret_val) {
2853 *ret_val = rval;
2856 return ret;
2859 static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno,
2860 target_ulong *ret_val,
2861 target_ulong new_val, target_ulong wr_mask)
2863 uint64_t rval;
2864 RISCVException ret;
2866 ret = rmw_hideleg64(env, csrno, &rval,
2867 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2868 if (ret_val) {
2869 *ret_val = rval >> 32;
2872 return ret;
2875 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
2876 uint64_t *ret_val,
2877 uint64_t new_val, uint64_t wr_mask)
2879 RISCVException ret;
2881 ret = rmw_mip64(env, csrno, ret_val, new_val,
2882 wr_mask & hvip_writable_mask);
2883 if (ret_val) {
2884 *ret_val &= VS_MODE_INTERRUPTS;
2887 return ret;
2890 static RISCVException rmw_hvip(CPURISCVState *env, int csrno,
2891 target_ulong *ret_val,
2892 target_ulong new_val, target_ulong wr_mask)
2894 uint64_t rval;
2895 RISCVException ret;
2897 ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask);
2898 if (ret_val) {
2899 *ret_val = rval;
2902 return ret;
2905 static RISCVException rmw_hviph(CPURISCVState *env, int csrno,
2906 target_ulong *ret_val,
2907 target_ulong new_val, target_ulong wr_mask)
2909 uint64_t rval;
2910 RISCVException ret;
2912 ret = rmw_hvip64(env, csrno, &rval,
2913 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2914 if (ret_val) {
2915 *ret_val = rval >> 32;
2918 return ret;
2921 static RISCVException rmw_hip(CPURISCVState *env, int csrno,
2922 target_ulong *ret_value,
2923 target_ulong new_value, target_ulong write_mask)
2925 int ret = rmw_mip(env, csrno, ret_value, new_value,
2926 write_mask & hip_writable_mask);
2928 if (ret_value) {
2929 *ret_value &= HS_MODE_INTERRUPTS;
2931 return ret;
2934 static RISCVException rmw_hie(CPURISCVState *env, int csrno,
2935 target_ulong *ret_val,
2936 target_ulong new_val, target_ulong wr_mask)
2938 uint64_t rval;
2939 RISCVException ret;
2941 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS);
2942 if (ret_val) {
2943 *ret_val = rval & HS_MODE_INTERRUPTS;
2946 return ret;
2949 static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
2950 target_ulong *val)
2952 *val = env->hcounteren;
2953 return RISCV_EXCP_NONE;
2956 static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
2957 target_ulong val)
2959 env->hcounteren = val;
2960 return RISCV_EXCP_NONE;
2963 static RISCVException read_hgeie(CPURISCVState *env, int csrno,
2964 target_ulong *val)
2966 if (val) {
2967 *val = env->hgeie;
2969 return RISCV_EXCP_NONE;
2972 static RISCVException write_hgeie(CPURISCVState *env, int csrno,
2973 target_ulong val)
2975 /* Only bits GEILEN:1 are implemented; bit 0 is never implemented */
2976 val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
2977 env->hgeie = val;
2978 /* Update mip.SGEIP bit */
2979 riscv_cpu_update_mip(env_archcpu(env), MIP_SGEIP,
2980 BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
2981 return RISCV_EXCP_NONE;
2984 static RISCVException read_htval(CPURISCVState *env, int csrno,
2985 target_ulong *val)
2987 *val = env->htval;
2988 return RISCV_EXCP_NONE;
2991 static RISCVException write_htval(CPURISCVState *env, int csrno,
2992 target_ulong val)
2994 env->htval = val;
2995 return RISCV_EXCP_NONE;
2998 static RISCVException read_htinst(CPURISCVState *env, int csrno,
2999 target_ulong *val)
3001 *val = env->htinst;
3002 return RISCV_EXCP_NONE;
3005 static RISCVException write_htinst(CPURISCVState *env, int csrno,
3006 target_ulong val)
3008 return RISCV_EXCP_NONE;
3011 static RISCVException read_hgeip(CPURISCVState *env, int csrno,
3012 target_ulong *val)
3014 if (val) {
3015 *val = env->hgeip;
3017 return RISCV_EXCP_NONE;
3020 static RISCVException read_hgatp(CPURISCVState *env, int csrno,
3021 target_ulong *val)
3023 *val = env->hgatp;
3024 return RISCV_EXCP_NONE;
3027 static RISCVException write_hgatp(CPURISCVState *env, int csrno,
3028 target_ulong val)
3030 env->hgatp = val;
3031 return RISCV_EXCP_NONE;
3034 static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
3035 target_ulong *val)
3037 if (!env->rdtime_fn) {
3038 return RISCV_EXCP_ILLEGAL_INST;
3041 *val = env->htimedelta;
3042 return RISCV_EXCP_NONE;
3045 static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
3046 target_ulong val)
3048 if (!env->rdtime_fn) {
3049 return RISCV_EXCP_ILLEGAL_INST;
3052 if (riscv_cpu_mxl(env) == MXL_RV32) {
3053 env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
3054 } else {
3055 env->htimedelta = val;
3057 return RISCV_EXCP_NONE;
3060 static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
3061 target_ulong *val)
3063 if (!env->rdtime_fn) {
3064 return RISCV_EXCP_ILLEGAL_INST;
3067 *val = env->htimedelta >> 32;
3068 return RISCV_EXCP_NONE;
3071 static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
3072 target_ulong val)
3074 if (!env->rdtime_fn) {
3075 return RISCV_EXCP_ILLEGAL_INST;
3078 env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);
3079 return RISCV_EXCP_NONE;
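/*
 * Editor's illustration (not part of the original source): on RV32 the
 * 64-bit htimedelta is split across htimedelta/htimedeltah, and
 * deposit64(old, 32, 32, val) replaces only the upper 32-bit field:
 *
 *     old = 0x0000000012345678, val = 0xDEADBEEF
 *     deposit64(old, 32, 32, val) == 0xDEADBEEF12345678
 *
 * while write_htimedelta() on RV32 uses deposit64(old, 0, 32, val) to
 * update only the low half.
 */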
3082 static int read_hvictl(CPURISCVState *env, int csrno, target_ulong *val)
3084 *val = env->hvictl;
3085 return RISCV_EXCP_NONE;
3088 static int write_hvictl(CPURISCVState *env, int csrno, target_ulong val)
3090 env->hvictl = val & HVICTL_VALID_MASK;
3091 return RISCV_EXCP_NONE;
3094 static int read_hvipriox(CPURISCVState *env, int first_index,
3095 uint8_t *iprio, target_ulong *val)
3097 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
3099 /* First index has to be a multiple of the number of irqs per register */
3100 if (first_index % num_irqs) {
3101 return (riscv_cpu_virt_enabled(env)) ?
3102 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
3105 /* Fill up the return value */
3106 *val = 0;
3107 for (i = 0; i < num_irqs; i++) {
3108 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
3109 continue;
3111 if (rdzero) {
3112 continue;
3114 *val |= ((target_ulong)iprio[irq]) << (i * 8);
3117 return RISCV_EXCP_NONE;
3120 static int write_hvipriox(CPURISCVState *env, int first_index,
3121 uint8_t *iprio, target_ulong val)
3123 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
3125 /* First index has to be a multiple of the number of irqs per register */
3126 if (first_index % num_irqs) {
3127 return (riscv_cpu_virt_enabled(env)) ?
3128 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
3131 /* Fill up the priority array */
3132 for (i = 0; i < num_irqs; i++) {
3133 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
3134 continue;
3136 if (rdzero) {
3137 iprio[irq] = 0;
3138 } else {
3139 iprio[irq] = (val >> (i * 8)) & 0xff;
3143 return RISCV_EXCP_NONE;
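/*
 * Editor's illustration (not part of the original source): each
 * hviprioX register packs one-byte priorities, so num_irqs is
 * 4 * (MXLEN / 32), i.e. 4 slots on RV32 and 8 on RV64.  In
 * write_hvipriox() above, byte i of the written value becomes the
 * priority of whichever irq riscv_cpu_hviprio_index2irq() maps index
 * (first_index + i) to, except that indices decoding as read-only-zero
 * have their priority forced to 0; read_hvipriox() packs the same bytes
 * back in the same order.
 */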
3146 static int read_hviprio1(CPURISCVState *env, int csrno, target_ulong *val)
3148 return read_hvipriox(env, 0, env->hviprio, val);
3151 static int write_hviprio1(CPURISCVState *env, int csrno, target_ulong val)
3153 return write_hvipriox(env, 0, env->hviprio, val);
3156 static int read_hviprio1h(CPURISCVState *env, int csrno, target_ulong *val)
3158 return read_hvipriox(env, 4, env->hviprio, val);
3161 static int write_hviprio1h(CPURISCVState *env, int csrno, target_ulong val)
3163 return write_hvipriox(env, 4, env->hviprio, val);
3166 static int read_hviprio2(CPURISCVState *env, int csrno, target_ulong *val)
3168 return read_hvipriox(env, 8, env->hviprio, val);
3171 static int write_hviprio2(CPURISCVState *env, int csrno, target_ulong val)
3173 return write_hvipriox(env, 8, env->hviprio, val);
3176 static int read_hviprio2h(CPURISCVState *env, int csrno, target_ulong *val)
3178 return read_hvipriox(env, 12, env->hviprio, val);
3181 static int write_hviprio2h(CPURISCVState *env, int csrno, target_ulong val)
3183 return write_hvipriox(env, 12, env->hviprio, val);
3186 /* Virtual CSR Registers */
3187 static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
3188 target_ulong *val)
3190 *val = env->vsstatus;
3191 return RISCV_EXCP_NONE;
3194 static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
3195 target_ulong val)
3197 uint64_t mask = (target_ulong)-1;
3198 if ((val & VSSTATUS64_UXL) == 0) {
3199 mask &= ~VSSTATUS64_UXL;
3201 env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
3202 return RISCV_EXCP_NONE;
3205 static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val)
3207 *val = env->vstvec;
3208 return RISCV_EXCP_NONE;
3211 static RISCVException write_vstvec(CPURISCVState *env, int csrno,
3212 target_ulong val)
3214 env->vstvec = val;
3215 return RISCV_EXCP_NONE;
3218 static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
3219 target_ulong *val)
3221 *val = env->vsscratch;
3222 return RISCV_EXCP_NONE;
3225 static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
3226 target_ulong val)
3228 env->vsscratch = val;
3229 return RISCV_EXCP_NONE;
3232 static RISCVException read_vsepc(CPURISCVState *env, int csrno,
3233 target_ulong *val)
3235 *val = env->vsepc;
3236 return RISCV_EXCP_NONE;
3239 static RISCVException write_vsepc(CPURISCVState *env, int csrno,
3240 target_ulong val)
3242 env->vsepc = val;
3243 return RISCV_EXCP_NONE;
3246 static RISCVException read_vscause(CPURISCVState *env, int csrno,
3247 target_ulong *val)
3249 *val = env->vscause;
3250 return RISCV_EXCP_NONE;
3253 static RISCVException write_vscause(CPURISCVState *env, int csrno,
3254 target_ulong val)
3256 env->vscause = val;
3257 return RISCV_EXCP_NONE;
3260 static RISCVException read_vstval(CPURISCVState *env, int csrno,
3261 target_ulong *val)
3263 *val = env->vstval;
3264 return RISCV_EXCP_NONE;
3267 static RISCVException write_vstval(CPURISCVState *env, int csrno,
3268 target_ulong val)
3270 env->vstval = val;
3271 return RISCV_EXCP_NONE;
3274 static RISCVException read_vsatp(CPURISCVState *env, int csrno,
3275 target_ulong *val)
3277 *val = env->vsatp;
3278 return RISCV_EXCP_NONE;
3281 static RISCVException write_vsatp(CPURISCVState *env, int csrno,
3282 target_ulong val)
3284 env->vsatp = val;
3285 return RISCV_EXCP_NONE;
3288 static RISCVException read_mtval2(CPURISCVState *env, int csrno,
3289 target_ulong *val)
3291 *val = env->mtval2;
3292 return RISCV_EXCP_NONE;
3295 static RISCVException write_mtval2(CPURISCVState *env, int csrno,
3296 target_ulong val)
3298 env->mtval2 = val;
3299 return RISCV_EXCP_NONE;
3302 static RISCVException read_mtinst(CPURISCVState *env, int csrno,
3303 target_ulong *val)
3305 *val = env->mtinst;
3306 return RISCV_EXCP_NONE;
3309 static RISCVException write_mtinst(CPURISCVState *env, int csrno,
3310 target_ulong val)
3312 env->mtinst = val;
3313 return RISCV_EXCP_NONE;
3316 /* Physical Memory Protection */
3317 static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
3318 target_ulong *val)
3320 *val = mseccfg_csr_read(env);
3321 return RISCV_EXCP_NONE;
3324 static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
3325 target_ulong val)
3327 mseccfg_csr_write(env, val);
3328 return RISCV_EXCP_NONE;
3331 static bool check_pmp_reg_index(CPURISCVState *env, uint32_t reg_index)
3333 /* TODO: RV128 restriction check */
3334 if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
3335 return false;
3337 return true;
3340 static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
3341 target_ulong *val)
3343 uint32_t reg_index = csrno - CSR_PMPCFG0;
3345 if (!check_pmp_reg_index(env, reg_index)) {
3346 return RISCV_EXCP_ILLEGAL_INST;
3348 *val = pmpcfg_csr_read(env, csrno - CSR_PMPCFG0);
3349 return RISCV_EXCP_NONE;
3352 static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
3353 target_ulong val)
3355 uint32_t reg_index = csrno - CSR_PMPCFG0;
3357 if (!check_pmp_reg_index(env, reg_index)) {
3358 return RISCV_EXCP_ILLEGAL_INST;
3360 pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val);
3361 return RISCV_EXCP_NONE;
3364 static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
3365 target_ulong *val)
3367 *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
3368 return RISCV_EXCP_NONE;
3371 static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
3372 target_ulong val)
3374 pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
3375 return RISCV_EXCP_NONE;
3378 static RISCVException read_tselect(CPURISCVState *env, int csrno,
3379 target_ulong *val)
3381 *val = tselect_csr_read(env);
3382 return RISCV_EXCP_NONE;
3385 static RISCVException write_tselect(CPURISCVState *env, int csrno,
3386 target_ulong val)
3388 tselect_csr_write(env, val);
3389 return RISCV_EXCP_NONE;
3392 static RISCVException read_tdata(CPURISCVState *env, int csrno,
3393 target_ulong *val)
3395 /* return 0 in tdata1 to end the trigger enumeration */
3396 if (env->trigger_cur >= RV_MAX_TRIGGERS && csrno == CSR_TDATA1) {
3397 *val = 0;
3398 return RISCV_EXCP_NONE;
3401 if (!tdata_available(env, csrno - CSR_TDATA1)) {
3402 return RISCV_EXCP_ILLEGAL_INST;
3405 *val = tdata_csr_read(env, csrno - CSR_TDATA1);
3406 return RISCV_EXCP_NONE;
3409 static RISCVException write_tdata(CPURISCVState *env, int csrno,
3410 target_ulong val)
3412 if (!tdata_available(env, csrno - CSR_TDATA1)) {
3413 return RISCV_EXCP_ILLEGAL_INST;
3416 tdata_csr_write(env, csrno - CSR_TDATA1, val);
3417 return RISCV_EXCP_NONE;
3420 static RISCVException read_tinfo(CPURISCVState *env, int csrno,
3421 target_ulong *val)
3423 *val = tinfo_csr_read(env);
3424 return RISCV_EXCP_NONE;
3428 * Functions to access the Pointer Masking feature registers.
3429 * We have to check whether the current privilege level is allowed to
3430 * modify the CSR in the given mode.
3432 static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
3434 int csr_priv = get_field(csrno, 0x300);
3435 int pm_current;
3437 if (env->debugger) {
3438 return false;
3441 * If the privilege levels differ, we are accessing the CSR from a higher
3442 * privilege level, so allow the access.
3444 if (env->priv != csr_priv) {
3445 return false;
3447 switch (env->priv) {
3448 case PRV_M:
3449 pm_current = get_field(env->mmte, M_PM_CURRENT);
3450 break;
3451 case PRV_S:
3452 pm_current = get_field(env->mmte, S_PM_CURRENT);
3453 break;
3454 case PRV_U:
3455 pm_current = get_field(env->mmte, U_PM_CURRENT);
3456 break;
3457 default:
3458 g_assert_not_reached();
3460 /* Same privilege level, so we only allow modifying the CSR if pm.current==1 */
3461 return !pm_current;
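/*
 * Editor's illustration (not part of the original source): the helpers
 * below use this check to silently drop same-level updates when
 * pm.current is 0.  For example, if S-mode code writes spmmask while
 * mmte.S_PM_CURRENT == 0, check_pm_current_disabled() returns true and
 * write_spmmask() returns RISCV_EXCP_NONE without touching the CSR;
 * M-mode, or the debugger (for which the check is bypassed), can still
 * update it.
 */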
3464 static RISCVException read_mmte(CPURISCVState *env, int csrno,
3465 target_ulong *val)
3467 *val = env->mmte & MMTE_MASK;
3468 return RISCV_EXCP_NONE;
3471 static RISCVException write_mmte(CPURISCVState *env, int csrno,
3472 target_ulong val)
3474 uint64_t mstatus;
3475 target_ulong wpri_val = val & MMTE_MASK;
3477 if (val != wpri_val) {
3478 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
3479 "MMTE: WPRI violation written 0x", val,
3480 "vs expected 0x", wpri_val);
3482 /* for machine mode pm.current is hardwired to 1 */
3483 wpri_val |= MMTE_M_PM_CURRENT;
3485 /* hardwiring pm.instruction bit to 0, since it's not supported yet */
3486 wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
3487 env->mmte = wpri_val | PM_EXT_DIRTY;
3488 riscv_cpu_update_mask(env);
3490 /* Set XS and SD bits, since PM CSRs are dirty */
3491 mstatus = env->mstatus | MSTATUS_XS;
3492 write_mstatus(env, csrno, mstatus);
3493 return RISCV_EXCP_NONE;
3496 static RISCVException read_smte(CPURISCVState *env, int csrno,
3497 target_ulong *val)
3499 *val = env->mmte & SMTE_MASK;
3500 return RISCV_EXCP_NONE;
3503 static RISCVException write_smte(CPURISCVState *env, int csrno,
3504 target_ulong val)
3506 target_ulong wpri_val = val & SMTE_MASK;
3508 if (val != wpri_val) {
3509 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
3510 "SMTE: WPRI violation written 0x", val,
3511 "vs expected 0x", wpri_val);
3514 /* if pm.current==0 we can't modify current PM CSRs */
3515 if (check_pm_current_disabled(env, csrno)) {
3516 return RISCV_EXCP_NONE;
3519 wpri_val |= (env->mmte & ~SMTE_MASK);
3520 write_mmte(env, csrno, wpri_val);
3521 return RISCV_EXCP_NONE;
3524 static RISCVException read_umte(CPURISCVState *env, int csrno,
3525 target_ulong *val)
3527 *val = env->mmte & UMTE_MASK;
3528 return RISCV_EXCP_NONE;
3531 static RISCVException write_umte(CPURISCVState *env, int csrno,
3532 target_ulong val)
3534 target_ulong wpri_val = val & UMTE_MASK;
3536 if (val != wpri_val) {
3537 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
3538 "UMTE: WPRI violation written 0x", val,
3539 "vs expected 0x", wpri_val);
3542 if (check_pm_current_disabled(env, csrno)) {
3543 return RISCV_EXCP_NONE;
3546 wpri_val |= (env->mmte & ~UMTE_MASK);
3547 write_mmte(env, csrno, wpri_val);
3548 return RISCV_EXCP_NONE;
3551 static RISCVException read_mpmmask(CPURISCVState *env, int csrno,
3552 target_ulong *val)
3554 *val = env->mpmmask;
3555 return RISCV_EXCP_NONE;
3558 static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
3559 target_ulong val)
3561 uint64_t mstatus;
3563 env->mpmmask = val;
3564 if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
3565 env->cur_pmmask = val;
3567 env->mmte |= PM_EXT_DIRTY;
3569 /* Set XS and SD bits, since PM CSRs are dirty */
3570 mstatus = env->mstatus | MSTATUS_XS;
3571 write_mstatus(env, csrno, mstatus);
3572 return RISCV_EXCP_NONE;
3575 static RISCVException read_spmmask(CPURISCVState *env, int csrno,
3576 target_ulong *val)
3578 *val = env->spmmask;
3579 return RISCV_EXCP_NONE;
3582 static RISCVException write_spmmask(CPURISCVState *env, int csrno,
3583 target_ulong val)
3585 uint64_t mstatus;
3587 /* if pm.current==0 we can't modify current PM CSRs */
3588 if (check_pm_current_disabled(env, csrno)) {
3589 return RISCV_EXCP_NONE;
3591 env->spmmask = val;
3592 if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
3593 env->cur_pmmask = val;
3595 env->mmte |= PM_EXT_DIRTY;
3597 /* Set XS and SD bits, since PM CSRs are dirty */
3598 mstatus = env->mstatus | MSTATUS_XS;
3599 write_mstatus(env, csrno, mstatus);
3600 return RISCV_EXCP_NONE;
3603 static RISCVException read_upmmask(CPURISCVState *env, int csrno,
3604 target_ulong *val)
3606 *val = env->upmmask;
3607 return RISCV_EXCP_NONE;
3610 static RISCVException write_upmmask(CPURISCVState *env, int csrno,
3611 target_ulong val)
3613 uint64_t mstatus;
3615 /* if pm.current==0 we can't modify current PM CSRs */
3616 if (check_pm_current_disabled(env, csrno)) {
3617 return RISCV_EXCP_NONE;
3619 env->upmmask = val;
3620 if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
3621 env->cur_pmmask = val;
3623 env->mmte |= PM_EXT_DIRTY;
3625 /* Set XS and SD bits, since PM CSRs are dirty */
3626 mstatus = env->mstatus | MSTATUS_XS;
3627 write_mstatus(env, csrno, mstatus);
3628 return RISCV_EXCP_NONE;
3631 static RISCVException read_mpmbase(CPURISCVState *env, int csrno,
3632 target_ulong *val)
3634 *val = env->mpmbase;
3635 return RISCV_EXCP_NONE;
3638 static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
3639 target_ulong val)
3641 uint64_t mstatus;
3643 env->mpmbase = val;
3644 if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
3645 env->cur_pmbase = val;
3647 env->mmte |= PM_EXT_DIRTY;
3649 /* Set XS and SD bits, since PM CSRs are dirty */
3650 mstatus = env->mstatus | MSTATUS_XS;
3651 write_mstatus(env, csrno, mstatus);
3652 return RISCV_EXCP_NONE;
3655 static RISCVException read_spmbase(CPURISCVState *env, int csrno,
3656 target_ulong *val)
3658 *val = env->spmbase;
3659 return RISCV_EXCP_NONE;
3662 static RISCVException write_spmbase(CPURISCVState *env, int csrno,
3663 target_ulong val)
3665 uint64_t mstatus;
3667 /* if pm.current==0 we can't modify current PM CSRs */
3668 if (check_pm_current_disabled(env, csrno)) {
3669 return RISCV_EXCP_NONE;
3671 env->spmbase = val;
3672 if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
3673 env->cur_pmbase = val;
3675 env->mmte |= PM_EXT_DIRTY;
3677 /* Set XS and SD bits, since PM CSRs are dirty */
3678 mstatus = env->mstatus | MSTATUS_XS;
3679 write_mstatus(env, csrno, mstatus);
3680 return RISCV_EXCP_NONE;
3683 static RISCVException read_upmbase(CPURISCVState *env, int csrno,
3684 target_ulong *val)
3686 *val = env->upmbase;
3687 return RISCV_EXCP_NONE;
3690 static RISCVException write_upmbase(CPURISCVState *env, int csrno,
3691 target_ulong val)
3693 uint64_t mstatus;
3695 /* if pm.current==0 we can't modify current PM CSRs */
3696 if (check_pm_current_disabled(env, csrno)) {
3697 return RISCV_EXCP_NONE;
3699 env->upmbase = val;
3700 if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
3701 env->cur_pmbase = val;
3703 env->mmte |= PM_EXT_DIRTY;
3705 /* Set XS and SD bits, since PM CSRs are dirty */
3706 mstatus = env->mstatus | MSTATUS_XS;
3707 write_mstatus(env, csrno, mstatus);
3708 return RISCV_EXCP_NONE;
3711 #endif
3713 /* Crypto Extension */
3714 static RISCVException rmw_seed(CPURISCVState *env, int csrno,
3715 target_ulong *ret_value,
3716 target_ulong new_value,
3717 target_ulong write_mask)
3719 uint16_t random_v;
3720 Error *random_e = NULL;
3721 int random_r;
3722 target_ulong rval;
3724 random_r = qemu_guest_getrandom(&random_v, 2, &random_e);
3725 if (unlikely(random_r < 0)) {
3727 * Failed, for unknown reasons in the crypto subsystem.
3728 * The best we can do is log the reason and return a
3729 * failure indication to the guest. There is no reason
3730 * we know to expect the failure to be transitory, so
3731 * indicate DEAD to avoid having the guest spin on WAIT.
3733 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
3734 __func__, error_get_pretty(random_e));
3735 error_free(random_e);
3736 rval = SEED_OPST_DEAD;
3737 } else {
3738 rval = random_v | SEED_OPST_ES16;
3741 if (ret_value) {
3742 *ret_value = rval;
3745 return RISCV_EXCP_NONE;
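/*
 * Editor's note (illustration, not part of the original source): on
 * success the value returned to the guest combines the 16 random bits
 * with the ES16 status in the seed CSR's OPST field, e.g.
 *
 *     rval = 0x4f21 | SEED_OPST_ES16
 *
 * so a guest polling the CSR retries on a WAIT status and gives up on
 * DEAD, which is what the failure path above reports.
 */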
3749 * riscv_csrrw - read and/or update control and status register
3751 * csrr <-> riscv_csrrw(env, csrno, ret_value, 0, 0);
3752 * csrrw <-> riscv_csrrw(env, csrno, ret_value, value, -1);
3753 * csrrs <-> riscv_csrrw(env, csrno, ret_value, -1, value);
3754 * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value);
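/*
 * Editor's sketch (not part of the original source; the caller shown is
 * hypothetical): following the mapping above, a csrrs-style access with
 * source value 'mask' could be emulated as
 *
 *     target_ulong old;
 *     RISCVException ret = riscv_csrrw(env, csrno, &old,
 *                                      (target_ulong)-1, mask);
 *     if (ret != RISCV_EXCP_NONE) {
 *         riscv_raise_exception(env, ret, GETPC());
 *     }
 *
 * 'old' is what rd receives; only the bits set in 'mask' are set in the
 * CSR, everything else is preserved by riscv_csrrw_do64().
 */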
3757 static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
3758 int csrno,
3759 bool write_mask,
3760 RISCVCPU *cpu)
3762 /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
3763 int read_only = get_field(csrno, 0xC00) == 3;
3764 int csr_min_priv = csr_ops[csrno].min_priv_ver;
3766 /* ensure the CSR extension is enabled. */
3767 if (!cpu->cfg.ext_icsr) {
3768 return RISCV_EXCP_ILLEGAL_INST;
3771 if (env->priv_ver < csr_min_priv) {
3772 return RISCV_EXCP_ILLEGAL_INST;
3775 /* check predicate */
3776 if (!csr_ops[csrno].predicate) {
3777 return RISCV_EXCP_ILLEGAL_INST;
3780 if (write_mask && read_only) {
3781 return RISCV_EXCP_ILLEGAL_INST;
3784 RISCVException ret = csr_ops[csrno].predicate(env, csrno);
3785 if (ret != RISCV_EXCP_NONE) {
3786 return ret;
3789 #if !defined(CONFIG_USER_ONLY)
3790 int csr_priv, effective_priv = env->priv;
3792 if (riscv_has_ext(env, RVH) && env->priv == PRV_S &&
3793 !riscv_cpu_virt_enabled(env)) {
3795 * We are in HS mode. Add 1 to the effective privilege level to
3796 * allow us to access the Hypervisor CSRs.
3798 effective_priv++;
3801 csr_priv = get_field(csrno, 0x300);
3802 if (!env->debugger && (effective_priv < csr_priv)) {
3803 if (csr_priv == (PRV_S + 1) && riscv_cpu_virt_enabled(env)) {
3804 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
3806 return RISCV_EXCP_ILLEGAL_INST;
3808 #endif
3809 return RISCV_EXCP_NONE;
3812 static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
3813 target_ulong *ret_value,
3814 target_ulong new_value,
3815 target_ulong write_mask)
3817 RISCVException ret;
3818 target_ulong old_value;
3820 /* execute combined read/write operation if it exists */
3821 if (csr_ops[csrno].op) {
3822 return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
3825 /* if no accessor exists then return failure */
3826 if (!csr_ops[csrno].read) {
3827 return RISCV_EXCP_ILLEGAL_INST;
3829 /* read old value */
3830 ret = csr_ops[csrno].read(env, csrno, &old_value);
3831 if (ret != RISCV_EXCP_NONE) {
3832 return ret;
3835 /* write value if writable and write mask set, otherwise drop writes */
3836 if (write_mask) {
3837 new_value = (old_value & ~write_mask) | (new_value & write_mask);
3838 if (csr_ops[csrno].write) {
3839 ret = csr_ops[csrno].write(env, csrno, new_value);
3840 if (ret != RISCV_EXCP_NONE) {
3841 return ret;
3846 /* return old value */
3847 if (ret_value) {
3848 *ret_value = old_value;
3851 return RISCV_EXCP_NONE;
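/*
 * Editor's illustration (not part of the original source): the masking
 * in riscv_csrrw_do64() composes the written value as
 *
 *     new = (old & ~write_mask) | (new_value & write_mask)
 *
 * e.g. old = 0xabcd, write_mask = 0x00ff, new_value = 0x1234 gives
 * 0xab00 | 0x0034 == 0xab34, while *ret_value still reports the
 * original 0xabcd.
 */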
3854 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
3855 target_ulong *ret_value,
3856 target_ulong new_value, target_ulong write_mask)
3858 RISCVCPU *cpu = env_archcpu(env);
3860 RISCVException ret = riscv_csrrw_check(env, csrno, write_mask, cpu);
3861 if (ret != RISCV_EXCP_NONE) {
3862 return ret;
3865 return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
3868 static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
3869 Int128 *ret_value,
3870 Int128 new_value,
3871 Int128 write_mask)
3873 RISCVException ret;
3874 Int128 old_value;
3876 /* read old value */
3877 ret = csr_ops[csrno].read128(env, csrno, &old_value);
3878 if (ret != RISCV_EXCP_NONE) {
3879 return ret;
3882 /* write value if writable and write mask set, otherwise drop writes */
3883 if (int128_nz(write_mask)) {
3884 new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
3885 int128_and(new_value, write_mask));
3886 if (csr_ops[csrno].write128) {
3887 ret = csr_ops[csrno].write128(env, csrno, new_value);
3888 if (ret != RISCV_EXCP_NONE) {
3889 return ret;
3891 } else if (csr_ops[csrno].write) {
3892 /* avoids having to write wrappers for all registers */
3893 ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
3894 if (ret != RISCV_EXCP_NONE) {
3895 return ret;
3900 /* return old value */
3901 if (ret_value) {
3902 *ret_value = old_value;
3905 return RISCV_EXCP_NONE;
3908 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
3909 Int128 *ret_value,
3910 Int128 new_value, Int128 write_mask)
3912 RISCVException ret;
3913 RISCVCPU *cpu = env_archcpu(env);
3915 ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask), cpu);
3916 if (ret != RISCV_EXCP_NONE) {
3917 return ret;
3920 if (csr_ops[csrno].read128) {
3921 return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
3925 * Fall back to the 64-bit version for now, if no 128-bit alternative is
3926 * defined at all.
3927 * Note: some CSRs don't need to extend to MXLEN (the upper 64 bits are not
3928 * significant); for those, this fallback handles the accesses correctly.
3930 target_ulong old_value;
3931 ret = riscv_csrrw_do64(env, csrno, &old_value,
3932 int128_getlo(new_value),
3933 int128_getlo(write_mask));
3934 if (ret == RISCV_EXCP_NONE && ret_value) {
3935 *ret_value = int128_make64(old_value);
3937 return ret;
3941 * Debugger support. If not in user mode, set env->debugger before the
3942 * riscv_csrrw call and clear it after the call.
3944 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
3945 target_ulong *ret_value,
3946 target_ulong new_value,
3947 target_ulong write_mask)
3949 RISCVException ret;
3950 #if !defined(CONFIG_USER_ONLY)
3951 env->debugger = true;
3952 #endif
3953 ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
3954 #if !defined(CONFIG_USER_ONLY)
3955 env->debugger = false;
3956 #endif
3957 return ret;
3960 /* Control and Status Register function table */
3961 riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
3962 /* User Floating-Point CSRs */
3963 [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
3964 [CSR_FRM] = { "frm", fs, read_frm, write_frm },
3965 [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr },
3966 /* Vector CSRs */
3967 [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart,
3968 .min_priv_ver = PRIV_VERSION_1_12_0 },
3969 [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat,
3970 .min_priv_ver = PRIV_VERSION_1_12_0 },
3971 [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm,
3972 .min_priv_ver = PRIV_VERSION_1_12_0 },
3973 [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr,
3974 .min_priv_ver = PRIV_VERSION_1_12_0 },
3975 [CSR_VL] = { "vl", vs, read_vl,
3976 .min_priv_ver = PRIV_VERSION_1_12_0 },
3977 [CSR_VTYPE] = { "vtype", vs, read_vtype,
3978 .min_priv_ver = PRIV_VERSION_1_12_0 },
3979 [CSR_VLENB] = { "vlenb", vs, read_vlenb,
3980 .min_priv_ver = PRIV_VERSION_1_12_0 },
3981 /* User Timers and Counters */
3982 [CSR_CYCLE] = { "cycle", ctr, read_hpmcounter },
3983 [CSR_INSTRET] = { "instret", ctr, read_hpmcounter },
3984 [CSR_CYCLEH] = { "cycleh", ctr32, read_hpmcounterh },
3985 [CSR_INSTRETH] = { "instreth", ctr32, read_hpmcounterh },
3988 * In privileged mode, the monitor has to emulate the TIME CSRs only if the
3989 * rdtime callback is not provided by the machine/platform emulation.
3991 [CSR_TIME] = { "time", ctr, read_time },
3992 [CSR_TIMEH] = { "timeh", ctr32, read_timeh },
3994 /* Crypto Extension */
3995 [CSR_SEED] = { "seed", seed, NULL, NULL, rmw_seed },
3997 #if !defined(CONFIG_USER_ONLY)
3998 /* Machine Timers and Counters */
3999 [CSR_MCYCLE] = { "mcycle", any, read_hpmcounter,
4000 write_mhpmcounter },
4001 [CSR_MINSTRET] = { "minstret", any, read_hpmcounter,
4002 write_mhpmcounter },
4003 [CSR_MCYCLEH] = { "mcycleh", any32, read_hpmcounterh,
4004 write_mhpmcounterh },
4005 [CSR_MINSTRETH] = { "minstreth", any32, read_hpmcounterh,
4006 write_mhpmcounterh },
4008 /* Machine Information Registers */
4009 [CSR_MVENDORID] = { "mvendorid", any, read_mvendorid },
4010 [CSR_MARCHID] = { "marchid", any, read_marchid },
4011 [CSR_MIMPID] = { "mimpid", any, read_mimpid },
4012 [CSR_MHARTID] = { "mhartid", any, read_mhartid },
4014 [CSR_MCONFIGPTR] = { "mconfigptr", any, read_zero,
4015 .min_priv_ver = PRIV_VERSION_1_12_0 },
4016 /* Machine Trap Setup */
4017 [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus,
4018 NULL, read_mstatus_i128 },
4019 [CSR_MISA] = { "misa", any, read_misa, write_misa,
4020 NULL, read_misa_i128 },
4021 [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg },
4022 [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg },
4023 [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie },
4024 [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec },
4025 [CSR_MCOUNTEREN] = { "mcounteren", umode, read_mcounteren,
4026 write_mcounteren },
4028 [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush,
4029 write_mstatush },
4031 /* Machine Trap Handling */
4032 [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch,
4033 NULL, read_mscratch_i128, write_mscratch_i128 },
4034 [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc },
4035 [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause },
4036 [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval },
4037 [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
4039 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */
4040 [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect },
4041 [CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg },
4043 /* Machine-Level Interrupts (AIA) */
4044 [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
4045 [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },
4047 /* Virtual Interrupts for Supervisor Level (AIA) */
4048 [CSR_MVIEN] = { "mvien", aia_any, read_zero, write_ignore },
4049 [CSR_MVIP] = { "mvip", aia_any, read_zero, write_ignore },
4051 /* Machine-Level High-Half CSRs (AIA) */
4052 [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh },
4053 [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh },
4054 [CSR_MVIENH] = { "mvienh", aia_any32, read_zero, write_ignore },
4055 [CSR_MVIPH] = { "mviph", aia_any32, read_zero, write_ignore },
4056 [CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph },
4058 /* Execution environment configuration */
4059 [CSR_MENVCFG] = { "menvcfg", umode, read_menvcfg, write_menvcfg,
4060 .min_priv_ver = PRIV_VERSION_1_12_0 },
4061 [CSR_MENVCFGH] = { "menvcfgh", umode32, read_menvcfgh, write_menvcfgh,
4062 .min_priv_ver = PRIV_VERSION_1_12_0 },
4063 [CSR_SENVCFG] = { "senvcfg", smode, read_senvcfg, write_senvcfg,
4064 .min_priv_ver = PRIV_VERSION_1_12_0 },
4065 [CSR_HENVCFG] = { "henvcfg", hmode, read_henvcfg, write_henvcfg,
4066 .min_priv_ver = PRIV_VERSION_1_12_0 },
4067 [CSR_HENVCFGH] = { "henvcfgh", hmode32, read_henvcfgh, write_henvcfgh,
4068 .min_priv_ver = PRIV_VERSION_1_12_0 },
4070 /* Smstateen extension CSRs */
4071 [CSR_MSTATEEN0] = { "mstateen0", mstateen, read_mstateen, write_mstateen0,
4072 .min_priv_ver = PRIV_VERSION_1_12_0 },
4073 [CSR_MSTATEEN0H] = { "mstateen0h", mstateen, read_mstateenh,
4074 write_mstateen0h,
4075 .min_priv_ver = PRIV_VERSION_1_12_0 },
4076 [CSR_MSTATEEN1] = { "mstateen1", mstateen, read_mstateen,
4077 write_mstateen_1_3,
4078 .min_priv_ver = PRIV_VERSION_1_12_0 },
4079 [CSR_MSTATEEN1H] = { "mstateen1h", mstateen, read_mstateenh,
4080 write_mstateenh_1_3,
4081 .min_priv_ver = PRIV_VERSION_1_12_0 },
4082 [CSR_MSTATEEN2] = { "mstateen2", mstateen, read_mstateen,
4083 write_mstateen_1_3,
4084 .min_priv_ver = PRIV_VERSION_1_12_0 },
4085 [CSR_MSTATEEN2H] = { "mstateen2h", mstateen, read_mstateenh,
4086 write_mstateenh_1_3,
4087 .min_priv_ver = PRIV_VERSION_1_12_0 },
4088 [CSR_MSTATEEN3] = { "mstateen3", mstateen, read_mstateen,
4089 write_mstateen_1_3,
4090 .min_priv_ver = PRIV_VERSION_1_12_0 },
4091 [CSR_MSTATEEN3H] = { "mstateen3h", mstateen, read_mstateenh,
4092 write_mstateenh_1_3,
4093 .min_priv_ver = PRIV_VERSION_1_12_0 },
4094 [CSR_HSTATEEN0] = { "hstateen0", hstateen, read_hstateen, write_hstateen0,
4095 .min_priv_ver = PRIV_VERSION_1_12_0 },
4096 [CSR_HSTATEEN0H] = { "hstateen0h", hstateenh, read_hstateenh,
4097 write_hstateen0h,
4098 .min_priv_ver = PRIV_VERSION_1_12_0 },
4099 [CSR_HSTATEEN1] = { "hstateen1", hstateen, read_hstateen,
4100 write_hstateen_1_3,
4101 .min_priv_ver = PRIV_VERSION_1_12_0 },
4102 [CSR_HSTATEEN1H] = { "hstateen1h", hstateenh, read_hstateenh,
4103 write_hstateenh_1_3,
4104 .min_priv_ver = PRIV_VERSION_1_12_0 },
4105 [CSR_HSTATEEN2] = { "hstateen2", hstateen, read_hstateen,
4106 write_hstateen_1_3,
4107 .min_priv_ver = PRIV_VERSION_1_12_0 },
4108 [CSR_HSTATEEN2H] = { "hstateen2h", hstateenh, read_hstateenh,
4109 write_hstateenh_1_3,
4110 .min_priv_ver = PRIV_VERSION_1_12_0 },
4111 [CSR_HSTATEEN3] = { "hstateen3", hstateen, read_hstateen,
4112 write_hstateen_1_3,
4113 .min_priv_ver = PRIV_VERSION_1_12_0 },
4114 [CSR_HSTATEEN3H] = { "hstateen3h", hstateenh, read_hstateenh,
4115 write_hstateenh_1_3,
4116 .min_priv_ver = PRIV_VERSION_1_12_0 },
4117 [CSR_SSTATEEN0] = { "sstateen0", sstateen, read_sstateen, write_sstateen0,
4118 .min_priv_ver = PRIV_VERSION_1_12_0 },
4119 [CSR_SSTATEEN1] = { "sstateen1", sstateen, read_sstateen,
4120 write_sstateen_1_3,
4121 .min_priv_ver = PRIV_VERSION_1_12_0 },
4122 [CSR_SSTATEEN2] = { "sstateen2", sstateen, read_sstateen,
4123 write_sstateen_1_3,
4124 .min_priv_ver = PRIV_VERSION_1_12_0 },
4125 [CSR_SSTATEEN3] = { "sstateen3", sstateen, read_sstateen,
4126 write_sstateen_1_3,
4127 .min_priv_ver = PRIV_VERSION_1_12_0 },
4129 /* Supervisor Trap Setup */
4130 [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus,
4131 NULL, read_sstatus_i128 },
4132 [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie },
4133 [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec },
4134 [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren,
4135 write_scounteren },
4137 /* Supervisor Trap Handling */
4138 [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch,
4139 NULL, read_sscratch_i128, write_sscratch_i128 },
4140 [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc },
4141 [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause },
4142 [CSR_STVAL] = { "stval", smode, read_stval, write_stval },
4143 [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip },
4144 [CSR_STIMECMP] = { "stimecmp", sstc, read_stimecmp, write_stimecmp,
4145 .min_priv_ver = PRIV_VERSION_1_12_0 },
4146 [CSR_STIMECMPH] = { "stimecmph", sstc_32, read_stimecmph, write_stimecmph,
4147 .min_priv_ver = PRIV_VERSION_1_12_0 },
4148 [CSR_VSTIMECMP] = { "vstimecmp", sstc, read_vstimecmp,
4149 write_vstimecmp,
4150 .min_priv_ver = PRIV_VERSION_1_12_0 },
4151 [CSR_VSTIMECMPH] = { "vstimecmph", sstc_32, read_vstimecmph,
4152 write_vstimecmph,
4153 .min_priv_ver = PRIV_VERSION_1_12_0 },
4155 /* Supervisor Protection and Translation */
4156 [CSR_SATP] = { "satp", smode, read_satp, write_satp },
4158 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
4159 [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect },
4160 [CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg },
4162 /* Supervisor-Level Interrupts (AIA) */
4163 [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
4164 [CSR_STOPI] = { "stopi", aia_smode, read_stopi },
4166 /* Supervisor-Level High-Half CSRs (AIA) */
4167 [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh },
4168 [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph },
4170 [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus,
4171 .min_priv_ver = PRIV_VERSION_1_12_0 },
4172 [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg,
4173 .min_priv_ver = PRIV_VERSION_1_12_0 },
4174 [CSR_HIDELEG] = { "hideleg", hmode, NULL, NULL, rmw_hideleg,
4175 .min_priv_ver = PRIV_VERSION_1_12_0 },
4176 [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip,
4177 .min_priv_ver = PRIV_VERSION_1_12_0 },
4178 [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip,
4179 .min_priv_ver = PRIV_VERSION_1_12_0 },
4180 [CSR_HIE] = { "hie", hmode, NULL, NULL, rmw_hie,
4181 .min_priv_ver = PRIV_VERSION_1_12_0 },
4182 [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren,
4183 write_hcounteren,
4184 .min_priv_ver = PRIV_VERSION_1_12_0 },
4185 [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie,
4186 .min_priv_ver = PRIV_VERSION_1_12_0 },
4187 [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval,
4188 .min_priv_ver = PRIV_VERSION_1_12_0 },
4189 [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst,
4190 .min_priv_ver = PRIV_VERSION_1_12_0 },
4191 [CSR_HGEIP] = { "hgeip", hmode, read_hgeip,
4192 .min_priv_ver = PRIV_VERSION_1_12_0 },
4193 [CSR_HGATP] = { "hgatp", hmode, read_hgatp, write_hgatp,
4194 .min_priv_ver = PRIV_VERSION_1_12_0 },
4195 [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta,
4196 write_htimedelta,
4197 .min_priv_ver = PRIV_VERSION_1_12_0 },
4198 [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah,
4199 write_htimedeltah,
4200 .min_priv_ver = PRIV_VERSION_1_12_0 },
4202 [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus,
4203 write_vsstatus,
4204 .min_priv_ver = PRIV_VERSION_1_12_0 },
4205 [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip,
4206 .min_priv_ver = PRIV_VERSION_1_12_0 },
4207 [CSR_VSIE] = { "vsie", hmode, NULL, NULL, rmw_vsie,
4208 .min_priv_ver = PRIV_VERSION_1_12_0 },
4209 [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec,
4210 .min_priv_ver = PRIV_VERSION_1_12_0 },
4211 [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch,
4212 write_vsscratch,
4213 .min_priv_ver = PRIV_VERSION_1_12_0 },
4214 [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc,
4215 .min_priv_ver = PRIV_VERSION_1_12_0 },
4216 [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause,
4217 .min_priv_ver = PRIV_VERSION_1_12_0 },
4218 [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval,
4219 .min_priv_ver = PRIV_VERSION_1_12_0 },
4220 [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp,
4221 .min_priv_ver = PRIV_VERSION_1_12_0 },
4223 [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2,
4224 .min_priv_ver = PRIV_VERSION_1_12_0 },
4225 [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst,
4226 .min_priv_ver = PRIV_VERSION_1_12_0 },
4228 /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
4229 [CSR_HVIEN] = { "hvien", aia_hmode, read_zero, write_ignore },
4230 [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl,
4231 write_hvictl },
4232 [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1,
4233 write_hviprio1 },
4234 [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2,
4235 write_hviprio2 },
4238 * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
4240 [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL,
4241 rmw_xiselect },
4242 [CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg },
4244 /* VS-Level Interrupts (H-extension with AIA) */
4245 [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
4246 [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi },
4248 /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
4249 [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL,
4250 rmw_hidelegh },
4251 [CSR_HVIENH] = { "hvienh", aia_hmode32, read_zero,
4252 write_ignore },
4253 [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph },
4254 [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h,
4255 write_hviprio1h },
4256 [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h,
4257 write_hviprio2h },
4258 [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh },
4259 [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph },
4261 /* Physical Memory Protection */
4262 [CSR_MSECCFG] = { "mseccfg", epmp, read_mseccfg, write_mseccfg,
4263 .min_priv_ver = PRIV_VERSION_1_11_0 },
4264 [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg },
4265 [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg },
4266 [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg },
4267 [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg },
4268 [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr },
4269 [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr },
4270 [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr },
4271 [CSR_PMPADDR3] = { "pmpaddr3", pmp, read_pmpaddr, write_pmpaddr },
4272 [CSR_PMPADDR4] = { "pmpaddr4", pmp, read_pmpaddr, write_pmpaddr },
4273 [CSR_PMPADDR5] = { "pmpaddr5", pmp, read_pmpaddr, write_pmpaddr },
4274 [CSR_PMPADDR6] = { "pmpaddr6", pmp, read_pmpaddr, write_pmpaddr },
4275 [CSR_PMPADDR7] = { "pmpaddr7", pmp, read_pmpaddr, write_pmpaddr },
4276 [CSR_PMPADDR8] = { "pmpaddr8", pmp, read_pmpaddr, write_pmpaddr },
4277 [CSR_PMPADDR9] = { "pmpaddr9", pmp, read_pmpaddr, write_pmpaddr },
4278 [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr },
4279 [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
4280 [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
4281 [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
4282 [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
4283 [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
4285 /* Debug CSRs */
4286 [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect },
4287 [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata },
4288 [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata },
4289 [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata },
4290 [CSR_TINFO] = { "tinfo", debug, read_tinfo, write_ignore },
4292 /* User Pointer Masking */
4293 [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte },
4294 [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask,
4295 write_upmmask },
4296 [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase,
4297 write_upmbase },
4298 /* Machine Pointer Masking */
4299 [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte },
4300 [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask,
4301 write_mpmmask },
4302 [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase,
4303 write_mpmbase },
4304 /* Supervisor Pointer Masking */
4305 [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte },
4306 [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask,
4307 write_spmmask },
4308 [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase,
4309 write_spmbase },
4311 /* Performance Counters */
    [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_hpmcounter },
    [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_hpmcounter },

    [CSR_MHPMCOUNTER3] = { "mhpmcounter3", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER4] = { "mhpmcounter4", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER5] = { "mhpmcounter5", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER6] = { "mhpmcounter6", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER7] = { "mhpmcounter7", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER8] = { "mhpmcounter8", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER9] = { "mhpmcounter9", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER10] = { "mhpmcounter10", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER11] = { "mhpmcounter11", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER12] = { "mhpmcounter12", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER13] = { "mhpmcounter13", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER14] = { "mhpmcounter14", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER15] = { "mhpmcounter15", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER16] = { "mhpmcounter16", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER17] = { "mhpmcounter17", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER18] = { "mhpmcounter18", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER19] = { "mhpmcounter19", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER20] = { "mhpmcounter20", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER21] = { "mhpmcounter21", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER22] = { "mhpmcounter22", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER23] = { "mhpmcounter23", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER24] = { "mhpmcounter24", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER25] = { "mhpmcounter25", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER26] = { "mhpmcounter26", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER27] = { "mhpmcounter27", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER28] = { "mhpmcounter28", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER29] = { "mhpmcounter29", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER30] = { "mhpmcounter30", mctr, read_hpmcounter,
                          write_mhpmcounter },
    [CSR_MHPMCOUNTER31] = { "mhpmcounter31", mctr, read_hpmcounter,
                          write_mhpmcounter },

    [CSR_MCOUNTINHIBIT] = { "mcountinhibit", any, read_mcountinhibit,
                          write_mcountinhibit,
                          .min_priv_ver = PRIV_VERSION_1_11_0 },

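    /*
     * mcountinhibit (priv spec 1.11+) lets M-mode stop individual counters
     * from incrementing; the mhpmevent3-31 registers below select which
     * hardware event each programmable counter tracks.
     */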
    [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_mhpmevent,
                          write_mhpmevent },
    [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent,
                          write_mhpmevent },

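    /*
     * The mhpmevent*h registers exist only with Sscofpmf (counter overflow
     * and filtering) and priv spec 1.12+: on RV32 they hold the upper 32
     * bits of the event selectors, including the overflow and inhibit
     * control bits.
     */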
    [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },
    [CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf, read_mhpmeventh,
                          write_mhpmeventh,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },

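    /*
     * High halves of the counters for RV32; the ctr32/mctr32 predicates
     * additionally reject these CSRs on RV64, where the full 64-bit value
     * is already visible through the base registers above.
     */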
    [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_hpmcounterh },
    [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_hpmcounterh },

    [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
    [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", mctr32, read_hpmcounterh,
                          write_mhpmcounterh },
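    /*
     * scountovf (Sscofpmf, priv spec 1.12+) is read-only: it reports the
     * per-counter overflow bits to S-mode, so no write callback is
     * registered.
     */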
    [CSR_SCOUNTOVF] = { "scountovf", sscofpmf, read_scountovf,
                          .min_priv_ver = PRIV_VERSION_1_12_0 },

#endif /* !CONFIG_USER_ONLY */