target/riscv/csr.c
1 /*
2 * RISC-V Control and Status Registers.
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/log.h"
22 #include "qemu/timer.h"
23 #include "cpu.h"
24 #include "qemu/main-loop.h"
25 #include "exec/exec-all.h"
26 #include "sysemu/cpu-timers.h"
27 #include "qemu/guest-random.h"
28 #include "qapi/error.h"
30 /* CSR function table public API */
31 void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
33 *ops = csr_ops[csrno & (CSR_TABLE_SIZE - 1)];
36 void riscv_set_csr_ops(int csrno, riscv_csr_operations *ops)
38 csr_ops[csrno & (CSR_TABLE_SIZE - 1)] = *ops;
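/*
 * Illustrative sketch (not part of this file): a machine model could use
 * the two accessors above to override a single CSR handler at init time,
 * e.g.
 *
 *     riscv_csr_operations ops;
 *
 *     riscv_get_csr_ops(CSR_TIME, &ops);    // start from the default ops
 *     ops.read = my_read_time;              // hypothetical replacement
 *     riscv_set_csr_ops(CSR_TIME, &ops);
 *
 * "my_read_time" is a placeholder name, not an existing QEMU symbol.
 */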
41 /* Predicates */
42 static RISCVException fs(CPURISCVState *env, int csrno)
44 #if !defined(CONFIG_USER_ONLY)
45 if (!env->debugger && !riscv_cpu_fp_enabled(env) &&
46 !RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) {
47 return RISCV_EXCP_ILLEGAL_INST;
49 #endif
50 return RISCV_EXCP_NONE;
53 static RISCVException vs(CPURISCVState *env, int csrno)
55 CPUState *cs = env_cpu(env);
56 RISCVCPU *cpu = RISCV_CPU(cs);
58 if (env->misa_ext & RVV ||
59 cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) {
60 #if !defined(CONFIG_USER_ONLY)
61 if (!env->debugger && !riscv_cpu_vector_enabled(env)) {
62 return RISCV_EXCP_ILLEGAL_INST;
64 #endif
65 return RISCV_EXCP_NONE;
67 return RISCV_EXCP_ILLEGAL_INST;
70 static RISCVException ctr(CPURISCVState *env, int csrno)
72 #if !defined(CONFIG_USER_ONLY)
73 CPUState *cs = env_cpu(env);
74 RISCVCPU *cpu = RISCV_CPU(cs);
76 if (!cpu->cfg.ext_counters) {
77 /* The Counters extension is not enabled */
78 return RISCV_EXCP_ILLEGAL_INST;
81 if (riscv_cpu_virt_enabled(env)) {
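/*
 * For a guest (VS/VU-mode) access, a counter is usable only when both
 * mcounteren and hcounteren grant it.  E.g. mcounteren.TM=1 with
 * hcounteren.TM=0 makes a guest rdtime trap with a virtual instruction
 * fault so the hypervisor can emulate it.
 */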
82 switch (csrno) {
83 case CSR_CYCLE:
84 if (!get_field(env->hcounteren, COUNTEREN_CY) &&
85 get_field(env->mcounteren, COUNTEREN_CY)) {
86 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
88 break;
89 case CSR_TIME:
90 if (!get_field(env->hcounteren, COUNTEREN_TM) &&
91 get_field(env->mcounteren, COUNTEREN_TM)) {
92 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
94 break;
95 case CSR_INSTRET:
96 if (!get_field(env->hcounteren, COUNTEREN_IR) &&
97 get_field(env->mcounteren, COUNTEREN_IR)) {
98 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
100 break;
101 case CSR_HPMCOUNTER3...CSR_HPMCOUNTER31:
102 if (!get_field(env->hcounteren, 1 << (csrno - CSR_HPMCOUNTER3)) &&
103 get_field(env->mcounteren, 1 << (csrno - CSR_HPMCOUNTER3))) {
104 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
106 break;
108 if (riscv_cpu_mxl(env) == MXL_RV32) {
109 switch (csrno) {
110 case CSR_CYCLEH:
111 if (!get_field(env->hcounteren, COUNTEREN_CY) &&
112 get_field(env->mcounteren, COUNTEREN_CY)) {
113 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
115 break;
116 case CSR_TIMEH:
117 if (!get_field(env->hcounteren, COUNTEREN_TM) &&
118 get_field(env->mcounteren, COUNTEREN_TM)) {
119 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
121 break;
122 case CSR_INSTRETH:
123 if (!get_field(env->hcounteren, COUNTEREN_IR) &&
124 get_field(env->mcounteren, COUNTEREN_IR)) {
125 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
127 break;
128 case CSR_HPMCOUNTER3H...CSR_HPMCOUNTER31H:
129 if (!get_field(env->hcounteren, 1 << (csrno - CSR_HPMCOUNTER3H)) &&
130 get_field(env->mcounteren, 1 << (csrno - CSR_HPMCOUNTER3H))) {
131 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
133 break;
137 #endif
138 return RISCV_EXCP_NONE;
141 static RISCVException ctr32(CPURISCVState *env, int csrno)
143 if (riscv_cpu_mxl(env) != MXL_RV32) {
144 return RISCV_EXCP_ILLEGAL_INST;
147 return ctr(env, csrno);
150 #if !defined(CONFIG_USER_ONLY)
151 static RISCVException any(CPURISCVState *env, int csrno)
153 return RISCV_EXCP_NONE;
156 static RISCVException any32(CPURISCVState *env, int csrno)
158 if (riscv_cpu_mxl(env) != MXL_RV32) {
159 return RISCV_EXCP_ILLEGAL_INST;
162 return any(env, csrno);
166 static int aia_any(CPURISCVState *env, int csrno)
168 if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
169 return RISCV_EXCP_ILLEGAL_INST;
172 return any(env, csrno);
175 static int aia_any32(CPURISCVState *env, int csrno)
177 if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
178 return RISCV_EXCP_ILLEGAL_INST;
181 return any32(env, csrno);
184 static RISCVException smode(CPURISCVState *env, int csrno)
186 if (riscv_has_ext(env, RVS)) {
187 return RISCV_EXCP_NONE;
190 return RISCV_EXCP_ILLEGAL_INST;
193 static int smode32(CPURISCVState *env, int csrno)
195 if (riscv_cpu_mxl(env) != MXL_RV32) {
196 return RISCV_EXCP_ILLEGAL_INST;
199 return smode(env, csrno);
202 static int aia_smode(CPURISCVState *env, int csrno)
204 if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
205 return RISCV_EXCP_ILLEGAL_INST;
208 return smode(env, csrno);
211 static int aia_smode32(CPURISCVState *env, int csrno)
213 if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
214 return RISCV_EXCP_ILLEGAL_INST;
217 return smode32(env, csrno);
220 static RISCVException hmode(CPURISCVState *env, int csrno)
222 if (riscv_has_ext(env, RVS) &&
223 riscv_has_ext(env, RVH)) {
224 /* Hypervisor extension is supported */
225 if ((env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
226 env->priv == PRV_M) {
227 return RISCV_EXCP_NONE;
228 } else {
229 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
233 return RISCV_EXCP_ILLEGAL_INST;
236 static RISCVException hmode32(CPURISCVState *env, int csrno)
238 if (riscv_cpu_mxl(env) != MXL_RV32) {
239 if (!riscv_cpu_virt_enabled(env)) {
240 return RISCV_EXCP_ILLEGAL_INST;
241 } else {
242 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
246 return hmode(env, csrno);
250 /* Check whether the Pointer Masking CSRs can be accessed */
251 static RISCVException pointer_masking(CPURISCVState *env, int csrno)
253 /* Check if j-ext is present */
254 if (riscv_has_ext(env, RVJ)) {
255 return RISCV_EXCP_NONE;
257 return RISCV_EXCP_ILLEGAL_INST;
260 static int aia_hmode(CPURISCVState *env, int csrno)
262 if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
263 return RISCV_EXCP_ILLEGAL_INST;
266 return hmode(env, csrno);
269 static int aia_hmode32(CPURISCVState *env, int csrno)
271 if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
272 return RISCV_EXCP_ILLEGAL_INST;
275 return hmode32(env, csrno);
278 static RISCVException pmp(CPURISCVState *env, int csrno)
280 if (riscv_feature(env, RISCV_FEATURE_PMP)) {
281 return RISCV_EXCP_NONE;
284 return RISCV_EXCP_ILLEGAL_INST;
287 static RISCVException epmp(CPURISCVState *env, int csrno)
289 if (env->priv == PRV_M && riscv_feature(env, RISCV_FEATURE_EPMP)) {
290 return RISCV_EXCP_NONE;
293 return RISCV_EXCP_ILLEGAL_INST;
296 static RISCVException debug(CPURISCVState *env, int csrno)
298 if (riscv_feature(env, RISCV_FEATURE_DEBUG)) {
299 return RISCV_EXCP_NONE;
302 return RISCV_EXCP_ILLEGAL_INST;
304 #endif
306 static RISCVException seed(CPURISCVState *env, int csrno)
308 RISCVCPU *cpu = env_archcpu(env);
310 if (!cpu->cfg.ext_zkr) {
311 return RISCV_EXCP_ILLEGAL_INST;
314 #if !defined(CONFIG_USER_ONLY)
316 * With a CSR read-write instruction:
317 * 1) The seed CSR is always available in machine mode as normal.
318 * 2) Attempted access to seed from virtual modes VS and VU always raises
319 * an exception (virtual instruction exception only if mseccfg.sseed=1).
320 * 3) Without the corresponding access control bit set to 1, any attempted
321 * access to seed from U, S or HS modes will raise an illegal instruction
322 * exception.
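 * Summary (same rules, tabulated):
 *   M-mode                          -> allowed
 *   VS/VU-mode, mseccfg.SSEED=1     -> virtual instruction fault
 *   VS/VU-mode, mseccfg.SSEED=0     -> illegal instruction
 *   (H)S-mode,  mseccfg.SSEED=1     -> allowed
 *   U-mode,     mseccfg.USEED=1     -> allowed
 *   otherwise                       -> illegal instruction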
324 if (env->priv == PRV_M) {
325 return RISCV_EXCP_NONE;
326 } else if (riscv_cpu_virt_enabled(env)) {
327 if (env->mseccfg & MSECCFG_SSEED) {
328 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
329 } else {
330 return RISCV_EXCP_ILLEGAL_INST;
332 } else {
333 if (env->priv == PRV_S && (env->mseccfg & MSECCFG_SSEED)) {
334 return RISCV_EXCP_NONE;
335 } else if (env->priv == PRV_U && (env->mseccfg & MSECCFG_USEED)) {
336 return RISCV_EXCP_NONE;
337 } else {
338 return RISCV_EXCP_ILLEGAL_INST;
341 #else
342 return RISCV_EXCP_NONE;
343 #endif
346 /* User Floating-Point CSRs */
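/*
 * For reference: fcsr[4:0] holds the accrued exception flags (fflags) and
 * fcsr[7:5] holds the rounding mode (frm); the separate fflags/frm CSRs
 * below are just windows onto those two fields.
 */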
347 static RISCVException read_fflags(CPURISCVState *env, int csrno,
348 target_ulong *val)
350 *val = riscv_cpu_get_fflags(env);
351 return RISCV_EXCP_NONE;
354 static RISCVException write_fflags(CPURISCVState *env, int csrno,
355 target_ulong val)
357 #if !defined(CONFIG_USER_ONLY)
358 if (riscv_has_ext(env, RVF)) {
359 env->mstatus |= MSTATUS_FS;
361 #endif
362 riscv_cpu_set_fflags(env, val & (FSR_AEXC >> FSR_AEXC_SHIFT));
363 return RISCV_EXCP_NONE;
366 static RISCVException read_frm(CPURISCVState *env, int csrno,
367 target_ulong *val)
369 *val = env->frm;
370 return RISCV_EXCP_NONE;
373 static RISCVException write_frm(CPURISCVState *env, int csrno,
374 target_ulong val)
376 #if !defined(CONFIG_USER_ONLY)
377 if (riscv_has_ext(env, RVF)) {
378 env->mstatus |= MSTATUS_FS;
380 #endif
381 env->frm = val & (FSR_RD >> FSR_RD_SHIFT);
382 return RISCV_EXCP_NONE;
385 static RISCVException read_fcsr(CPURISCVState *env, int csrno,
386 target_ulong *val)
388 *val = (riscv_cpu_get_fflags(env) << FSR_AEXC_SHIFT)
389 | (env->frm << FSR_RD_SHIFT);
390 return RISCV_EXCP_NONE;
393 static RISCVException write_fcsr(CPURISCVState *env, int csrno,
394 target_ulong val)
396 #if !defined(CONFIG_USER_ONLY)
397 if (riscv_has_ext(env, RVF)) {
398 env->mstatus |= MSTATUS_FS;
400 #endif
401 env->frm = (val & FSR_RD) >> FSR_RD_SHIFT;
402 riscv_cpu_set_fflags(env, (val & FSR_AEXC) >> FSR_AEXC_SHIFT);
403 return RISCV_EXCP_NONE;
406 static RISCVException read_vtype(CPURISCVState *env, int csrno,
407 target_ulong *val)
409 uint64_t vill;
410 switch (env->xl) {
411 case MXL_RV32:
412 vill = (uint32_t)env->vill << 31;
413 break;
414 case MXL_RV64:
415 vill = (uint64_t)env->vill << 63;
416 break;
417 default:
418 g_assert_not_reached();
420 *val = (target_ulong)vill | env->vtype;
421 return RISCV_EXCP_NONE;
424 static RISCVException read_vl(CPURISCVState *env, int csrno,
425 target_ulong *val)
427 *val = env->vl;
428 return RISCV_EXCP_NONE;
431 static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val)
433 *val = env_archcpu(env)->cfg.vlen >> 3;
434 return RISCV_EXCP_NONE;
437 static RISCVException read_vxrm(CPURISCVState *env, int csrno,
438 target_ulong *val)
440 *val = env->vxrm;
441 return RISCV_EXCP_NONE;
444 static RISCVException write_vxrm(CPURISCVState *env, int csrno,
445 target_ulong val)
447 #if !defined(CONFIG_USER_ONLY)
448 env->mstatus |= MSTATUS_VS;
449 #endif
450 env->vxrm = val;
451 return RISCV_EXCP_NONE;
454 static RISCVException read_vxsat(CPURISCVState *env, int csrno,
455 target_ulong *val)
457 *val = env->vxsat;
458 return RISCV_EXCP_NONE;
461 static RISCVException write_vxsat(CPURISCVState *env, int csrno,
462 target_ulong val)
464 #if !defined(CONFIG_USER_ONLY)
465 env->mstatus |= MSTATUS_VS;
466 #endif
467 env->vxsat = val;
468 return RISCV_EXCP_NONE;
471 static RISCVException read_vstart(CPURISCVState *env, int csrno,
472 target_ulong *val)
474 *val = env->vstart;
475 return RISCV_EXCP_NONE;
478 static RISCVException write_vstart(CPURISCVState *env, int csrno,
479 target_ulong val)
481 #if !defined(CONFIG_USER_ONLY)
482 env->mstatus |= MSTATUS_VS;
483 #endif
485 * The vstart CSR is defined to have only enough writable bits
486 * to hold the largest element index, i.e. lg2(VLEN) bits.
488 env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen));
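/*
 * Worked example (assumed configuration): with cfg.vlen == 128,
 * ctzl(128) == 7, so the mask above is 0x7f and only vstart[6:0] is
 * writable; any higher bits written here are silently dropped.
 */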
489 return RISCV_EXCP_NONE;
492 static int read_vcsr(CPURISCVState *env, int csrno, target_ulong *val)
494 *val = (env->vxrm << VCSR_VXRM_SHIFT) | (env->vxsat << VCSR_VXSAT_SHIFT);
495 return RISCV_EXCP_NONE;
498 static int write_vcsr(CPURISCVState *env, int csrno, target_ulong val)
500 #if !defined(CONFIG_USER_ONLY)
501 env->mstatus |= MSTATUS_VS;
502 #endif
503 env->vxrm = (val & VCSR_VXRM) >> VCSR_VXRM_SHIFT;
504 env->vxsat = (val & VCSR_VXSAT) >> VCSR_VXSAT_SHIFT;
505 return RISCV_EXCP_NONE;
508 /* User Timers and Counters */
509 static RISCVException read_instret(CPURISCVState *env, int csrno,
510 target_ulong *val)
512 #if !defined(CONFIG_USER_ONLY)
513 if (icount_enabled()) {
514 *val = icount_get();
515 } else {
516 *val = cpu_get_host_ticks();
518 #else
519 *val = cpu_get_host_ticks();
520 #endif
521 return RISCV_EXCP_NONE;
524 static RISCVException read_instreth(CPURISCVState *env, int csrno,
525 target_ulong *val)
527 #if !defined(CONFIG_USER_ONLY)
528 if (icount_enabled()) {
529 *val = icount_get() >> 32;
530 } else {
531 *val = cpu_get_host_ticks() >> 32;
533 #else
534 *val = cpu_get_host_ticks() >> 32;
535 #endif
536 return RISCV_EXCP_NONE;
539 #if defined(CONFIG_USER_ONLY)
540 static RISCVException read_time(CPURISCVState *env, int csrno,
541 target_ulong *val)
543 *val = cpu_get_host_ticks();
544 return RISCV_EXCP_NONE;
547 static RISCVException read_timeh(CPURISCVState *env, int csrno,
548 target_ulong *val)
550 *val = cpu_get_host_ticks() >> 32;
551 return RISCV_EXCP_NONE;
554 #else /* CONFIG_USER_ONLY */
556 static RISCVException read_time(CPURISCVState *env, int csrno,
557 target_ulong *val)
559 uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;
561 if (!env->rdtime_fn) {
562 return RISCV_EXCP_ILLEGAL_INST;
565 *val = env->rdtime_fn(env->rdtime_fn_arg) + delta;
566 return RISCV_EXCP_NONE;
569 static RISCVException read_timeh(CPURISCVState *env, int csrno,
570 target_ulong *val)
572 uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;
574 if (!env->rdtime_fn) {
575 return RISCV_EXCP_ILLEGAL_INST;
578 *val = (env->rdtime_fn(env->rdtime_fn_arg) + delta) >> 32;
579 return RISCV_EXCP_NONE;
582 /* Machine constants */
584 #define M_MODE_INTERRUPTS ((uint64_t)(MIP_MSIP | MIP_MTIP | MIP_MEIP))
585 #define S_MODE_INTERRUPTS ((uint64_t)(MIP_SSIP | MIP_STIP | MIP_SEIP))
586 #define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP))
587 #define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS))
589 #define VSTOPI_NUM_SRCS 5
591 static const uint64_t delegable_ints = S_MODE_INTERRUPTS |
592 VS_MODE_INTERRUPTS;
593 static const uint64_t vs_delegable_ints = VS_MODE_INTERRUPTS;
594 static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
595 HS_MODE_INTERRUPTS;
596 #define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
597 (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
598 (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
599 (1ULL << (RISCV_EXCP_BREAKPOINT)) | \
600 (1ULL << (RISCV_EXCP_LOAD_ADDR_MIS)) | \
601 (1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT)) | \
602 (1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS)) | \
603 (1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) | \
604 (1ULL << (RISCV_EXCP_U_ECALL)) | \
605 (1ULL << (RISCV_EXCP_S_ECALL)) | \
606 (1ULL << (RISCV_EXCP_VS_ECALL)) | \
607 (1ULL << (RISCV_EXCP_M_ECALL)) | \
608 (1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) | \
609 (1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) | \
610 (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) | \
611 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) | \
612 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) | \
613 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) | \
614 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)))
615 static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
616 ~((1ULL << (RISCV_EXCP_S_ECALL)) |
617 (1ULL << (RISCV_EXCP_VS_ECALL)) |
618 (1ULL << (RISCV_EXCP_M_ECALL)) |
619 (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
620 (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
621 (1ULL << (RISCV_EXCP_VIRT_INSTRUCTION_FAULT)) |
622 (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT)));
623 static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
624 SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
625 SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
626 static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP;
627 static const target_ulong hip_writable_mask = MIP_VSSIP;
628 static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
629 static const target_ulong vsip_writable_mask = MIP_VSSIP;
631 static const char valid_vm_1_10_32[16] = {
632 [VM_1_10_MBARE] = 1,
633 [VM_1_10_SV32] = 1
636 static const char valid_vm_1_10_64[16] = {
637 [VM_1_10_MBARE] = 1,
638 [VM_1_10_SV39] = 1,
639 [VM_1_10_SV48] = 1,
640 [VM_1_10_SV57] = 1
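/*
 * The tables above are indexed by the satp.MODE field; a zero entry makes
 * write_satp() below treat the requested mode as unsupported and drop the
 * write.
 */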
643 /* Machine Information Registers */
644 static RISCVException read_zero(CPURISCVState *env, int csrno,
645 target_ulong *val)
647 *val = 0;
648 return RISCV_EXCP_NONE;
651 static RISCVException write_ignore(CPURISCVState *env, int csrno,
652 target_ulong val)
654 return RISCV_EXCP_NONE;
657 static RISCVException read_mvendorid(CPURISCVState *env, int csrno,
658 target_ulong *val)
660 CPUState *cs = env_cpu(env);
661 RISCVCPU *cpu = RISCV_CPU(cs);
663 *val = cpu->cfg.mvendorid;
664 return RISCV_EXCP_NONE;
667 static RISCVException read_marchid(CPURISCVState *env, int csrno,
668 target_ulong *val)
670 CPUState *cs = env_cpu(env);
671 RISCVCPU *cpu = RISCV_CPU(cs);
673 *val = cpu->cfg.marchid;
674 return RISCV_EXCP_NONE;
677 static RISCVException read_mimpid(CPURISCVState *env, int csrno,
678 target_ulong *val)
680 CPUState *cs = env_cpu(env);
681 RISCVCPU *cpu = RISCV_CPU(cs);
683 *val = cpu->cfg.mimpid;
684 return RISCV_EXCP_NONE;
687 static RISCVException read_mhartid(CPURISCVState *env, int csrno,
688 target_ulong *val)
690 *val = env->mhartid;
691 return RISCV_EXCP_NONE;
694 /* Machine Trap Setup */
696 /* We do not store SD explicitly, only compute it on demand. */
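/* SD reads as 1 whenever FS, VS or XS is in the Dirty (0b11) state. */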
697 static uint64_t add_status_sd(RISCVMXL xl, uint64_t status)
699 if ((status & MSTATUS_FS) == MSTATUS_FS ||
700 (status & MSTATUS_VS) == MSTATUS_VS ||
701 (status & MSTATUS_XS) == MSTATUS_XS) {
702 switch (xl) {
703 case MXL_RV32:
704 return status | MSTATUS32_SD;
705 case MXL_RV64:
706 return status | MSTATUS64_SD;
707 case MXL_RV128:
708 return MSTATUSH128_SD;
709 default:
710 g_assert_not_reached();
713 return status;
716 static RISCVException read_mstatus(CPURISCVState *env, int csrno,
717 target_ulong *val)
719 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus);
720 return RISCV_EXCP_NONE;
723 static int validate_vm(CPURISCVState *env, target_ulong vm)
725 if (riscv_cpu_mxl(env) == MXL_RV32) {
726 return valid_vm_1_10_32[vm & 0xf];
727 } else {
728 return valid_vm_1_10_64[vm & 0xf];
732 static RISCVException write_mstatus(CPURISCVState *env, int csrno,
733 target_ulong val)
735 uint64_t mstatus = env->mstatus;
736 uint64_t mask = 0;
737 RISCVMXL xl = riscv_cpu_mxl(env);
739 /* flush tlb on mstatus fields that affect VM */
740 if ((val ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP | MSTATUS_MPV |
741 MSTATUS_MPRV | MSTATUS_SUM)) {
742 tlb_flush(env_cpu(env));
744 mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
745 MSTATUS_SPP | MSTATUS_MPRV | MSTATUS_SUM |
746 MSTATUS_MPP | MSTATUS_MXR | MSTATUS_TVM | MSTATUS_TSR |
747 MSTATUS_TW | MSTATUS_VS;
749 if (riscv_has_ext(env, RVF)) {
750 mask |= MSTATUS_FS;
753 if (xl != MXL_RV32 || env->debugger) {
755 * RV32: MPV and GVA are not in mstatus. The current plan is to
756 * add them to mstatush. For now, we just don't support it.
758 mask |= MSTATUS_MPV | MSTATUS_GVA;
759 if ((val & MSTATUS64_UXL) != 0) {
760 mask |= MSTATUS64_UXL;
764 mstatus = (mstatus & ~mask) | (val & mask);
766 if (xl > MXL_RV32) {
767 /* SXL field is for now read only */
768 mstatus = set_field(mstatus, MSTATUS64_SXL, xl);
770 env->mstatus = mstatus;
771 env->xl = cpu_recompute_xl(env);
773 return RISCV_EXCP_NONE;
776 static RISCVException read_mstatush(CPURISCVState *env, int csrno,
777 target_ulong *val)
779 *val = env->mstatus >> 32;
780 return RISCV_EXCP_NONE;
783 static RISCVException write_mstatush(CPURISCVState *env, int csrno,
784 target_ulong val)
786 uint64_t valh = (uint64_t)val << 32;
787 uint64_t mask = MSTATUS_MPV | MSTATUS_GVA;
789 if ((valh ^ env->mstatus) & (MSTATUS_MPV)) {
790 tlb_flush(env_cpu(env));
793 env->mstatus = (env->mstatus & ~mask) | (valh & mask);
795 return RISCV_EXCP_NONE;
798 static RISCVException read_mstatus_i128(CPURISCVState *env, int csrno,
799 Int128 *val)
801 *val = int128_make128(env->mstatus, add_status_sd(MXL_RV128, env->mstatus));
802 return RISCV_EXCP_NONE;
805 static RISCVException read_misa_i128(CPURISCVState *env, int csrno,
806 Int128 *val)
808 *val = int128_make128(env->misa_ext, (uint64_t)MXL_RV128 << 62);
809 return RISCV_EXCP_NONE;
812 static RISCVException read_misa(CPURISCVState *env, int csrno,
813 target_ulong *val)
815 target_ulong misa;
817 switch (env->misa_mxl) {
818 case MXL_RV32:
819 misa = (target_ulong)MXL_RV32 << 30;
820 break;
821 #ifdef TARGET_RISCV64
822 case MXL_RV64:
823 misa = (target_ulong)MXL_RV64 << 62;
824 break;
825 #endif
826 default:
827 g_assert_not_reached();
830 *val = misa | env->misa_ext;
831 return RISCV_EXCP_NONE;
834 static RISCVException write_misa(CPURISCVState *env, int csrno,
835 target_ulong val)
837 if (!riscv_feature(env, RISCV_FEATURE_MISA)) {
838 /* drop write to misa */
839 return RISCV_EXCP_NONE;
842 /* 'I' or 'E' must be present */
843 if (!(val & (RVI | RVE))) {
844 /* It is not, drop write to misa */
845 return RISCV_EXCP_NONE;
848 /* 'E' excludes all other extensions */
849 if (val & RVE) {
850 /* when we support 'E' we can do "val = RVE;" however
851 * for now we just drop writes if 'E' is present.
853 return RISCV_EXCP_NONE;
857 * misa.MXL writes are not supported by QEMU.
858 * Drop writes to those bits.
861 /* Mask extensions that are not supported by this hart */
862 val &= env->misa_ext_mask;
864 /* Mask extensions that are not supported by QEMU */
865 val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU | RVV);
867 /* 'D' depends on 'F', so clear 'D' if 'F' is not present */
868 if ((val & RVD) && !(val & RVF)) {
869 val &= ~RVD;
872 /* Suppress 'C' if next instruction is not aligned
873 * TODO: this should check next_pc
875 if ((val & RVC) && (GETPC() & ~3) != 0) {
876 val &= ~RVC;
879 /* If nothing changed, do nothing. */
880 if (val == env->misa_ext) {
881 return RISCV_EXCP_NONE;
884 if (!(val & RVF)) {
885 env->mstatus &= ~MSTATUS_FS;
888 /* flush translation cache */
889 tb_flush(env_cpu(env));
890 env->misa_ext = val;
891 env->xl = riscv_cpu_mxl(env);
892 return RISCV_EXCP_NONE;
895 static RISCVException read_medeleg(CPURISCVState *env, int csrno,
896 target_ulong *val)
898 *val = env->medeleg;
899 return RISCV_EXCP_NONE;
902 static RISCVException write_medeleg(CPURISCVState *env, int csrno,
903 target_ulong val)
905 env->medeleg = (env->medeleg & ~DELEGABLE_EXCPS) | (val & DELEGABLE_EXCPS);
906 return RISCV_EXCP_NONE;
909 static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno,
910 uint64_t *ret_val,
911 uint64_t new_val, uint64_t wr_mask)
913 uint64_t mask = wr_mask & delegable_ints;
915 if (ret_val) {
916 *ret_val = env->mideleg;
919 env->mideleg = (env->mideleg & ~mask) | (new_val & mask);
921 if (riscv_has_ext(env, RVH)) {
922 env->mideleg |= HS_MODE_INTERRUPTS;
925 return RISCV_EXCP_NONE;
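/*
 * Note on the rmw_*() family: the *64 helpers operate on the full 64-bit
 * backing state; the plain wrappers expose the low XLEN bits, and the *h
 * wrappers expose bits [63:32] for the RV32-only high-half CSR aliases.
 */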
928 static RISCVException rmw_mideleg(CPURISCVState *env, int csrno,
929 target_ulong *ret_val,
930 target_ulong new_val, target_ulong wr_mask)
932 uint64_t rval;
933 RISCVException ret;
935 ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask);
936 if (ret_val) {
937 *ret_val = rval;
940 return ret;
943 static RISCVException rmw_midelegh(CPURISCVState *env, int csrno,
944 target_ulong *ret_val,
945 target_ulong new_val,
946 target_ulong wr_mask)
948 uint64_t rval;
949 RISCVException ret;
951 ret = rmw_mideleg64(env, csrno, &rval,
952 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
953 if (ret_val) {
954 *ret_val = rval >> 32;
957 return ret;
960 static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
961 uint64_t *ret_val,
962 uint64_t new_val, uint64_t wr_mask)
964 uint64_t mask = wr_mask & all_ints;
966 if (ret_val) {
967 *ret_val = env->mie;
970 env->mie = (env->mie & ~mask) | (new_val & mask);
972 if (!riscv_has_ext(env, RVH)) {
973 env->mie &= ~((uint64_t)MIP_SGEIP);
976 return RISCV_EXCP_NONE;
979 static RISCVException rmw_mie(CPURISCVState *env, int csrno,
980 target_ulong *ret_val,
981 target_ulong new_val, target_ulong wr_mask)
983 uint64_t rval;
984 RISCVException ret;
986 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask);
987 if (ret_val) {
988 *ret_val = rval;
991 return ret;
994 static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
995 target_ulong *ret_val,
996 target_ulong new_val, target_ulong wr_mask)
998 uint64_t rval;
999 RISCVException ret;
1001 ret = rmw_mie64(env, csrno, &rval,
1002 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1003 if (ret_val) {
1004 *ret_val = rval >> 32;
1007 return ret;
1010 static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val)
1012 int irq;
1013 uint8_t iprio;
1015 irq = riscv_cpu_mirq_pending(env);
1016 if (irq <= 0 || irq > 63) {
1017 *val = 0;
1018 } else {
1019 iprio = env->miprio[irq];
1020 if (!iprio) {
1021 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) {
1022 iprio = IPRIO_MMAXIPRIO;
1025 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
1026 *val |= iprio;
1029 return RISCV_EXCP_NONE;
1032 static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno)
1034 if (!riscv_cpu_virt_enabled(env)) {
1035 return csrno;
1038 switch (csrno) {
1039 case CSR_SISELECT:
1040 return CSR_VSISELECT;
1041 case CSR_SIREG:
1042 return CSR_VSIREG;
1043 case CSR_SSETEIPNUM:
1044 return CSR_VSSETEIPNUM;
1045 case CSR_SCLREIPNUM:
1046 return CSR_VSCLREIPNUM;
1047 case CSR_SSETEIENUM:
1048 return CSR_VSSETEIENUM;
1049 case CSR_SCLREIENUM:
1050 return CSR_VSCLREIENUM;
1051 case CSR_STOPEI:
1052 return CSR_VSTOPEI;
1053 default:
1054 return csrno;
1058 static int rmw_xiselect(CPURISCVState *env, int csrno, target_ulong *val,
1059 target_ulong new_val, target_ulong wr_mask)
1061 target_ulong *iselect;
1063 /* Translate CSR number for VS-mode */
1064 csrno = aia_xlate_vs_csrno(env, csrno);
1066 /* Find the iselect CSR based on CSR number */
1067 switch (csrno) {
1068 case CSR_MISELECT:
1069 iselect = &env->miselect;
1070 break;
1071 case CSR_SISELECT:
1072 iselect = &env->siselect;
1073 break;
1074 case CSR_VSISELECT:
1075 iselect = &env->vsiselect;
1076 break;
1077 default:
1078 return RISCV_EXCP_ILLEGAL_INST;
1081 if (val) {
1082 *val = *iselect;
1085 wr_mask &= ISELECT_MASK;
1086 if (wr_mask) {
1087 *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask);
1090 return RISCV_EXCP_NONE;
1093 static int rmw_iprio(target_ulong xlen,
1094 target_ulong iselect, uint8_t *iprio,
1095 target_ulong *val, target_ulong new_val,
1096 target_ulong wr_mask, int ext_irq_no)
1098 int i, firq, nirqs;
1099 target_ulong old_val;
1101 if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) {
1102 return -EINVAL;
1104 if (xlen != 32 && iselect & 0x1) {
1105 return -EINVAL;
1108 nirqs = 4 * (xlen / 32);
1109 firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs);
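/*
 * Worked example: with xlen == 64 each iprio register packs nirqs == 8
 * byte-sized priorities, so e.g. iselect == ISELECT_IPRIO0 + 2 selects
 * firq == 8, i.e. the priorities for interrupts 8..15.
 */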
1111 old_val = 0;
1112 for (i = 0; i < nirqs; i++) {
1113 old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i);
1116 if (val) {
1117 *val = old_val;
1120 if (wr_mask) {
1121 new_val = (old_val & ~wr_mask) | (new_val & wr_mask);
1122 for (i = 0; i < nirqs; i++) {
1124 * M-level and S-level external IRQ priority is always read-only
1125 * zero. This means the default priority order is always preferred
1126 * for M-level and S-level external IRQs.
1128 if ((firq + i) == ext_irq_no) {
1129 continue;
1131 iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff;
1135 return 0;
1138 static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
1139 target_ulong new_val, target_ulong wr_mask)
1141 bool virt;
1142 uint8_t *iprio;
1143 int ret = -EINVAL;
1144 target_ulong priv, isel, vgein;
1146 /* Translate CSR number for VS-mode */
1147 csrno = aia_xlate_vs_csrno(env, csrno);
1149 /* Decode register details from CSR number */
1150 virt = false;
1151 switch (csrno) {
1152 case CSR_MIREG:
1153 iprio = env->miprio;
1154 isel = env->miselect;
1155 priv = PRV_M;
1156 break;
1157 case CSR_SIREG:
1158 iprio = env->siprio;
1159 isel = env->siselect;
1160 priv = PRV_S;
1161 break;
1162 case CSR_VSIREG:
1163 iprio = env->hviprio;
1164 isel = env->vsiselect;
1165 priv = PRV_S;
1166 virt = true;
1167 break;
1168 default:
1169 goto done;
1172 /* Find the selected guest interrupt file */
1173 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
1175 if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) {
1176 /* Local interrupt priority registers not available for VS-mode */
1177 if (!virt) {
1178 ret = rmw_iprio(riscv_cpu_mxl_bits(env),
1179 isel, iprio, val, new_val, wr_mask,
1180 (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT);
1182 } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) {
1183 /* IMSIC registers are only available when the machine implements an IMSIC. */
1184 if (env->aia_ireg_rmw_fn[priv]) {
1185 /* Selected guest interrupt file should not be zero */
1186 if (virt && (!vgein || env->geilen < vgein)) {
1187 goto done;
1189 /* Call machine specific IMSIC register emulation */
1190 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
1191 AIA_MAKE_IREG(isel, priv, virt, vgein,
1192 riscv_cpu_mxl_bits(env)),
1193 val, new_val, wr_mask);
1197 done:
1198 if (ret) {
1199 return (riscv_cpu_virt_enabled(env) && virt) ?
1200 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
1202 return RISCV_EXCP_NONE;
1205 static int rmw_xsetclreinum(CPURISCVState *env, int csrno, target_ulong *val,
1206 target_ulong new_val, target_ulong wr_mask)
1208 int ret = -EINVAL;
1209 bool set, pend, virt;
1210 target_ulong priv, isel, vgein, xlen, nval, wmask;
1212 /* Translate CSR number for VS-mode */
1213 csrno = aia_xlate_vs_csrno(env, csrno);
1215 /* Decode register details from CSR number */
1216 virt = set = pend = false;
1217 switch (csrno) {
1218 case CSR_MSETEIPNUM:
1219 priv = PRV_M;
1220 set = true;
1221 pend = true;
1222 break;
1223 case CSR_MCLREIPNUM:
1224 priv = PRV_M;
1225 pend = true;
1226 break;
1227 case CSR_MSETEIENUM:
1228 priv = PRV_M;
1229 set = true;
1230 break;
1231 case CSR_MCLREIENUM:
1232 priv = PRV_M;
1233 break;
1234 case CSR_SSETEIPNUM:
1235 priv = PRV_S;
1236 set = true;
1237 pend = true;
1238 break;
1239 case CSR_SCLREIPNUM:
1240 priv = PRV_S;
1241 pend = true;
1242 break;
1243 case CSR_SSETEIENUM:
1244 priv = PRV_S;
1245 set = true;
1246 break;
1247 case CSR_SCLREIENUM:
1248 priv = PRV_S;
1249 break;
1250 case CSR_VSSETEIPNUM:
1251 priv = PRV_S;
1252 virt = true;
1253 set = true;
1254 pend = true;
1255 break;
1256 case CSR_VSCLREIPNUM:
1257 priv = PRV_S;
1258 virt = true;
1259 pend = true;
1260 break;
1261 case CSR_VSSETEIENUM:
1262 priv = PRV_S;
1263 virt = true;
1264 set = true;
1265 break;
1266 case CSR_VSCLREIENUM:
1267 priv = PRV_S;
1268 virt = true;
1269 break;
1270 default:
1271 goto done;
1274 /* IMSIC CSRs are only available when the machine implements an IMSIC. */
1275 if (!env->aia_ireg_rmw_fn[priv]) {
1276 goto done;
1279 /* Find the selected guest interrupt file */
1280 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
1282 /* Selected guest interrupt file should be valid */
1283 if (virt && (!vgein || env->geilen < vgein)) {
1284 goto done;
1287 /* Set/Clear CSRs always read zero */
1288 if (val) {
1289 *val = 0;
1292 if (wr_mask) {
1293 /* Get interrupt number */
1294 new_val &= wr_mask;
1296 /* Find target interrupt pending/enable register */
1297 xlen = riscv_cpu_mxl_bits(env);
1298 isel = (new_val / xlen);
1299 isel *= (xlen / IMSIC_EIPx_BITS);
1300 isel += (pend) ? ISELECT_IMSIC_EIP0 : ISELECT_IMSIC_EIE0;
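/*
 * new_val is the external interrupt number: the arithmetic above maps it
 * to the xlen-wide EIPx/EIEx group containing it (e.g. for xlen == 64,
 * interrupt 70 lands in the group that starts at interrupt 64).
 */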
1302 /* Find the interrupt bit to be set/clear */
1303 wmask = ((target_ulong)1) << (new_val % xlen);
1304 nval = (set) ? wmask : 0;
1306 /* Call machine specific IMSIC register emulation */
1307 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
1308 AIA_MAKE_IREG(isel, priv, virt,
1309 vgein, xlen),
1310 NULL, nval, wmask);
1311 } else {
1312 ret = 0;
1315 done:
1316 if (ret) {
1317 return (riscv_cpu_virt_enabled(env) && virt) ?
1318 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
1320 return RISCV_EXCP_NONE;
1323 static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val,
1324 target_ulong new_val, target_ulong wr_mask)
1326 bool virt;
1327 int ret = -EINVAL;
1328 target_ulong priv, vgein;
1330 /* Translate CSR number for VS-mode */
1331 csrno = aia_xlate_vs_csrno(env, csrno);
1333 /* Decode register details from CSR number */
1334 virt = false;
1335 switch (csrno) {
1336 case CSR_MTOPEI:
1337 priv = PRV_M;
1338 break;
1339 case CSR_STOPEI:
1340 priv = PRV_S;
1341 break;
1342 case CSR_VSTOPEI:
1343 priv = PRV_S;
1344 virt = true;
1345 break;
1346 default:
1347 goto done;
1350 /* IMSIC CSRs are only available when the machine implements an IMSIC. */
1351 if (!env->aia_ireg_rmw_fn[priv]) {
1352 goto done;
1355 /* Find the selected guest interrupt file */
1356 vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0;
1358 /* Selected guest interrupt file should be valid */
1359 if (virt && (!vgein || env->geilen < vgein)) {
1360 goto done;
1363 /* Call machine specific IMSIC register emulation for TOPEI */
1364 ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv],
1365 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein,
1366 riscv_cpu_mxl_bits(env)),
1367 val, new_val, wr_mask);
1369 done:
1370 if (ret) {
1371 return (riscv_cpu_virt_enabled(env) && virt) ?
1372 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
1374 return RISCV_EXCP_NONE;
1377 static RISCVException read_mtvec(CPURISCVState *env, int csrno,
1378 target_ulong *val)
1380 *val = env->mtvec;
1381 return RISCV_EXCP_NONE;
1384 static RISCVException write_mtvec(CPURISCVState *env, int csrno,
1385 target_ulong val)
1387 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
1388 if ((val & 3) < 2) {
1389 env->mtvec = val;
1390 } else {
1391 qemu_log_mask(LOG_UNIMP, "CSR_MTVEC: reserved mode not supported\n");
1393 return RISCV_EXCP_NONE;
1396 static RISCVException read_mcounteren(CPURISCVState *env, int csrno,
1397 target_ulong *val)
1399 *val = env->mcounteren;
1400 return RISCV_EXCP_NONE;
1403 static RISCVException write_mcounteren(CPURISCVState *env, int csrno,
1404 target_ulong val)
1406 env->mcounteren = val;
1407 return RISCV_EXCP_NONE;
1410 /* Machine Trap Handling */
1411 static RISCVException read_mscratch_i128(CPURISCVState *env, int csrno,
1412 Int128 *val)
1414 *val = int128_make128(env->mscratch, env->mscratchh);
1415 return RISCV_EXCP_NONE;
1418 static RISCVException write_mscratch_i128(CPURISCVState *env, int csrno,
1419 Int128 val)
1421 env->mscratch = int128_getlo(val);
1422 env->mscratchh = int128_gethi(val);
1423 return RISCV_EXCP_NONE;
1426 static RISCVException read_mscratch(CPURISCVState *env, int csrno,
1427 target_ulong *val)
1429 *val = env->mscratch;
1430 return RISCV_EXCP_NONE;
1433 static RISCVException write_mscratch(CPURISCVState *env, int csrno,
1434 target_ulong val)
1436 env->mscratch = val;
1437 return RISCV_EXCP_NONE;
1440 static RISCVException read_mepc(CPURISCVState *env, int csrno,
1441 target_ulong *val)
1443 *val = env->mepc;
1444 return RISCV_EXCP_NONE;
1447 static RISCVException write_mepc(CPURISCVState *env, int csrno,
1448 target_ulong val)
1450 env->mepc = val;
1451 return RISCV_EXCP_NONE;
1454 static RISCVException read_mcause(CPURISCVState *env, int csrno,
1455 target_ulong *val)
1457 *val = env->mcause;
1458 return RISCV_EXCP_NONE;
1461 static RISCVException write_mcause(CPURISCVState *env, int csrno,
1462 target_ulong val)
1464 env->mcause = val;
1465 return RISCV_EXCP_NONE;
1468 static RISCVException read_mtval(CPURISCVState *env, int csrno,
1469 target_ulong *val)
1471 *val = env->mtval;
1472 return RISCV_EXCP_NONE;
1475 static RISCVException write_mtval(CPURISCVState *env, int csrno,
1476 target_ulong val)
1478 env->mtval = val;
1479 return RISCV_EXCP_NONE;
1482 /* Execution environment configuration setup */
1483 static RISCVException read_menvcfg(CPURISCVState *env, int csrno,
1484 target_ulong *val)
1486 *val = env->menvcfg;
1487 return RISCV_EXCP_NONE;
1490 static RISCVException write_menvcfg(CPURISCVState *env, int csrno,
1491 target_ulong val)
1493 uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE;
1495 if (riscv_cpu_mxl(env) == MXL_RV64) {
1496 mask |= MENVCFG_PBMTE | MENVCFG_STCE;
1498 env->menvcfg = (env->menvcfg & ~mask) | (val & mask);
1500 return RISCV_EXCP_NONE;
1503 static RISCVException read_menvcfgh(CPURISCVState *env, int csrno,
1504 target_ulong *val)
1506 *val = env->menvcfg >> 32;
1507 return RISCV_EXCP_NONE;
1510 static RISCVException write_menvcfgh(CPURISCVState *env, int csrno,
1511 target_ulong val)
1513 uint64_t mask = MENVCFG_PBMTE | MENVCFG_STCE;
1514 uint64_t valh = (uint64_t)val << 32;
1516 env->menvcfg = (env->menvcfg & ~mask) | (valh & mask);
1518 return RISCV_EXCP_NONE;
1521 static RISCVException read_senvcfg(CPURISCVState *env, int csrno,
1522 target_ulong *val)
1524 *val = env->senvcfg;
1525 return RISCV_EXCP_NONE;
1528 static RISCVException write_senvcfg(CPURISCVState *env, int csrno,
1529 target_ulong val)
1531 uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE;
1533 env->senvcfg = (env->senvcfg & ~mask) | (val & mask);
1535 return RISCV_EXCP_NONE;
1538 static RISCVException read_henvcfg(CPURISCVState *env, int csrno,
1539 target_ulong *val)
1541 *val = env->henvcfg;
1542 return RISCV_EXCP_NONE;
1545 static RISCVException write_henvcfg(CPURISCVState *env, int csrno,
1546 target_ulong val)
1548 uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE;
1550 if (riscv_cpu_mxl(env) == MXL_RV64) {
1551 mask |= HENVCFG_PBMTE | HENVCFG_STCE;
1554 env->henvcfg = (env->henvcfg & ~mask) | (val & mask);
1556 return RISCV_EXCP_NONE;
1559 static RISCVException read_henvcfgh(CPURISCVState *env, int csrno,
1560 target_ulong *val)
1562 *val = env->henvcfg >> 32;
1563 return RISCV_EXCP_NONE;
1566 static RISCVException write_henvcfgh(CPURISCVState *env, int csrno,
1567 target_ulong val)
1569 uint64_t mask = HENVCFG_PBMTE | HENVCFG_STCE;
1570 uint64_t valh = (uint64_t)val << 32;
1572 env->henvcfg = (env->henvcfg & ~mask) | (valh & mask);
1574 return RISCV_EXCP_NONE;
1577 static RISCVException rmw_mip64(CPURISCVState *env, int csrno,
1578 uint64_t *ret_val,
1579 uint64_t new_val, uint64_t wr_mask)
1581 RISCVCPU *cpu = env_archcpu(env);
1582 uint64_t old_mip, mask = wr_mask & delegable_ints;
1583 uint32_t gin;
1585 if (mask & MIP_SEIP) {
1586 env->software_seip = new_val & MIP_SEIP;
1587 new_val |= env->external_seip * MIP_SEIP;
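/*
 * mip.SEIP is effectively the OR of a software-writable latch and the
 * external interrupt wire: the write above only updates the software
 * part, and the wire contribution is OR'ed back in before updating mip.
 */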
1590 if (mask) {
1591 old_mip = riscv_cpu_update_mip(cpu, mask, (new_val & mask));
1592 } else {
1593 old_mip = env->mip;
1596 if (csrno != CSR_HVIP) {
1597 gin = get_field(env->hstatus, HSTATUS_VGEIN);
1598 old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? MIP_VSEIP : 0;
1601 if (ret_val) {
1602 *ret_val = old_mip;
1605 return RISCV_EXCP_NONE;
1608 static RISCVException rmw_mip(CPURISCVState *env, int csrno,
1609 target_ulong *ret_val,
1610 target_ulong new_val, target_ulong wr_mask)
1612 uint64_t rval;
1613 RISCVException ret;
1615 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask);
1616 if (ret_val) {
1617 *ret_val = rval;
1620 return ret;
1623 static RISCVException rmw_miph(CPURISCVState *env, int csrno,
1624 target_ulong *ret_val,
1625 target_ulong new_val, target_ulong wr_mask)
1627 uint64_t rval;
1628 RISCVException ret;
1630 ret = rmw_mip64(env, csrno, &rval,
1631 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1632 if (ret_val) {
1633 *ret_val = rval >> 32;
1636 return ret;
1639 /* Supervisor Trap Setup */
1640 static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
1641 Int128 *val)
1643 uint64_t mask = sstatus_v1_10_mask;
1644 uint64_t sstatus = env->mstatus & mask;
1645 if (env->xl != MXL_RV32 || env->debugger) {
1646 mask |= SSTATUS64_UXL;
1649 *val = int128_make128(sstatus, add_status_sd(MXL_RV128, sstatus));
1650 return RISCV_EXCP_NONE;
1653 static RISCVException read_sstatus(CPURISCVState *env, int csrno,
1654 target_ulong *val)
1656 target_ulong mask = (sstatus_v1_10_mask);
1657 if (env->xl != MXL_RV32 || env->debugger) {
1658 mask |= SSTATUS64_UXL;
1660 /* TODO: Use SXL not MXL. */
1661 *val = add_status_sd(riscv_cpu_mxl(env), env->mstatus & mask);
1662 return RISCV_EXCP_NONE;
1665 static RISCVException write_sstatus(CPURISCVState *env, int csrno,
1666 target_ulong val)
1668 target_ulong mask = (sstatus_v1_10_mask);
1670 if (env->xl != MXL_RV32 || env->debugger) {
1671 if ((val & SSTATUS64_UXL) != 0) {
1672 mask |= SSTATUS64_UXL;
1675 target_ulong newval = (env->mstatus & ~mask) | (val & mask);
1676 return write_mstatus(env, CSR_MSTATUS, newval);
1679 static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
1680 uint64_t *ret_val,
1681 uint64_t new_val, uint64_t wr_mask)
1683 RISCVException ret;
1684 uint64_t rval, vsbits, mask = env->hideleg & VS_MODE_INTERRUPTS;
1686 /* Bring VS-level bits to correct position */
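/*
 * In the guest's view, vsie/vsip expose VSSIP/VSTIP/VSEIP at the
 * SSIP/STIP/SEIP bit positions, i.e. one bit to the right of where they
 * live in the real mie/mip, hence the shift-by-one shuffling below.
 */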
1687 vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
1688 new_val &= ~(VS_MODE_INTERRUPTS >> 1);
1689 new_val |= vsbits << 1;
1690 vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
1691 wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
1692 wr_mask |= vsbits << 1;
1694 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & mask);
1695 if (ret_val) {
1696 rval &= mask;
1697 vsbits = rval & VS_MODE_INTERRUPTS;
1698 rval &= ~VS_MODE_INTERRUPTS;
1699 *ret_val = rval | (vsbits >> 1);
1702 return ret;
1705 static RISCVException rmw_vsie(CPURISCVState *env, int csrno,
1706 target_ulong *ret_val,
1707 target_ulong new_val, target_ulong wr_mask)
1709 uint64_t rval;
1710 RISCVException ret;
1712 ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask);
1713 if (ret_val) {
1714 *ret_val = rval;
1717 return ret;
1720 static RISCVException rmw_vsieh(CPURISCVState *env, int csrno,
1721 target_ulong *ret_val,
1722 target_ulong new_val, target_ulong wr_mask)
1724 uint64_t rval;
1725 RISCVException ret;
1727 ret = rmw_vsie64(env, csrno, &rval,
1728 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1729 if (ret_val) {
1730 *ret_val = rval >> 32;
1733 return ret;
1736 static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
1737 uint64_t *ret_val,
1738 uint64_t new_val, uint64_t wr_mask)
1740 RISCVException ret;
1741 uint64_t mask = env->mideleg & S_MODE_INTERRUPTS;
1743 if (riscv_cpu_virt_enabled(env)) {
1744 if (env->hvictl & HVICTL_VTI) {
1745 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1747 ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
1748 } else {
1749 ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & mask);
1752 if (ret_val) {
1753 *ret_val &= mask;
1756 return ret;
1759 static RISCVException rmw_sie(CPURISCVState *env, int csrno,
1760 target_ulong *ret_val,
1761 target_ulong new_val, target_ulong wr_mask)
1763 uint64_t rval;
1764 RISCVException ret;
1766 ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask);
1767 if (ret == RISCV_EXCP_NONE && ret_val) {
1768 *ret_val = rval;
1771 return ret;
1774 static RISCVException rmw_sieh(CPURISCVState *env, int csrno,
1775 target_ulong *ret_val,
1776 target_ulong new_val, target_ulong wr_mask)
1778 uint64_t rval;
1779 RISCVException ret;
1781 ret = rmw_sie64(env, csrno, &rval,
1782 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1783 if (ret_val) {
1784 *ret_val = rval >> 32;
1787 return ret;
1790 static RISCVException read_stvec(CPURISCVState *env, int csrno,
1791 target_ulong *val)
1793 *val = env->stvec;
1794 return RISCV_EXCP_NONE;
1797 static RISCVException write_stvec(CPURISCVState *env, int csrno,
1798 target_ulong val)
1800 /* bits [1:0] encode mode; 0 = direct, 1 = vectored, >= 2 reserved */
1801 if ((val & 3) < 2) {
1802 env->stvec = val;
1803 } else {
1804 qemu_log_mask(LOG_UNIMP, "CSR_STVEC: reserved mode not supported\n");
1806 return RISCV_EXCP_NONE;
1809 static RISCVException read_scounteren(CPURISCVState *env, int csrno,
1810 target_ulong *val)
1812 *val = env->scounteren;
1813 return RISCV_EXCP_NONE;
1816 static RISCVException write_scounteren(CPURISCVState *env, int csrno,
1817 target_ulong val)
1819 env->scounteren = val;
1820 return RISCV_EXCP_NONE;
1823 /* Supervisor Trap Handling */
1824 static RISCVException read_sscratch_i128(CPURISCVState *env, int csrno,
1825 Int128 *val)
1827 *val = int128_make128(env->sscratch, env->sscratchh);
1828 return RISCV_EXCP_NONE;
1831 static RISCVException write_sscratch_i128(CPURISCVState *env, int csrno,
1832 Int128 val)
1834 env->sscratch = int128_getlo(val);
1835 env->sscratchh = int128_gethi(val);
1836 return RISCV_EXCP_NONE;
1839 static RISCVException read_sscratch(CPURISCVState *env, int csrno,
1840 target_ulong *val)
1842 *val = env->sscratch;
1843 return RISCV_EXCP_NONE;
1846 static RISCVException write_sscratch(CPURISCVState *env, int csrno,
1847 target_ulong val)
1849 env->sscratch = val;
1850 return RISCV_EXCP_NONE;
1853 static RISCVException read_sepc(CPURISCVState *env, int csrno,
1854 target_ulong *val)
1856 *val = env->sepc;
1857 return RISCV_EXCP_NONE;
1860 static RISCVException write_sepc(CPURISCVState *env, int csrno,
1861 target_ulong val)
1863 env->sepc = val;
1864 return RISCV_EXCP_NONE;
1867 static RISCVException read_scause(CPURISCVState *env, int csrno,
1868 target_ulong *val)
1870 *val = env->scause;
1871 return RISCV_EXCP_NONE;
1874 static RISCVException write_scause(CPURISCVState *env, int csrno,
1875 target_ulong val)
1877 env->scause = val;
1878 return RISCV_EXCP_NONE;
1881 static RISCVException read_stval(CPURISCVState *env, int csrno,
1882 target_ulong *val)
1884 *val = env->stval;
1885 return RISCV_EXCP_NONE;
1888 static RISCVException write_stval(CPURISCVState *env, int csrno,
1889 target_ulong val)
1891 env->stval = val;
1892 return RISCV_EXCP_NONE;
1895 static RISCVException rmw_vsip64(CPURISCVState *env, int csrno,
1896 uint64_t *ret_val,
1897 uint64_t new_val, uint64_t wr_mask)
1899 RISCVException ret;
1900 uint64_t rval, vsbits, mask = env->hideleg & vsip_writable_mask;
1902 /* Bring VS-level bits to correct position */
1903 vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
1904 new_val &= ~(VS_MODE_INTERRUPTS >> 1);
1905 new_val |= vsbits << 1;
1906 vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
1907 wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
1908 wr_mask |= vsbits << 1;
1910 ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask & mask);
1911 if (ret_val) {
1912 rval &= mask;
1913 vsbits = rval & VS_MODE_INTERRUPTS;
1914 rval &= ~VS_MODE_INTERRUPTS;
1915 *ret_val = rval | (vsbits >> 1);
1918 return ret;
1921 static RISCVException rmw_vsip(CPURISCVState *env, int csrno,
1922 target_ulong *ret_val,
1923 target_ulong new_val, target_ulong wr_mask)
1925 uint64_t rval;
1926 RISCVException ret;
1928 ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask);
1929 if (ret_val) {
1930 *ret_val = rval;
1933 return ret;
1936 static RISCVException rmw_vsiph(CPURISCVState *env, int csrno,
1937 target_ulong *ret_val,
1938 target_ulong new_val, target_ulong wr_mask)
1940 uint64_t rval;
1941 RISCVException ret;
1943 ret = rmw_vsip64(env, csrno, &rval,
1944 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1945 if (ret_val) {
1946 *ret_val = rval >> 32;
1949 return ret;
1952 static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
1953 uint64_t *ret_val,
1954 uint64_t new_val, uint64_t wr_mask)
1956 RISCVException ret;
1957 uint64_t mask = env->mideleg & sip_writable_mask;
1959 if (riscv_cpu_virt_enabled(env)) {
1960 if (env->hvictl & HVICTL_VTI) {
1961 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
1963 ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
1964 } else {
1965 ret = rmw_mip64(env, csrno, ret_val, new_val, wr_mask & mask);
1968 if (ret_val) {
1969 *ret_val &= env->mideleg & S_MODE_INTERRUPTS;
1972 return ret;
1975 static RISCVException rmw_sip(CPURISCVState *env, int csrno,
1976 target_ulong *ret_val,
1977 target_ulong new_val, target_ulong wr_mask)
1979 uint64_t rval;
1980 RISCVException ret;
1982 ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask);
1983 if (ret_val) {
1984 *ret_val = rval;
1987 return ret;
1990 static RISCVException rmw_siph(CPURISCVState *env, int csrno,
1991 target_ulong *ret_val,
1992 target_ulong new_val, target_ulong wr_mask)
1994 uint64_t rval;
1995 RISCVException ret;
1997 ret = rmw_sip64(env, csrno, &rval,
1998 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
1999 if (ret_val) {
2000 *ret_val = rval >> 32;
2003 return ret;
2006 /* Supervisor Protection and Translation */
2007 static RISCVException read_satp(CPURISCVState *env, int csrno,
2008 target_ulong *val)
2010 if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
2011 *val = 0;
2012 return RISCV_EXCP_NONE;
2015 if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
2016 return RISCV_EXCP_ILLEGAL_INST;
2017 } else {
2018 *val = env->satp;
2021 return RISCV_EXCP_NONE;
2024 static RISCVException write_satp(CPURISCVState *env, int csrno,
2025 target_ulong val)
2027 target_ulong vm, mask;
2029 if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
2030 return RISCV_EXCP_NONE;
2033 if (riscv_cpu_mxl(env) == MXL_RV32) {
2034 vm = validate_vm(env, get_field(val, SATP32_MODE));
2035 mask = (val ^ env->satp) & (SATP32_MODE | SATP32_ASID | SATP32_PPN);
2036 } else {
2037 vm = validate_vm(env, get_field(val, SATP64_MODE));
2038 mask = (val ^ env->satp) & (SATP64_MODE | SATP64_ASID | SATP64_PPN);
2041 if (vm && mask) {
2042 if (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_TVM)) {
2043 return RISCV_EXCP_ILLEGAL_INST;
2044 } else {
2046 * The ISA defines SATP.MODE=Bare as "no translation", but we still
2047 * pass these through QEMU's TLB emulation as it improves
2048 * performance. Flushing the TLB on SATP writes with paging
2049 * enabled avoids leaking those invalid cached mappings.
2051 tlb_flush(env_cpu(env));
2052 env->satp = val;
2055 return RISCV_EXCP_NONE;
2058 static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val)
2060 int irq, ret;
2061 target_ulong topei;
2062 uint64_t vseip, vsgein;
2063 uint32_t iid, iprio, hviid, hviprio, gein;
2064 uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS];
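/*
 * Build a small list of candidate (iid, iprio) pairs -- the VS external
 * interrupt taken from the selected guest interrupt file or from hvictl,
 * plus any other pending VS-level interrupt -- and then report the
 * candidate with the best (numerically lowest) priority.
 */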
2066 gein = get_field(env->hstatus, HSTATUS_VGEIN);
2067 hviid = get_field(env->hvictl, HVICTL_IID);
2068 hviprio = get_field(env->hvictl, HVICTL_IPRIO);
2070 if (gein) {
2071 vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
2072 vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP;
2073 if (gein <= env->geilen && vseip) {
2074 siid[scount] = IRQ_S_EXT;
2075 siprio[scount] = IPRIO_MMAXIPRIO + 1;
2076 if (env->aia_ireg_rmw_fn[PRV_S]) {
2078 * Call machine specific IMSIC register emulation for
2079 * reading TOPEI.
2081 ret = env->aia_ireg_rmw_fn[PRV_S](
2082 env->aia_ireg_rmw_fn_arg[PRV_S],
2083 AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein,
2084 riscv_cpu_mxl_bits(env)),
2085 &topei, 0, 0);
2086 if (!ret && topei) {
2087 siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK;
2090 scount++;
2092 } else {
2093 if (hviid == IRQ_S_EXT && hviprio) {
2094 siid[scount] = IRQ_S_EXT;
2095 siprio[scount] = hviprio;
2096 scount++;
2100 if (env->hvictl & HVICTL_VTI) {
2101 if (hviid != IRQ_S_EXT) {
2102 siid[scount] = hviid;
2103 siprio[scount] = hviprio;
2104 scount++;
2106 } else {
2107 irq = riscv_cpu_vsirq_pending(env);
2108 if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) {
2109 siid[scount] = irq;
2110 siprio[scount] = env->hviprio[irq];
2111 scount++;
2115 iid = 0;
2116 iprio = UINT_MAX;
2117 for (s = 0; s < scount; s++) {
2118 if (siprio[s] < iprio) {
2119 iid = siid[s];
2120 iprio = siprio[s];
2124 if (iid) {
2125 if (env->hvictl & HVICTL_IPRIOM) {
2126 if (iprio > IPRIO_MMAXIPRIO) {
2127 iprio = IPRIO_MMAXIPRIO;
2129 if (!iprio) {
2130 if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) {
2131 iprio = IPRIO_MMAXIPRIO;
2134 } else {
2135 iprio = 1;
2137 } else {
2138 iprio = 0;
2141 *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT;
2142 *val |= iprio;
2143 return RISCV_EXCP_NONE;
2146 static int read_stopi(CPURISCVState *env, int csrno, target_ulong *val)
2148 int irq;
2149 uint8_t iprio;
2151 if (riscv_cpu_virt_enabled(env)) {
2152 return read_vstopi(env, CSR_VSTOPI, val);
2155 irq = riscv_cpu_sirq_pending(env);
2156 if (irq <= 0 || irq > 63) {
2157 *val = 0;
2158 } else {
2159 iprio = env->siprio[irq];
2160 if (!iprio) {
2161 if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) {
2162 iprio = IPRIO_MMAXIPRIO;
2165 *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT;
2166 *val |= iprio;
2169 return RISCV_EXCP_NONE;
2172 /* Hypervisor Extensions */
2173 static RISCVException read_hstatus(CPURISCVState *env, int csrno,
2174 target_ulong *val)
2176 *val = env->hstatus;
2177 if (riscv_cpu_mxl(env) != MXL_RV32) {
2178 /* We only support 64-bit VSXL */
2179 *val = set_field(*val, HSTATUS_VSXL, 2);
2181 /* We only support little endian */
2182 *val = set_field(*val, HSTATUS_VSBE, 0);
2183 return RISCV_EXCP_NONE;
2186 static RISCVException write_hstatus(CPURISCVState *env, int csrno,
2187 target_ulong val)
2189 env->hstatus = val;
2190 if (riscv_cpu_mxl(env) != MXL_RV32 && get_field(val, HSTATUS_VSXL) != 2) {
2191 qemu_log_mask(LOG_UNIMP, "QEMU does not support mixed HSXLEN options.");
2193 if (get_field(val, HSTATUS_VSBE) != 0) {
2194 qemu_log_mask(LOG_UNIMP, "QEMU does not support big endian guests.");
2196 return RISCV_EXCP_NONE;
2199 static RISCVException read_hedeleg(CPURISCVState *env, int csrno,
2200 target_ulong *val)
2202 *val = env->hedeleg;
2203 return RISCV_EXCP_NONE;
2206 static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
2207 target_ulong val)
2209 env->hedeleg = val & vs_delegable_excps;
2210 return RISCV_EXCP_NONE;
2213 static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno,
2214 uint64_t *ret_val,
2215 uint64_t new_val, uint64_t wr_mask)
2217 uint64_t mask = wr_mask & vs_delegable_ints;
2219 if (ret_val) {
2220 *ret_val = env->hideleg & vs_delegable_ints;
2223 env->hideleg = (env->hideleg & ~mask) | (new_val & mask);
2224 return RISCV_EXCP_NONE;
2227 static RISCVException rmw_hideleg(CPURISCVState *env, int csrno,
2228 target_ulong *ret_val,
2229 target_ulong new_val, target_ulong wr_mask)
2231 uint64_t rval;
2232 RISCVException ret;
2234 ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask);
2235 if (ret_val) {
2236 *ret_val = rval;
2239 return ret;
2242 static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno,
2243 target_ulong *ret_val,
2244 target_ulong new_val, target_ulong wr_mask)
2246 uint64_t rval;
2247 RISCVException ret;
2249 ret = rmw_hideleg64(env, csrno, &rval,
2250 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2251 if (ret_val) {
2252 *ret_val = rval >> 32;
2255 return ret;
2258 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
2259 uint64_t *ret_val,
2260 uint64_t new_val, uint64_t wr_mask)
2262 RISCVException ret;
2264 ret = rmw_mip64(env, csrno, ret_val, new_val,
2265 wr_mask & hvip_writable_mask);
2266 if (ret_val) {
2267 *ret_val &= VS_MODE_INTERRUPTS;
2270 return ret;
2273 static RISCVException rmw_hvip(CPURISCVState *env, int csrno,
2274 target_ulong *ret_val,
2275 target_ulong new_val, target_ulong wr_mask)
2277 uint64_t rval;
2278 RISCVException ret;
2280 ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask);
2281 if (ret_val) {
2282 *ret_val = rval;
2285 return ret;
2288 static RISCVException rmw_hviph(CPURISCVState *env, int csrno,
2289 target_ulong *ret_val,
2290 target_ulong new_val, target_ulong wr_mask)
2292 uint64_t rval;
2293 RISCVException ret;
2295 ret = rmw_hvip64(env, csrno, &rval,
2296 ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
2297 if (ret_val) {
2298 *ret_val = rval >> 32;
2301 return ret;
2304 static RISCVException rmw_hip(CPURISCVState *env, int csrno,
2305 target_ulong *ret_value,
2306 target_ulong new_value, target_ulong write_mask)
2308 int ret = rmw_mip(env, csrno, ret_value, new_value,
2309 write_mask & hip_writable_mask);
2311 if (ret_value) {
2312 *ret_value &= HS_MODE_INTERRUPTS;
2314 return ret;
2317 static RISCVException rmw_hie(CPURISCVState *env, int csrno,
2318 target_ulong *ret_val,
2319 target_ulong new_val, target_ulong wr_mask)
2321 uint64_t rval;
2322 RISCVException ret;
2324 ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & HS_MODE_INTERRUPTS);
2325 if (ret_val) {
2326 *ret_val = rval & HS_MODE_INTERRUPTS;
2329 return ret;
2332 static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
2333 target_ulong *val)
2335 *val = env->hcounteren;
2336 return RISCV_EXCP_NONE;
2339 static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
2340 target_ulong val)
2342 env->hcounteren = val;
2343 return RISCV_EXCP_NONE;
2346 static RISCVException read_hgeie(CPURISCVState *env, int csrno,
2347 target_ulong *val)
2349 if (val) {
2350 *val = env->hgeie;
2352 return RISCV_EXCP_NONE;
2355 static RISCVException write_hgeie(CPURISCVState *env, int csrno,
2356 target_ulong val)
2358 /* Only bits GEILEN:1 are implemented; bit 0 is never implemented */
2359 val &= ((((target_ulong)1) << env->geilen) - 1) << 1;
2360 env->hgeie = val;
2361 /* Update mip.SGEIP bit */
2362 riscv_cpu_update_mip(env_archcpu(env), MIP_SGEIP,
2363 BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
2364 return RISCV_EXCP_NONE;
2367 static RISCVException read_htval(CPURISCVState *env, int csrno,
2368 target_ulong *val)
2370 *val = env->htval;
2371 return RISCV_EXCP_NONE;
2374 static RISCVException write_htval(CPURISCVState *env, int csrno,
2375 target_ulong val)
2377 env->htval = val;
2378 return RISCV_EXCP_NONE;
2381 static RISCVException read_htinst(CPURISCVState *env, int csrno,
2382 target_ulong *val)
2384 *val = env->htinst;
2385 return RISCV_EXCP_NONE;
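/* Writes to htinst are accepted but discarded by this implementation. */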
2388 static RISCVException write_htinst(CPURISCVState *env, int csrno,
2389 target_ulong val)
2391 return RISCV_EXCP_NONE;
2394 static RISCVException read_hgeip(CPURISCVState *env, int csrno,
2395 target_ulong *val)
2397 if (val) {
2398 *val = env->hgeip;
2400 return RISCV_EXCP_NONE;
2403 static RISCVException read_hgatp(CPURISCVState *env, int csrno,
2404 target_ulong *val)
2406 *val = env->hgatp;
2407 return RISCV_EXCP_NONE;
2410 static RISCVException write_hgatp(CPURISCVState *env, int csrno,
2411 target_ulong val)
2413 env->hgatp = val;
2414 return RISCV_EXCP_NONE;
2417 static RISCVException read_htimedelta(CPURISCVState *env, int csrno,
2418 target_ulong *val)
2420 if (!env->rdtime_fn) {
2421 return RISCV_EXCP_ILLEGAL_INST;
2424 *val = env->htimedelta;
2425 return RISCV_EXCP_NONE;
2428 static RISCVException write_htimedelta(CPURISCVState *env, int csrno,
2429 target_ulong val)
2431 if (!env->rdtime_fn) {
2432 return RISCV_EXCP_ILLEGAL_INST;
2435 if (riscv_cpu_mxl(env) == MXL_RV32) {
2436 env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
2437 } else {
2438 env->htimedelta = val;
2440 return RISCV_EXCP_NONE;
2443 static RISCVException read_htimedeltah(CPURISCVState *env, int csrno,
2444 target_ulong *val)
2446 if (!env->rdtime_fn) {
2447 return RISCV_EXCP_ILLEGAL_INST;
2450 *val = env->htimedelta >> 32;
2451 return RISCV_EXCP_NONE;
2454 static RISCVException write_htimedeltah(CPURISCVState *env, int csrno,
2455 target_ulong val)
2457 if (!env->rdtime_fn) {
2458 return RISCV_EXCP_ILLEGAL_INST;
2461 env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);
2462 return RISCV_EXCP_NONE;
2465 static int read_hvictl(CPURISCVState *env, int csrno, target_ulong *val)
2467 *val = env->hvictl;
2468 return RISCV_EXCP_NONE;
2471 static int write_hvictl(CPURISCVState *env, int csrno, target_ulong val)
2473 env->hvictl = val & HVICTL_VALID_MASK;
2474 return RISCV_EXCP_NONE;
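/*
 * Each hviprioX CSR packs one byte-sized priority per interrupt source:
 * four entries on RV32 and eight on RV64. The helpers below translate
 * between the CSR layout and the env->hviprio[] array.
 */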
2477 static int read_hvipriox(CPURISCVState *env, int first_index,
2478 uint8_t *iprio, target_ulong *val)
2480 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
2482 /* First index has to be a multiple of the number of IRQs per register */
2483 if (first_index % num_irqs) {
2484 return (riscv_cpu_virt_enabled(env)) ?
2485 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
2488 /* Fill up the return value */
2489 *val = 0;
2490 for (i = 0; i < num_irqs; i++) {
2491 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
2492 continue;
2494 if (rdzero) {
2495 continue;
2497 *val |= ((target_ulong)iprio[irq]) << (i * 8);
2500 return RISCV_EXCP_NONE;
2503 static int write_hvipriox(CPURISCVState *env, int first_index,
2504 uint8_t *iprio, target_ulong val)
2506 int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32);
2508 /* First index has to be a multiple of the number of IRQs per register */
2509 if (first_index % num_irqs) {
2510 return (riscv_cpu_virt_enabled(env)) ?
2511 RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST;
2514 /* Fill up the priority array */
2515 for (i = 0; i < num_irqs; i++) {
2516 if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) {
2517 continue;
2519 if (rdzero) {
2520 iprio[irq] = 0;
2521 } else {
2522 iprio[irq] = (val >> (i * 8)) & 0xff;
2526 return RISCV_EXCP_NONE;
2529 static int read_hviprio1(CPURISCVState *env, int csrno, target_ulong *val)
2531 return read_hvipriox(env, 0, env->hviprio, val);
2534 static int write_hviprio1(CPURISCVState *env, int csrno, target_ulong val)
2536 return write_hvipriox(env, 0, env->hviprio, val);
2539 static int read_hviprio1h(CPURISCVState *env, int csrno, target_ulong *val)
2541 return read_hvipriox(env, 4, env->hviprio, val);
2544 static int write_hviprio1h(CPURISCVState *env, int csrno, target_ulong val)
2546 return write_hvipriox(env, 4, env->hviprio, val);
2549 static int read_hviprio2(CPURISCVState *env, int csrno, target_ulong *val)
2551 return read_hvipriox(env, 8, env->hviprio, val);
2554 static int write_hviprio2(CPURISCVState *env, int csrno, target_ulong val)
2556 return write_hvipriox(env, 8, env->hviprio, val);
2559 static int read_hviprio2h(CPURISCVState *env, int csrno, target_ulong *val)
2561 return read_hvipriox(env, 12, env->hviprio, val);
2564 static int write_hviprio2h(CPURISCVState *env, int csrno, target_ulong val)
2566 return write_hvipriox(env, 12, env->hviprio, val);
2569 /* Virtual CSR Registers */
2570 static RISCVException read_vsstatus(CPURISCVState *env, int csrno,
2571 target_ulong *val)
2573 *val = env->vsstatus;
2574 return RISCV_EXCP_NONE;
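/*
 * If the written value has a zero UXL field, the mask below preserves the
 * current vsstatus.UXL instead of clearing it.
 */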
2577 static RISCVException write_vsstatus(CPURISCVState *env, int csrno,
2578 target_ulong val)
2580 uint64_t mask = (target_ulong)-1;
2581 if ((val & VSSTATUS64_UXL) == 0) {
2582 mask &= ~VSSTATUS64_UXL;
2584 env->vsstatus = (env->vsstatus & ~mask) | (uint64_t)val;
2585 return RISCV_EXCP_NONE;
2588 static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val)
2590 *val = env->vstvec;
2591 return RISCV_EXCP_NONE;
2594 static RISCVException write_vstvec(CPURISCVState *env, int csrno,
2595 target_ulong val)
2597 env->vstvec = val;
2598 return RISCV_EXCP_NONE;
2601 static RISCVException read_vsscratch(CPURISCVState *env, int csrno,
2602 target_ulong *val)
2604 *val = env->vsscratch;
2605 return RISCV_EXCP_NONE;
2608 static RISCVException write_vsscratch(CPURISCVState *env, int csrno,
2609 target_ulong val)
2611 env->vsscratch = val;
2612 return RISCV_EXCP_NONE;
2615 static RISCVException read_vsepc(CPURISCVState *env, int csrno,
2616 target_ulong *val)
2618 *val = env->vsepc;
2619 return RISCV_EXCP_NONE;
2622 static RISCVException write_vsepc(CPURISCVState *env, int csrno,
2623 target_ulong val)
2625 env->vsepc = val;
2626 return RISCV_EXCP_NONE;
2629 static RISCVException read_vscause(CPURISCVState *env, int csrno,
2630 target_ulong *val)
2632 *val = env->vscause;
2633 return RISCV_EXCP_NONE;
2636 static RISCVException write_vscause(CPURISCVState *env, int csrno,
2637 target_ulong val)
2639 env->vscause = val;
2640 return RISCV_EXCP_NONE;
2643 static RISCVException read_vstval(CPURISCVState *env, int csrno,
2644 target_ulong *val)
2646 *val = env->vstval;
2647 return RISCV_EXCP_NONE;
2650 static RISCVException write_vstval(CPURISCVState *env, int csrno,
2651 target_ulong val)
2653 env->vstval = val;
2654 return RISCV_EXCP_NONE;
2657 static RISCVException read_vsatp(CPURISCVState *env, int csrno,
2658 target_ulong *val)
2660 *val = env->vsatp;
2661 return RISCV_EXCP_NONE;
2664 static RISCVException write_vsatp(CPURISCVState *env, int csrno,
2665 target_ulong val)
2667 env->vsatp = val;
2668 return RISCV_EXCP_NONE;
2671 static RISCVException read_mtval2(CPURISCVState *env, int csrno,
2672 target_ulong *val)
2674 *val = env->mtval2;
2675 return RISCV_EXCP_NONE;
2678 static RISCVException write_mtval2(CPURISCVState *env, int csrno,
2679 target_ulong val)
2681 env->mtval2 = val;
2682 return RISCV_EXCP_NONE;
2685 static RISCVException read_mtinst(CPURISCVState *env, int csrno,
2686 target_ulong *val)
2688 *val = env->mtinst;
2689 return RISCV_EXCP_NONE;
2692 static RISCVException write_mtinst(CPURISCVState *env, int csrno,
2693 target_ulong val)
2695 env->mtinst = val;
2696 return RISCV_EXCP_NONE;
2699 /* Physical Memory Protection */
2700 static RISCVException read_mseccfg(CPURISCVState *env, int csrno,
2701 target_ulong *val)
2703 *val = mseccfg_csr_read(env);
2704 return RISCV_EXCP_NONE;
2707 static RISCVException write_mseccfg(CPURISCVState *env, int csrno,
2708 target_ulong val)
2710 mseccfg_csr_write(env, val);
2711 return RISCV_EXCP_NONE;
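/*
 * On RV64 each pmpcfg CSR holds eight 8-bit PMP configuration fields, so
 * only the even-numbered pmpcfg registers exist; odd indices are rejected.
 */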
2714 static bool check_pmp_reg_index(CPURISCVState *env, uint32_t reg_index)
2716 /* TODO: RV128 restriction check */
2717 if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) {
2718 return false;
2720 return true;
2723 static RISCVException read_pmpcfg(CPURISCVState *env, int csrno,
2724 target_ulong *val)
2726 uint32_t reg_index = csrno - CSR_PMPCFG0;
2728 if (!check_pmp_reg_index(env, reg_index)) {
2729 return RISCV_EXCP_ILLEGAL_INST;
2731 *val = pmpcfg_csr_read(env, csrno - CSR_PMPCFG0);
2732 return RISCV_EXCP_NONE;
2735 static RISCVException write_pmpcfg(CPURISCVState *env, int csrno,
2736 target_ulong val)
2738 uint32_t reg_index = csrno - CSR_PMPCFG0;
2740 if (!check_pmp_reg_index(env, reg_index)) {
2741 return RISCV_EXCP_ILLEGAL_INST;
2743 pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val);
2744 return RISCV_EXCP_NONE;
2747 static RISCVException read_pmpaddr(CPURISCVState *env, int csrno,
2748 target_ulong *val)
2750 *val = pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
2751 return RISCV_EXCP_NONE;
2754 static RISCVException write_pmpaddr(CPURISCVState *env, int csrno,
2755 target_ulong val)
2757 pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val);
2758 return RISCV_EXCP_NONE;
2761 static RISCVException read_tselect(CPURISCVState *env, int csrno,
2762 target_ulong *val)
2764 *val = tselect_csr_read(env);
2765 return RISCV_EXCP_NONE;
2768 static RISCVException write_tselect(CPURISCVState *env, int csrno,
2769 target_ulong val)
2771 tselect_csr_write(env, val);
2772 return RISCV_EXCP_NONE;
2775 static RISCVException read_tdata(CPURISCVState *env, int csrno,
2776 target_ulong *val)
2778 /* return 0 in tdata1 to end the trigger enumeration */
2779 if (env->trigger_cur >= TRIGGER_NUM && csrno == CSR_TDATA1) {
2780 *val = 0;
2781 return RISCV_EXCP_NONE;
2784 if (!tdata_available(env, csrno - CSR_TDATA1)) {
2785 return RISCV_EXCP_ILLEGAL_INST;
2788 *val = tdata_csr_read(env, csrno - CSR_TDATA1);
2789 return RISCV_EXCP_NONE;
2792 static RISCVException write_tdata(CPURISCVState *env, int csrno,
2793 target_ulong val)
2795 if (!tdata_available(env, csrno - CSR_TDATA1)) {
2796 return RISCV_EXCP_ILLEGAL_INST;
2799 tdata_csr_write(env, csrno - CSR_TDATA1, val);
2800 return RISCV_EXCP_NONE;
2804 * Functions to access Pointer Masking feature registers.
2805 * We have to check whether the current privilege level is allowed to
2806 * modify the CSR in the given mode.
2808 static bool check_pm_current_disabled(CPURISCVState *env, int csrno)
2810 int csr_priv = get_field(csrno, 0x300);
2811 int pm_current;
2813 if (env->debugger) {
2814 return false;
2817 * If the privilege levels differ, we are accessing the CSR from a higher
2818 * privilege level, so allow the access.
2820 if (env->priv != csr_priv) {
2821 return false;
2823 switch (env->priv) {
2824 case PRV_M:
2825 pm_current = get_field(env->mmte, M_PM_CURRENT);
2826 break;
2827 case PRV_S:
2828 pm_current = get_field(env->mmte, S_PM_CURRENT);
2829 break;
2830 case PRV_U:
2831 pm_current = get_field(env->mmte, U_PM_CURRENT);
2832 break;
2833 default:
2834 g_assert_not_reached();
2836 /* Same privilege level, so only allow modifying the CSR if pm.current == 1 */
2837 return !pm_current;
2840 static RISCVException read_mmte(CPURISCVState *env, int csrno,
2841 target_ulong *val)
2843 *val = env->mmte & MMTE_MASK;
2844 return RISCV_EXCP_NONE;
2847 static RISCVException write_mmte(CPURISCVState *env, int csrno,
2848 target_ulong val)
2850 uint64_t mstatus;
2851 target_ulong wpri_val = val & MMTE_MASK;
2853 if (val != wpri_val) {
2854 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
2855 "MMTE: WPRI violation written 0x", val,
2856 "vs expected 0x", wpri_val);
2858 /* for machine mode pm.current is hardwired to 1 */
2859 wpri_val |= MMTE_M_PM_CURRENT;
2861 /* hardwiring pm.instruction bit to 0, since it's not supported yet */
2862 wpri_val &= ~(MMTE_M_PM_INSN | MMTE_S_PM_INSN | MMTE_U_PM_INSN);
2863 env->mmte = wpri_val | PM_EXT_DIRTY;
2864 riscv_cpu_update_mask(env);
2866 /* Set XS and SD bits, since PM CSRs are dirty */
2867 mstatus = env->mstatus | MSTATUS_XS;
2868 write_mstatus(env, csrno, mstatus);
2869 return RISCV_EXCP_NONE;
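/*
 * smte and umte are views of mmte: the S- and U-mode writers below merge
 * their masked bits into mmte via write_mmte().
 */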
2872 static RISCVException read_smte(CPURISCVState *env, int csrno,
2873 target_ulong *val)
2875 *val = env->mmte & SMTE_MASK;
2876 return RISCV_EXCP_NONE;
2879 static RISCVException write_smte(CPURISCVState *env, int csrno,
2880 target_ulong val)
2882 target_ulong wpri_val = val & SMTE_MASK;
2884 if (val != wpri_val) {
2885 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
2886 "SMTE: WPRI violation written 0x", val,
2887 "vs expected 0x", wpri_val);
2890 /* if pm.current==0 we can't modify current PM CSRs */
2891 if (check_pm_current_disabled(env, csrno)) {
2892 return RISCV_EXCP_NONE;
2895 wpri_val |= (env->mmte & ~SMTE_MASK);
2896 write_mmte(env, csrno, wpri_val);
2897 return RISCV_EXCP_NONE;
2900 static RISCVException read_umte(CPURISCVState *env, int csrno,
2901 target_ulong *val)
2903 *val = env->mmte & UMTE_MASK;
2904 return RISCV_EXCP_NONE;
2907 static RISCVException write_umte(CPURISCVState *env, int csrno,
2908 target_ulong val)
2910 target_ulong wpri_val = val & UMTE_MASK;
2912 if (val != wpri_val) {
2913 qemu_log_mask(LOG_GUEST_ERROR, "%s" TARGET_FMT_lx " %s" TARGET_FMT_lx "\n",
2914 "UMTE: WPRI violation written 0x", val,
2915 "vs expected 0x", wpri_val);
2918 if (check_pm_current_disabled(env, csrno)) {
2919 return RISCV_EXCP_NONE;
2922 wpri_val |= (env->mmte & ~UMTE_MASK);
2923 write_mmte(env, csrno, wpri_val);
2924 return RISCV_EXCP_NONE;
2927 static RISCVException read_mpmmask(CPURISCVState *env, int csrno,
2928 target_ulong *val)
2930 *val = env->mpmmask;
2931 return RISCV_EXCP_NONE;
2934 static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
2935 target_ulong val)
2937 uint64_t mstatus;
2939 env->mpmmask = val;
2940 if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
2941 env->cur_pmmask = val;
2943 env->mmte |= PM_EXT_DIRTY;
2945 /* Set XS and SD bits, since PM CSRs are dirty */
2946 mstatus = env->mstatus | MSTATUS_XS;
2947 write_mstatus(env, csrno, mstatus);
2948 return RISCV_EXCP_NONE;
2951 static RISCVException read_spmmask(CPURISCVState *env, int csrno,
2952 target_ulong *val)
2954 *val = env->spmmask;
2955 return RISCV_EXCP_NONE;
2958 static RISCVException write_spmmask(CPURISCVState *env, int csrno,
2959 target_ulong val)
2961 uint64_t mstatus;
2963 /* if pm.current==0 we can't modify current PM CSRs */
2964 if (check_pm_current_disabled(env, csrno)) {
2965 return RISCV_EXCP_NONE;
2967 env->spmmask = val;
2968 if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
2969 env->cur_pmmask = val;
2971 env->mmte |= PM_EXT_DIRTY;
2973 /* Set XS and SD bits, since PM CSRs are dirty */
2974 mstatus = env->mstatus | MSTATUS_XS;
2975 write_mstatus(env, csrno, mstatus);
2976 return RISCV_EXCP_NONE;
2979 static RISCVException read_upmmask(CPURISCVState *env, int csrno,
2980 target_ulong *val)
2982 *val = env->upmmask;
2983 return RISCV_EXCP_NONE;
2986 static RISCVException write_upmmask(CPURISCVState *env, int csrno,
2987 target_ulong val)
2989 uint64_t mstatus;
2991 /* if pm.current==0 we can't modify current PM CSRs */
2992 if (check_pm_current_disabled(env, csrno)) {
2993 return RISCV_EXCP_NONE;
2995 env->upmmask = val;
2996 if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
2997 env->cur_pmmask = val;
2999 env->mmte |= PM_EXT_DIRTY;
3001 /* Set XS and SD bits, since PM CSRs are dirty */
3002 mstatus = env->mstatus | MSTATUS_XS;
3003 write_mstatus(env, csrno, mstatus);
3004 return RISCV_EXCP_NONE;
3007 static RISCVException read_mpmbase(CPURISCVState *env, int csrno,
3008 target_ulong *val)
3010 *val = env->mpmbase;
3011 return RISCV_EXCP_NONE;
3014 static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
3015 target_ulong val)
3017 uint64_t mstatus;
3019 env->mpmbase = val;
3020 if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
3021 env->cur_pmbase = val;
3023 env->mmte |= PM_EXT_DIRTY;
3025 /* Set XS and SD bits, since PM CSRs are dirty */
3026 mstatus = env->mstatus | MSTATUS_XS;
3027 write_mstatus(env, csrno, mstatus);
3028 return RISCV_EXCP_NONE;
3031 static RISCVException read_spmbase(CPURISCVState *env, int csrno,
3032 target_ulong *val)
3034 *val = env->spmbase;
3035 return RISCV_EXCP_NONE;
3038 static RISCVException write_spmbase(CPURISCVState *env, int csrno,
3039 target_ulong val)
3041 uint64_t mstatus;
3043 /* if pm.current==0 we can't modify current PM CSRs */
3044 if (check_pm_current_disabled(env, csrno)) {
3045 return RISCV_EXCP_NONE;
3047 env->spmbase = val;
3048 if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
3049 env->cur_pmbase = val;
3051 env->mmte |= PM_EXT_DIRTY;
3053 /* Set XS and SD bits, since PM CSRs are dirty */
3054 mstatus = env->mstatus | MSTATUS_XS;
3055 write_mstatus(env, csrno, mstatus);
3056 return RISCV_EXCP_NONE;
3059 static RISCVException read_upmbase(CPURISCVState *env, int csrno,
3060 target_ulong *val)
3062 *val = env->upmbase;
3063 return RISCV_EXCP_NONE;
3066 static RISCVException write_upmbase(CPURISCVState *env, int csrno,
3067 target_ulong val)
3069 uint64_t mstatus;
3071 /* if pm.current==0 we can't modify current PM CSRs */
3072 if (check_pm_current_disabled(env, csrno)) {
3073 return RISCV_EXCP_NONE;
3075 env->upmbase = val;
3076 if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
3077 env->cur_pmbase = val;
3079 env->mmte |= PM_EXT_DIRTY;
3081 /* Set XS and SD bits, since PM CSRs are dirty */
3082 mstatus = env->mstatus | MSTATUS_XS;
3083 write_mstatus(env, csrno, mstatus);
3084 return RISCV_EXCP_NONE;
3087 #endif
3089 /* Crypto Extension */
3090 static RISCVException rmw_seed(CPURISCVState *env, int csrno,
3091 target_ulong *ret_value,
3092 target_ulong new_value,
3093 target_ulong write_mask)
3095 uint16_t random_v;
3096 Error *random_e = NULL;
3097 int random_r;
3098 target_ulong rval;
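    /* The seed CSR supplies 16 bits of entropy, hence the 2-byte request. */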
3100 random_r = qemu_guest_getrandom(&random_v, 2, &random_e);
3101 if (unlikely(random_r < 0)) {
3103 * Failed, for unknown reasons in the crypto subsystem.
3104 * The best we can do is log the reason and return a
3105 * failure indication to the guest. There is no reason
3106 * we know to expect the failure to be transitory, so
3107 * indicate DEAD to avoid having the guest spin on WAIT.
3109 qemu_log_mask(LOG_UNIMP, "%s: Crypto failure: %s",
3110 __func__, error_get_pretty(random_e));
3111 error_free(random_e);
3112 rval = SEED_OPST_DEAD;
3113 } else {
3114 rval = random_v | SEED_OPST_ES16;
3117 if (ret_value) {
3118 *ret_value = rval;
3121 return RISCV_EXCP_NONE;
3125 * riscv_csrrw - read and/or update control and status register
3127 * csrr <-> riscv_csrrw(env, csrno, ret_value, 0, 0);
3128 * csrrw <-> riscv_csrrw(env, csrno, ret_value, value, -1);
3129 * csrrs <-> riscv_csrrw(env, csrno, ret_value, -1, value);
3130 * csrrc <-> riscv_csrrw(env, csrno, ret_value, 0, value);
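 *
 * For example (illustrative only), emulating `csrrs rd, mstatus, rs1` maps to
 * riscv_csrrw(env, CSR_MSTATUS, &rd_val, -1, rs1_val), where rd_val receives
 * the old CSR value.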
3133 static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
3134 int csrno,
3135 bool write_mask,
3136 RISCVCPU *cpu)
3138 /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */
3139 int read_only = get_field(csrno, 0xC00) == 3;
3140 int csr_min_priv = csr_ops[csrno].min_priv_ver;
3141 #if !defined(CONFIG_USER_ONLY)
3142 int csr_priv, effective_priv = env->priv;
3144 if (riscv_has_ext(env, RVH) && env->priv == PRV_S) {
3146 * We are in either HS or VS mode.
3147 * Add 1 to the effective privilege level to allow us to access the
3148 * Hypervisor CSRs. The `hmode` predicate will determine if access
3149 * should be allowed (HS) or if a virtual instruction exception should
3150 * be raised (VS).
3152 effective_priv++;
3155 csr_priv = get_field(csrno, 0x300);
3156 if (!env->debugger && (effective_priv < csr_priv)) {
3157 if (csr_priv == (PRV_S + 1) && riscv_cpu_virt_enabled(env)) {
3158 return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
3160 return RISCV_EXCP_ILLEGAL_INST;
3162 #endif
3163 if (write_mask && read_only) {
3164 return RISCV_EXCP_ILLEGAL_INST;
3167 /* ensure the CSR extension is enabled. */
3168 if (!cpu->cfg.ext_icsr) {
3169 return RISCV_EXCP_ILLEGAL_INST;
3172 /* check predicate */
3173 if (!csr_ops[csrno].predicate) {
3174 return RISCV_EXCP_ILLEGAL_INST;
3177 if (env->priv_ver < csr_min_priv) {
3178 return RISCV_EXCP_ILLEGAL_INST;
3181 return csr_ops[csrno].predicate(env, csrno);
3184 static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno,
3185 target_ulong *ret_value,
3186 target_ulong new_value,
3187 target_ulong write_mask)
3189 RISCVException ret;
3190 target_ulong old_value;
3192 /* execute combined read/write operation if it exists */
3193 if (csr_ops[csrno].op) {
3194 return csr_ops[csrno].op(env, csrno, ret_value, new_value, write_mask);
3197 /* if no accessor exists then return failure */
3198 if (!csr_ops[csrno].read) {
3199 return RISCV_EXCP_ILLEGAL_INST;
3201 /* read old value */
3202 ret = csr_ops[csrno].read(env, csrno, &old_value);
3203 if (ret != RISCV_EXCP_NONE) {
3204 return ret;
3207 /* write value if writable and write mask set, otherwise drop writes */
3208 if (write_mask) {
3209 new_value = (old_value & ~write_mask) | (new_value & write_mask);
3210 if (csr_ops[csrno].write) {
3211 ret = csr_ops[csrno].write(env, csrno, new_value);
3212 if (ret != RISCV_EXCP_NONE) {
3213 return ret;
3218 /* return old value */
3219 if (ret_value) {
3220 *ret_value = old_value;
3223 return RISCV_EXCP_NONE;
3226 RISCVException riscv_csrrw(CPURISCVState *env, int csrno,
3227 target_ulong *ret_value,
3228 target_ulong new_value, target_ulong write_mask)
3230 RISCVCPU *cpu = env_archcpu(env);
3232 RISCVException ret = riscv_csrrw_check(env, csrno, write_mask, cpu);
3233 if (ret != RISCV_EXCP_NONE) {
3234 return ret;
3237 return riscv_csrrw_do64(env, csrno, ret_value, new_value, write_mask);
3240 static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno,
3241 Int128 *ret_value,
3242 Int128 new_value,
3243 Int128 write_mask)
3245 RISCVException ret;
3246 Int128 old_value;
3248 /* read old value */
3249 ret = csr_ops[csrno].read128(env, csrno, &old_value);
3250 if (ret != RISCV_EXCP_NONE) {
3251 return ret;
3254 /* write value if writable and write mask set, otherwise drop writes */
3255 if (int128_nz(write_mask)) {
3256 new_value = int128_or(int128_and(old_value, int128_not(write_mask)),
3257 int128_and(new_value, write_mask));
3258 if (csr_ops[csrno].write128) {
3259 ret = csr_ops[csrno].write128(env, csrno, new_value);
3260 if (ret != RISCV_EXCP_NONE) {
3261 return ret;
3263 } else if (csr_ops[csrno].write) {
3264 /* avoids having to write wrappers for all registers */
3265 ret = csr_ops[csrno].write(env, csrno, int128_getlo(new_value));
3266 if (ret != RISCV_EXCP_NONE) {
3267 return ret;
3272 /* return old value */
3273 if (ret_value) {
3274 *ret_value = old_value;
3277 return RISCV_EXCP_NONE;
3280 RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno,
3281 Int128 *ret_value,
3282 Int128 new_value, Int128 write_mask)
3284 RISCVException ret;
3285 RISCVCPU *cpu = env_archcpu(env);
3287 ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask), cpu);
3288 if (ret != RISCV_EXCP_NONE) {
3289 return ret;
3292 if (csr_ops[csrno].read128) {
3293 return riscv_csrrw_do128(env, csrno, ret_value, new_value, write_mask);
3297 * Fall back to the 64-bit version for now if no 128-bit alternative is
3298 * defined at all.
3299 * Note that some CSRs do not need to extend to MXLEN (their upper 64 bits
3300 * are not significant); for those, this fallback handles the accesses correctly.
3302 target_ulong old_value;
3303 ret = riscv_csrrw_do64(env, csrno, &old_value,
3304 int128_getlo(new_value),
3305 int128_getlo(write_mask));
3306 if (ret == RISCV_EXCP_NONE && ret_value) {
3307 *ret_value = int128_make64(old_value);
3309 return ret;
3313 * Debugger support. If not in user mode, set env->debugger before the
3314 * riscv_csrrw call and clear it after the call.
3316 RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno,
3317 target_ulong *ret_value,
3318 target_ulong new_value,
3319 target_ulong write_mask)
3321 RISCVException ret;
3322 #if !defined(CONFIG_USER_ONLY)
3323 env->debugger = true;
3324 #endif
3325 ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask);
3326 #if !defined(CONFIG_USER_ONLY)
3327 env->debugger = false;
3328 #endif
3329 return ret;
3332 /* Control and Status Register function table */
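/*
 * Judging from the designated initializers below, each entry provides
 * { name, predicate, read, write, rmw op, read128, write128 } plus an
 * optional .min_priv_ver; rmw-style CSRs leave read/write NULL and supply
 * only the combined op callback.
 */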
3333 riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
3334 /* User Floating-Point CSRs */
3335 [CSR_FFLAGS] = { "fflags", fs, read_fflags, write_fflags },
3336 [CSR_FRM] = { "frm", fs, read_frm, write_frm },
3337 [CSR_FCSR] = { "fcsr", fs, read_fcsr, write_fcsr },
3338 /* Vector CSRs */
3339 [CSR_VSTART] = { "vstart", vs, read_vstart, write_vstart,
3340 .min_priv_ver = PRIV_VERSION_1_12_0 },
3341 [CSR_VXSAT] = { "vxsat", vs, read_vxsat, write_vxsat,
3342 .min_priv_ver = PRIV_VERSION_1_12_0 },
3343 [CSR_VXRM] = { "vxrm", vs, read_vxrm, write_vxrm,
3344 .min_priv_ver = PRIV_VERSION_1_12_0 },
3345 [CSR_VCSR] = { "vcsr", vs, read_vcsr, write_vcsr,
3346 .min_priv_ver = PRIV_VERSION_1_12_0 },
3347 [CSR_VL] = { "vl", vs, read_vl,
3348 .min_priv_ver = PRIV_VERSION_1_12_0 },
3349 [CSR_VTYPE] = { "vtype", vs, read_vtype,
3350 .min_priv_ver = PRIV_VERSION_1_12_0 },
3351 [CSR_VLENB] = { "vlenb", vs, read_vlenb,
3352 .min_priv_ver = PRIV_VERSION_1_12_0 },
3353 /* User Timers and Counters */
3354 [CSR_CYCLE] = { "cycle", ctr, read_instret },
3355 [CSR_INSTRET] = { "instret", ctr, read_instret },
3356 [CSR_CYCLEH] = { "cycleh", ctr32, read_instreth },
3357 [CSR_INSTRETH] = { "instreth", ctr32, read_instreth },
3360 * In privileged mode, the monitor has to emulate the TIME CSRs only if the
3361 * rdtime callback is not provided by the machine/platform emulation.
3363 [CSR_TIME] = { "time", ctr, read_time },
3364 [CSR_TIMEH] = { "timeh", ctr32, read_timeh },
3366 /* Crypto Extension */
3367 [CSR_SEED] = { "seed", seed, NULL, NULL, rmw_seed },
3369 #if !defined(CONFIG_USER_ONLY)
3370 /* Machine Timers and Counters */
3371 [CSR_MCYCLE] = { "mcycle", any, read_instret },
3372 [CSR_MINSTRET] = { "minstret", any, read_instret },
3373 [CSR_MCYCLEH] = { "mcycleh", any32, read_instreth },
3374 [CSR_MINSTRETH] = { "minstreth", any32, read_instreth },
3376 /* Machine Information Registers */
3377 [CSR_MVENDORID] = { "mvendorid", any, read_mvendorid },
3378 [CSR_MARCHID] = { "marchid", any, read_marchid },
3379 [CSR_MIMPID] = { "mimpid", any, read_mimpid },
3380 [CSR_MHARTID] = { "mhartid", any, read_mhartid },
3382 [CSR_MCONFIGPTR] = { "mconfigptr", any, read_zero,
3383 .min_priv_ver = PRIV_VERSION_1_12_0 },
3384 /* Machine Trap Setup */
3385 [CSR_MSTATUS] = { "mstatus", any, read_mstatus, write_mstatus, NULL,
3386 read_mstatus_i128 },
3387 [CSR_MISA] = { "misa", any, read_misa, write_misa, NULL,
3388 read_misa_i128 },
3389 [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg },
3390 [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg },
3391 [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie },
3392 [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec },
3393 [CSR_MCOUNTEREN] = { "mcounteren", any, read_mcounteren, write_mcounteren },
3395 [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush, write_mstatush },
3397 /* Machine Trap Handling */
3398 [CSR_MSCRATCH] = { "mscratch", any, read_mscratch, write_mscratch, NULL,
3399 read_mscratch_i128, write_mscratch_i128 },
3400 [CSR_MEPC] = { "mepc", any, read_mepc, write_mepc },
3401 [CSR_MCAUSE] = { "mcause", any, read_mcause, write_mcause },
3402 [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval },
3403 [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip },
3405 /* Machine-Level Window to Indirectly Accessed Registers (AIA) */
3406 [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect },
3407 [CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg },
3409 /* Machine-Level Interrupts (AIA) */
3410 [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },
3412 /* Machine-Level IMSIC Interface (AIA) */
3413 [CSR_MSETEIPNUM] = { "mseteipnum", aia_any, NULL, NULL, rmw_xsetclreinum },
3414 [CSR_MCLREIPNUM] = { "mclreipnum", aia_any, NULL, NULL, rmw_xsetclreinum },
3415 [CSR_MSETEIENUM] = { "mseteienum", aia_any, NULL, NULL, rmw_xsetclreinum },
3416 [CSR_MCLREIENUM] = { "mclreienum", aia_any, NULL, NULL, rmw_xsetclreinum },
3417 [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei },
3419 /* Virtual Interrupts for Supervisor Level (AIA) */
3420 [CSR_MVIEN] = { "mvien", aia_any, read_zero, write_ignore },
3421 [CSR_MVIP] = { "mvip", aia_any, read_zero, write_ignore },
3423 /* Machine-Level High-Half CSRs (AIA) */
3424 [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh },
3425 [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh },
3426 [CSR_MVIENH] = { "mvienh", aia_any32, read_zero, write_ignore },
3427 [CSR_MVIPH] = { "mviph", aia_any32, read_zero, write_ignore },
3428 [CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph },
3430 /* Execution environment configuration */
3431 [CSR_MENVCFG] = { "menvcfg", any, read_menvcfg, write_menvcfg,
3432 .min_priv_ver = PRIV_VERSION_1_12_0 },
3433 [CSR_MENVCFGH] = { "menvcfgh", any32, read_menvcfgh, write_menvcfgh,
3434 .min_priv_ver = PRIV_VERSION_1_12_0 },
3435 [CSR_SENVCFG] = { "senvcfg", smode, read_senvcfg, write_senvcfg,
3436 .min_priv_ver = PRIV_VERSION_1_12_0 },
3437 [CSR_HENVCFG] = { "henvcfg", hmode, read_henvcfg, write_henvcfg,
3438 .min_priv_ver = PRIV_VERSION_1_12_0 },
3439 [CSR_HENVCFGH] = { "henvcfgh", hmode32, read_henvcfgh, write_henvcfgh,
3440 .min_priv_ver = PRIV_VERSION_1_12_0 },
3442 /* Supervisor Trap Setup */
3443 [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus, NULL,
3444 read_sstatus_i128 },
3445 [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie },
3446 [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec },
3447 [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren, write_scounteren },
3449 /* Supervisor Trap Handling */
3450 [CSR_SSCRATCH] = { "sscratch", smode, read_sscratch, write_sscratch, NULL,
3451 read_sscratch_i128, write_sscratch_i128 },
3452 [CSR_SEPC] = { "sepc", smode, read_sepc, write_sepc },
3453 [CSR_SCAUSE] = { "scause", smode, read_scause, write_scause },
3454 [CSR_STVAL] = { "stval", smode, read_stval, write_stval },
3455 [CSR_SIP] = { "sip", smode, NULL, NULL, rmw_sip },
3457 /* Supervisor Protection and Translation */
3458 [CSR_SATP] = { "satp", smode, read_satp, write_satp },
3460 /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
3461 [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect },
3462 [CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg },
3464 /* Supervisor-Level Interrupts (AIA) */
3465 [CSR_STOPI] = { "stopi", aia_smode, read_stopi },
3467 /* Supervisor-Level IMSIC Interface (AIA) */
3468 [CSR_SSETEIPNUM] = { "sseteipnum", aia_smode, NULL, NULL, rmw_xsetclreinum },
3469 [CSR_SCLREIPNUM] = { "sclreipnum", aia_smode, NULL, NULL, rmw_xsetclreinum },
3470 [CSR_SSETEIENUM] = { "sseteienum", aia_smode, NULL, NULL, rmw_xsetclreinum },
3471 [CSR_SCLREIENUM] = { "sclreienum", aia_smode, NULL, NULL, rmw_xsetclreinum },
3472 [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei },
3474 /* Supervisor-Level High-Half CSRs (AIA) */
3475 [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh },
3476 [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph },
3478 [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus,
3479 .min_priv_ver = PRIV_VERSION_1_12_0 },
3480 [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg,
3481 .min_priv_ver = PRIV_VERSION_1_12_0 },
3482 [CSR_HIDELEG] = { "hideleg", hmode, NULL, NULL, rmw_hideleg,
3483 .min_priv_ver = PRIV_VERSION_1_12_0 },
3484 [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip,
3485 .min_priv_ver = PRIV_VERSION_1_12_0 },
3486 [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip,
3487 .min_priv_ver = PRIV_VERSION_1_12_0 },
3488 [CSR_HIE] = { "hie", hmode, NULL, NULL, rmw_hie,
3489 .min_priv_ver = PRIV_VERSION_1_12_0 },
3490 [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren, write_hcounteren,
3491 .min_priv_ver = PRIV_VERSION_1_12_0 },
3492 [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie,
3493 .min_priv_ver = PRIV_VERSION_1_12_0 },
3494 [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval,
3495 .min_priv_ver = PRIV_VERSION_1_12_0 },
3496 [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst,
3497 .min_priv_ver = PRIV_VERSION_1_12_0 },
3498 [CSR_HGEIP] = { "hgeip", hmode, read_hgeip,
3499 .min_priv_ver = PRIV_VERSION_1_12_0 },
3500 [CSR_HGATP] = { "hgatp", hmode, read_hgatp, write_hgatp,
3501 .min_priv_ver = PRIV_VERSION_1_12_0 },
3502 [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta, write_htimedelta,
3503 .min_priv_ver = PRIV_VERSION_1_12_0 },
3504 [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah, write_htimedeltah,
3505 .min_priv_ver = PRIV_VERSION_1_12_0 },
3507 [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus, write_vsstatus,
3508 .min_priv_ver = PRIV_VERSION_1_12_0 },
3509 [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip,
3510 .min_priv_ver = PRIV_VERSION_1_12_0 },
3511 [CSR_VSIE] = { "vsie", hmode, NULL, NULL, rmw_vsie,
3512 .min_priv_ver = PRIV_VERSION_1_12_0 },
3513 [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec,
3514 .min_priv_ver = PRIV_VERSION_1_12_0 },
3515 [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch, write_vsscratch,
3516 .min_priv_ver = PRIV_VERSION_1_12_0 },
3517 [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc,
3518 .min_priv_ver = PRIV_VERSION_1_12_0 },
3519 [CSR_VSCAUSE] = { "vscause", hmode, read_vscause, write_vscause,
3520 .min_priv_ver = PRIV_VERSION_1_12_0 },
3521 [CSR_VSTVAL] = { "vstval", hmode, read_vstval, write_vstval,
3522 .min_priv_ver = PRIV_VERSION_1_12_0 },
3523 [CSR_VSATP] = { "vsatp", hmode, read_vsatp, write_vsatp,
3524 .min_priv_ver = PRIV_VERSION_1_12_0 },
3526 [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2,
3527 .min_priv_ver = PRIV_VERSION_1_12_0 },
3528 [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst,
3529 .min_priv_ver = PRIV_VERSION_1_12_0 },
3531 /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
3532 [CSR_HVIEN] = { "hvien", aia_hmode, read_zero, write_ignore },
3533 [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl, write_hvictl },
3534 [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1, write_hviprio1 },
3535 [CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2, write_hviprio2 },
3538 * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
3540 [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL, rmw_xiselect },
3541 [CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg },
3543 /* VS-Level Interrupts (H-extension with AIA) */
3544 [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi },
3546 /* VS-Level IMSIC Interface (H-extension with AIA) */
3547 [CSR_VSSETEIPNUM] = { "vsseteipnum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
3548 [CSR_VSCLREIPNUM] = { "vsclreipnum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
3549 [CSR_VSSETEIENUM] = { "vsseteienum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
3550 [CSR_VSCLREIENUM] = { "vsclreienum", aia_hmode, NULL, NULL, rmw_xsetclreinum },
3551 [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei },
3553 /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
3554 [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL, rmw_hidelegh },
3555 [CSR_HVIENH] = { "hvienh", aia_hmode32, read_zero, write_ignore },
3556 [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph },
3557 [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h, write_hviprio1h },
3558 [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h, write_hviprio2h },
3559 [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh },
3560 [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph },
3562 /* Physical Memory Protection */
3563 [CSR_MSECCFG] = { "mseccfg", epmp, read_mseccfg, write_mseccfg,
3564 .min_priv_ver = PRIV_VERSION_1_12_0 },
3565 [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg },
3566 [CSR_PMPCFG1] = { "pmpcfg1", pmp, read_pmpcfg, write_pmpcfg },
3567 [CSR_PMPCFG2] = { "pmpcfg2", pmp, read_pmpcfg, write_pmpcfg },
3568 [CSR_PMPCFG3] = { "pmpcfg3", pmp, read_pmpcfg, write_pmpcfg },
3569 [CSR_PMPADDR0] = { "pmpaddr0", pmp, read_pmpaddr, write_pmpaddr },
3570 [CSR_PMPADDR1] = { "pmpaddr1", pmp, read_pmpaddr, write_pmpaddr },
3571 [CSR_PMPADDR2] = { "pmpaddr2", pmp, read_pmpaddr, write_pmpaddr },
3572 [CSR_PMPADDR3] = { "pmpaddr3", pmp, read_pmpaddr, write_pmpaddr },
3573 [CSR_PMPADDR4] = { "pmpaddr4", pmp, read_pmpaddr, write_pmpaddr },
3574 [CSR_PMPADDR5] = { "pmpaddr5", pmp, read_pmpaddr, write_pmpaddr },
3575 [CSR_PMPADDR6] = { "pmpaddr6", pmp, read_pmpaddr, write_pmpaddr },
3576 [CSR_PMPADDR7] = { "pmpaddr7", pmp, read_pmpaddr, write_pmpaddr },
3577 [CSR_PMPADDR8] = { "pmpaddr8", pmp, read_pmpaddr, write_pmpaddr },
3578 [CSR_PMPADDR9] = { "pmpaddr9", pmp, read_pmpaddr, write_pmpaddr },
3579 [CSR_PMPADDR10] = { "pmpaddr10", pmp, read_pmpaddr, write_pmpaddr },
3580 [CSR_PMPADDR11] = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
3581 [CSR_PMPADDR12] = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
3582 [CSR_PMPADDR13] = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
3583 [CSR_PMPADDR14] = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
3584 [CSR_PMPADDR15] = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
3586 /* Debug CSRs */
3587 [CSR_TSELECT] = { "tselect", debug, read_tselect, write_tselect },
3588 [CSR_TDATA1] = { "tdata1", debug, read_tdata, write_tdata },
3589 [CSR_TDATA2] = { "tdata2", debug, read_tdata, write_tdata },
3590 [CSR_TDATA3] = { "tdata3", debug, read_tdata, write_tdata },
3592 /* User Pointer Masking */
3593 [CSR_UMTE] = { "umte", pointer_masking, read_umte, write_umte },
3594 [CSR_UPMMASK] = { "upmmask", pointer_masking, read_upmmask, write_upmmask },
3595 [CSR_UPMBASE] = { "upmbase", pointer_masking, read_upmbase, write_upmbase },
3596 /* Machine Pointer Masking */
3597 [CSR_MMTE] = { "mmte", pointer_masking, read_mmte, write_mmte },
3598 [CSR_MPMMASK] = { "mpmmask", pointer_masking, read_mpmmask, write_mpmmask },
3599 [CSR_MPMBASE] = { "mpmbase", pointer_masking, read_mpmbase, write_mpmbase },
3600 /* Supervisor Pointer Masking */
3601 [CSR_SMTE] = { "smte", pointer_masking, read_smte, write_smte },
3602 [CSR_SPMMASK] = { "spmmask", pointer_masking, read_spmmask, write_spmmask },
3603 [CSR_SPMBASE] = { "spmbase", pointer_masking, read_spmbase, write_spmbase },
3605 /* Performance Counters */
3606 [CSR_HPMCOUNTER3] = { "hpmcounter3", ctr, read_zero },
3607 [CSR_HPMCOUNTER4] = { "hpmcounter4", ctr, read_zero },
3608 [CSR_HPMCOUNTER5] = { "hpmcounter5", ctr, read_zero },
3609 [CSR_HPMCOUNTER6] = { "hpmcounter6", ctr, read_zero },
3610 [CSR_HPMCOUNTER7] = { "hpmcounter7", ctr, read_zero },
3611 [CSR_HPMCOUNTER8] = { "hpmcounter8", ctr, read_zero },
3612 [CSR_HPMCOUNTER9] = { "hpmcounter9", ctr, read_zero },
3613 [CSR_HPMCOUNTER10] = { "hpmcounter10", ctr, read_zero },
3614 [CSR_HPMCOUNTER11] = { "hpmcounter11", ctr, read_zero },
3615 [CSR_HPMCOUNTER12] = { "hpmcounter12", ctr, read_zero },
3616 [CSR_HPMCOUNTER13] = { "hpmcounter13", ctr, read_zero },
3617 [CSR_HPMCOUNTER14] = { "hpmcounter14", ctr, read_zero },
3618 [CSR_HPMCOUNTER15] = { "hpmcounter15", ctr, read_zero },
3619 [CSR_HPMCOUNTER16] = { "hpmcounter16", ctr, read_zero },
3620 [CSR_HPMCOUNTER17] = { "hpmcounter17", ctr, read_zero },
3621 [CSR_HPMCOUNTER18] = { "hpmcounter18", ctr, read_zero },
3622 [CSR_HPMCOUNTER19] = { "hpmcounter19", ctr, read_zero },
3623 [CSR_HPMCOUNTER20] = { "hpmcounter20", ctr, read_zero },
3624 [CSR_HPMCOUNTER21] = { "hpmcounter21", ctr, read_zero },
3625 [CSR_HPMCOUNTER22] = { "hpmcounter22", ctr, read_zero },
3626 [CSR_HPMCOUNTER23] = { "hpmcounter23", ctr, read_zero },
3627 [CSR_HPMCOUNTER24] = { "hpmcounter24", ctr, read_zero },
3628 [CSR_HPMCOUNTER25] = { "hpmcounter25", ctr, read_zero },
3629 [CSR_HPMCOUNTER26] = { "hpmcounter26", ctr, read_zero },
3630 [CSR_HPMCOUNTER27] = { "hpmcounter27", ctr, read_zero },
3631 [CSR_HPMCOUNTER28] = { "hpmcounter28", ctr, read_zero },
3632 [CSR_HPMCOUNTER29] = { "hpmcounter29", ctr, read_zero },
3633 [CSR_HPMCOUNTER30] = { "hpmcounter30", ctr, read_zero },
3634 [CSR_HPMCOUNTER31] = { "hpmcounter31", ctr, read_zero },
3636 [CSR_MHPMCOUNTER3] = { "mhpmcounter3", any, read_zero },
3637 [CSR_MHPMCOUNTER4] = { "mhpmcounter4", any, read_zero },
3638 [CSR_MHPMCOUNTER5] = { "mhpmcounter5", any, read_zero },
3639 [CSR_MHPMCOUNTER6] = { "mhpmcounter6", any, read_zero },
3640 [CSR_MHPMCOUNTER7] = { "mhpmcounter7", any, read_zero },
3641 [CSR_MHPMCOUNTER8] = { "mhpmcounter8", any, read_zero },
3642 [CSR_MHPMCOUNTER9] = { "mhpmcounter9", any, read_zero },
3643 [CSR_MHPMCOUNTER10] = { "mhpmcounter10", any, read_zero },
3644 [CSR_MHPMCOUNTER11] = { "mhpmcounter11", any, read_zero },
3645 [CSR_MHPMCOUNTER12] = { "mhpmcounter12", any, read_zero },
3646 [CSR_MHPMCOUNTER13] = { "mhpmcounter13", any, read_zero },
3647 [CSR_MHPMCOUNTER14] = { "mhpmcounter14", any, read_zero },
3648 [CSR_MHPMCOUNTER15] = { "mhpmcounter15", any, read_zero },
3649 [CSR_MHPMCOUNTER16] = { "mhpmcounter16", any, read_zero },
3650 [CSR_MHPMCOUNTER17] = { "mhpmcounter17", any, read_zero },
3651 [CSR_MHPMCOUNTER18] = { "mhpmcounter18", any, read_zero },
3652 [CSR_MHPMCOUNTER19] = { "mhpmcounter19", any, read_zero },
3653 [CSR_MHPMCOUNTER20] = { "mhpmcounter20", any, read_zero },
3654 [CSR_MHPMCOUNTER21] = { "mhpmcounter21", any, read_zero },
3655 [CSR_MHPMCOUNTER22] = { "mhpmcounter22", any, read_zero },
3656 [CSR_MHPMCOUNTER23] = { "mhpmcounter23", any, read_zero },
3657 [CSR_MHPMCOUNTER24] = { "mhpmcounter24", any, read_zero },
3658 [CSR_MHPMCOUNTER25] = { "mhpmcounter25", any, read_zero },
3659 [CSR_MHPMCOUNTER26] = { "mhpmcounter26", any, read_zero },
3660 [CSR_MHPMCOUNTER27] = { "mhpmcounter27", any, read_zero },
3661 [CSR_MHPMCOUNTER28] = { "mhpmcounter28", any, read_zero },
3662 [CSR_MHPMCOUNTER29] = { "mhpmcounter29", any, read_zero },
3663 [CSR_MHPMCOUNTER30] = { "mhpmcounter30", any, read_zero },
3664 [CSR_MHPMCOUNTER31] = { "mhpmcounter31", any, read_zero },
3666 [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_zero },
3667 [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_zero },
3668 [CSR_MHPMEVENT5] = { "mhpmevent5", any, read_zero },
3669 [CSR_MHPMEVENT6] = { "mhpmevent6", any, read_zero },
3670 [CSR_MHPMEVENT7] = { "mhpmevent7", any, read_zero },
3671 [CSR_MHPMEVENT8] = { "mhpmevent8", any, read_zero },
3672 [CSR_MHPMEVENT9] = { "mhpmevent9", any, read_zero },
3673 [CSR_MHPMEVENT10] = { "mhpmevent10", any, read_zero },
3674 [CSR_MHPMEVENT11] = { "mhpmevent11", any, read_zero },
3675 [CSR_MHPMEVENT12] = { "mhpmevent12", any, read_zero },
3676 [CSR_MHPMEVENT13] = { "mhpmevent13", any, read_zero },
3677 [CSR_MHPMEVENT14] = { "mhpmevent14", any, read_zero },
3678 [CSR_MHPMEVENT15] = { "mhpmevent15", any, read_zero },
3679 [CSR_MHPMEVENT16] = { "mhpmevent16", any, read_zero },
3680 [CSR_MHPMEVENT17] = { "mhpmevent17", any, read_zero },
3681 [CSR_MHPMEVENT18] = { "mhpmevent18", any, read_zero },
3682 [CSR_MHPMEVENT19] = { "mhpmevent19", any, read_zero },
3683 [CSR_MHPMEVENT20] = { "mhpmevent20", any, read_zero },
3684 [CSR_MHPMEVENT21] = { "mhpmevent21", any, read_zero },
3685 [CSR_MHPMEVENT22] = { "mhpmevent22", any, read_zero },
3686 [CSR_MHPMEVENT23] = { "mhpmevent23", any, read_zero },
3687 [CSR_MHPMEVENT24] = { "mhpmevent24", any, read_zero },
3688 [CSR_MHPMEVENT25] = { "mhpmevent25", any, read_zero },
3689 [CSR_MHPMEVENT26] = { "mhpmevent26", any, read_zero },
3690 [CSR_MHPMEVENT27] = { "mhpmevent27", any, read_zero },
3691 [CSR_MHPMEVENT28] = { "mhpmevent28", any, read_zero },
3692 [CSR_MHPMEVENT29] = { "mhpmevent29", any, read_zero },
3693 [CSR_MHPMEVENT30] = { "mhpmevent30", any, read_zero },
3694 [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_zero },
3696 [CSR_HPMCOUNTER3H] = { "hpmcounter3h", ctr32, read_zero },
3697 [CSR_HPMCOUNTER4H] = { "hpmcounter4h", ctr32, read_zero },
3698 [CSR_HPMCOUNTER5H] = { "hpmcounter5h", ctr32, read_zero },
3699 [CSR_HPMCOUNTER6H] = { "hpmcounter6h", ctr32, read_zero },
3700 [CSR_HPMCOUNTER7H] = { "hpmcounter7h", ctr32, read_zero },
3701 [CSR_HPMCOUNTER8H] = { "hpmcounter8h", ctr32, read_zero },
3702 [CSR_HPMCOUNTER9H] = { "hpmcounter9h", ctr32, read_zero },
3703 [CSR_HPMCOUNTER10H] = { "hpmcounter10h", ctr32, read_zero },
3704 [CSR_HPMCOUNTER11H] = { "hpmcounter11h", ctr32, read_zero },
3705 [CSR_HPMCOUNTER12H] = { "hpmcounter12h", ctr32, read_zero },
3706 [CSR_HPMCOUNTER13H] = { "hpmcounter13h", ctr32, read_zero },
3707 [CSR_HPMCOUNTER14H] = { "hpmcounter14h", ctr32, read_zero },
3708 [CSR_HPMCOUNTER15H] = { "hpmcounter15h", ctr32, read_zero },
3709 [CSR_HPMCOUNTER16H] = { "hpmcounter16h", ctr32, read_zero },
3710 [CSR_HPMCOUNTER17H] = { "hpmcounter17h", ctr32, read_zero },
3711 [CSR_HPMCOUNTER18H] = { "hpmcounter18h", ctr32, read_zero },
3712 [CSR_HPMCOUNTER19H] = { "hpmcounter19h", ctr32, read_zero },
3713 [CSR_HPMCOUNTER20H] = { "hpmcounter20h", ctr32, read_zero },
3714 [CSR_HPMCOUNTER21H] = { "hpmcounter21h", ctr32, read_zero },
3715 [CSR_HPMCOUNTER22H] = { "hpmcounter22h", ctr32, read_zero },
3716 [CSR_HPMCOUNTER23H] = { "hpmcounter23h", ctr32, read_zero },
3717 [CSR_HPMCOUNTER24H] = { "hpmcounter24h", ctr32, read_zero },
3718 [CSR_HPMCOUNTER25H] = { "hpmcounter25h", ctr32, read_zero },
3719 [CSR_HPMCOUNTER26H] = { "hpmcounter26h", ctr32, read_zero },
3720 [CSR_HPMCOUNTER27H] = { "hpmcounter27h", ctr32, read_zero },
3721 [CSR_HPMCOUNTER28H] = { "hpmcounter28h", ctr32, read_zero },
3722 [CSR_HPMCOUNTER29H] = { "hpmcounter29h", ctr32, read_zero },
3723 [CSR_HPMCOUNTER30H] = { "hpmcounter30h", ctr32, read_zero },
3724 [CSR_HPMCOUNTER31H] = { "hpmcounter31h", ctr32, read_zero },
3726 [CSR_MHPMCOUNTER3H] = { "mhpmcounter3h", any32, read_zero },
3727 [CSR_MHPMCOUNTER4H] = { "mhpmcounter4h", any32, read_zero },
3728 [CSR_MHPMCOUNTER5H] = { "mhpmcounter5h", any32, read_zero },
3729 [CSR_MHPMCOUNTER6H] = { "mhpmcounter6h", any32, read_zero },
3730 [CSR_MHPMCOUNTER7H] = { "mhpmcounter7h", any32, read_zero },
3731 [CSR_MHPMCOUNTER8H] = { "mhpmcounter8h", any32, read_zero },
3732 [CSR_MHPMCOUNTER9H] = { "mhpmcounter9h", any32, read_zero },
3733 [CSR_MHPMCOUNTER10H] = { "mhpmcounter10h", any32, read_zero },
3734 [CSR_MHPMCOUNTER11H] = { "mhpmcounter11h", any32, read_zero },
3735 [CSR_MHPMCOUNTER12H] = { "mhpmcounter12h", any32, read_zero },
3736 [CSR_MHPMCOUNTER13H] = { "mhpmcounter13h", any32, read_zero },
3737 [CSR_MHPMCOUNTER14H] = { "mhpmcounter14h", any32, read_zero },
3738 [CSR_MHPMCOUNTER15H] = { "mhpmcounter15h", any32, read_zero },
3739 [CSR_MHPMCOUNTER16H] = { "mhpmcounter16h", any32, read_zero },
3740 [CSR_MHPMCOUNTER17H] = { "mhpmcounter17h", any32, read_zero },
3741 [CSR_MHPMCOUNTER18H] = { "mhpmcounter18h", any32, read_zero },
3742 [CSR_MHPMCOUNTER19H] = { "mhpmcounter19h", any32, read_zero },
3743 [CSR_MHPMCOUNTER20H] = { "mhpmcounter20h", any32, read_zero },
3744 [CSR_MHPMCOUNTER21H] = { "mhpmcounter21h", any32, read_zero },
3745 [CSR_MHPMCOUNTER22H] = { "mhpmcounter22h", any32, read_zero },
3746 [CSR_MHPMCOUNTER23H] = { "mhpmcounter23h", any32, read_zero },
3747 [CSR_MHPMCOUNTER24H] = { "mhpmcounter24h", any32, read_zero },
3748 [CSR_MHPMCOUNTER25H] = { "mhpmcounter25h", any32, read_zero },
3749 [CSR_MHPMCOUNTER26H] = { "mhpmcounter26h", any32, read_zero },
3750 [CSR_MHPMCOUNTER27H] = { "mhpmcounter27h", any32, read_zero },
3751 [CSR_MHPMCOUNTER28H] = { "mhpmcounter28h", any32, read_zero },
3752 [CSR_MHPMCOUNTER29H] = { "mhpmcounter29h", any32, read_zero },
3753 [CSR_MHPMCOUNTER30H] = { "mhpmcounter30h", any32, read_zero },
3754 [CSR_MHPMCOUNTER31H] = { "mhpmcounter31h", any32, read_zero },
3755 #endif /* !CONFIG_USER_ONLY */