4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
25 #include "internals.h"
26 #include "exec/exec-all.h"
27 #include "qapi/error.h"
28 #include "qemu/error-report.h"
29 #include "hw/qdev-properties.h"
30 #include "migration/vmstate.h"
31 #include "fpu/softfloat-helpers.h"
32 #include "sysemu/kvm.h"
33 #include "kvm_riscv.h"
35 /* RISC-V CPU definitions */
37 #define RISCV_CPU_MARCHID ((QEMU_VERSION_MAJOR << 16) | \
38 (QEMU_VERSION_MINOR << 8) | \
40 #define RISCV_CPU_MIMPID RISCV_CPU_MARCHID
/*
 * Canonical ordering of the single-letter ISA extensions as they appear
 * in a riscv ISA string (e.g. "rv64imafdc").  Iterated by
 * riscv_isa_string(); must stay in the spec-mandated order.
 */
static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";
/* Integer (x) register names: "xN/abi-name", indexed by register number. */
const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
    "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
    "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
    "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
};
/*
 * High-half integer register names, used when dumping the upper 32 bits
 * of each GPR on RV128 (suffix 'h' = high word).
 */
const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
    "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
    "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
    "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
    "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h",  "x31h/t6h"
};
/* Floating-point (f) register names: "fN/abi-name", indexed by register number. */
const char * const riscv_fpr_regnames[] = {
    "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
    "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
    "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
    "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
    "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};
75 static const char * const riscv_excp_names
[] = {
78 "illegal_instruction",
96 "guest_exec_page_fault",
97 "guest_load_page_fault",
99 "guest_store_page_fault",
102 static const char * const riscv_intr_names
[] = {
121 static void register_cpu_props(DeviceState
*dev
);
123 const char *riscv_cpu_get_trap_name(target_ulong cause
, bool async
)
126 return (cause
< ARRAY_SIZE(riscv_intr_names
)) ?
127 riscv_intr_names
[cause
] : "(unknown)";
129 return (cause
< ARRAY_SIZE(riscv_excp_names
)) ?
130 riscv_excp_names
[cause
] : "(unknown)";
134 static void set_misa(CPURISCVState
*env
, RISCVMXL mxl
, uint32_t ext
)
136 env
->misa_mxl_max
= env
->misa_mxl
= mxl
;
137 env
->misa_ext_mask
= env
->misa_ext
= ext
;
140 static void set_priv_version(CPURISCVState
*env
, int priv_ver
)
142 env
->priv_ver
= priv_ver
;
145 static void set_vext_version(CPURISCVState
*env
, int vext_ver
)
147 env
->vext_ver
= vext_ver
;
150 static void set_resetvec(CPURISCVState
*env
, target_ulong resetvec
)
152 #ifndef CONFIG_USER_ONLY
153 env
->resetvec
= resetvec
;
157 static void riscv_any_cpu_init(Object
*obj
)
159 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
160 #if defined(TARGET_RISCV32)
161 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVU
);
162 #elif defined(TARGET_RISCV64)
163 set_misa(env
, MXL_RV64
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVU
);
165 set_priv_version(env
, PRIV_VERSION_1_12_0
);
166 register_cpu_props(DEVICE(obj
));
169 #if defined(TARGET_RISCV64)
170 static void rv64_base_cpu_init(Object
*obj
)
172 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
173 /* We set this in the realise function */
174 set_misa(env
, MXL_RV64
, 0);
175 register_cpu_props(DEVICE(obj
));
176 /* Set latest version of privileged specification */
177 set_priv_version(env
, PRIV_VERSION_1_12_0
);
180 static void rv64_sifive_u_cpu_init(Object
*obj
)
182 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
183 set_misa(env
, MXL_RV64
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVS
| RVU
);
184 set_priv_version(env
, PRIV_VERSION_1_10_0
);
187 static void rv64_sifive_e_cpu_init(Object
*obj
)
189 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
190 RISCVCPU
*cpu
= RISCV_CPU(obj
);
192 set_misa(env
, MXL_RV64
, RVI
| RVM
| RVA
| RVC
| RVU
);
193 set_priv_version(env
, PRIV_VERSION_1_10_0
);
194 cpu
->cfg
.mmu
= false;
197 static void rv128_base_cpu_init(Object
*obj
)
199 if (qemu_tcg_mttcg_enabled()) {
200 /* Missing 128-bit aligned atomics */
201 error_report("128-bit RISC-V currently does not work with Multi "
202 "Threaded TCG. Please use: -accel tcg,thread=single");
205 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
206 /* We set this in the realise function */
207 set_misa(env
, MXL_RV128
, 0);
208 register_cpu_props(DEVICE(obj
));
209 /* Set latest version of privileged specification */
210 set_priv_version(env
, PRIV_VERSION_1_12_0
);
213 static void rv32_base_cpu_init(Object
*obj
)
215 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
216 /* We set this in the realise function */
217 set_misa(env
, MXL_RV32
, 0);
218 register_cpu_props(DEVICE(obj
));
219 /* Set latest version of privileged specification */
220 set_priv_version(env
, PRIV_VERSION_1_12_0
);
223 static void rv32_sifive_u_cpu_init(Object
*obj
)
225 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
226 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVS
| RVU
);
227 set_priv_version(env
, PRIV_VERSION_1_10_0
);
230 static void rv32_sifive_e_cpu_init(Object
*obj
)
232 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
233 RISCVCPU
*cpu
= RISCV_CPU(obj
);
235 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVC
| RVU
);
236 set_priv_version(env
, PRIV_VERSION_1_10_0
);
237 cpu
->cfg
.mmu
= false;
240 static void rv32_ibex_cpu_init(Object
*obj
)
242 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
243 RISCVCPU
*cpu
= RISCV_CPU(obj
);
245 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVC
| RVU
);
246 set_priv_version(env
, PRIV_VERSION_1_11_0
);
247 cpu
->cfg
.mmu
= false;
248 cpu
->cfg
.epmp
= true;
251 static void rv32_imafcu_nommu_cpu_init(Object
*obj
)
253 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
254 RISCVCPU
*cpu
= RISCV_CPU(obj
);
256 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVF
| RVC
| RVU
);
257 set_priv_version(env
, PRIV_VERSION_1_10_0
);
258 set_resetvec(env
, DEFAULT_RSTVEC
);
259 cpu
->cfg
.mmu
= false;
263 #if defined(CONFIG_KVM)
264 static void riscv_host_cpu_init(Object
*obj
)
266 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
267 #if defined(TARGET_RISCV32)
268 set_misa(env
, MXL_RV32
, 0);
269 #elif defined(TARGET_RISCV64)
270 set_misa(env
, MXL_RV64
, 0);
272 register_cpu_props(DEVICE(obj
));
276 static ObjectClass
*riscv_cpu_class_by_name(const char *cpu_model
)
282 cpuname
= g_strsplit(cpu_model
, ",", 1);
283 typename
= g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname
[0]);
284 oc
= object_class_by_name(typename
);
287 if (!oc
|| !object_class_dynamic_cast(oc
, TYPE_RISCV_CPU
) ||
288 object_class_is_abstract(oc
)) {
294 static void riscv_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
296 RISCVCPU
*cpu
= RISCV_CPU(cs
);
297 CPURISCVState
*env
= &cpu
->env
;
300 #if !defined(CONFIG_USER_ONLY)
301 if (riscv_has_ext(env
, RVH
)) {
302 qemu_fprintf(f
, " %s %d\n", "V = ", riscv_cpu_virt_enabled(env
));
305 qemu_fprintf(f
, " %s " TARGET_FMT_lx
"\n", "pc ", env
->pc
);
306 #ifndef CONFIG_USER_ONLY
308 static const int dump_csrs
[] = {
345 for (int i
= 0; i
< ARRAY_SIZE(dump_csrs
); ++i
) {
346 int csrno
= dump_csrs
[i
];
347 target_ulong val
= 0;
348 RISCVException res
= riscv_csrrw_debug(env
, csrno
, &val
, 0, 0);
351 * Rely on the smode, hmode, etc, predicates within csr.c
352 * to do the filtering of the registers that are present.
354 if (res
== RISCV_EXCP_NONE
) {
355 qemu_fprintf(f
, " %-8s " TARGET_FMT_lx
"\n",
356 csr_ops
[csrno
].name
, val
);
362 for (i
= 0; i
< 32; i
++) {
363 qemu_fprintf(f
, " %-8s " TARGET_FMT_lx
,
364 riscv_int_regnames
[i
], env
->gpr
[i
]);
366 qemu_fprintf(f
, "\n");
369 if (flags
& CPU_DUMP_FPU
) {
370 for (i
= 0; i
< 32; i
++) {
371 qemu_fprintf(f
, " %-8s %016" PRIx64
,
372 riscv_fpr_regnames
[i
], env
->fpr
[i
]);
374 qemu_fprintf(f
, "\n");
380 static void riscv_cpu_set_pc(CPUState
*cs
, vaddr value
)
382 RISCVCPU
*cpu
= RISCV_CPU(cs
);
383 CPURISCVState
*env
= &cpu
->env
;
385 if (env
->xl
== MXL_RV32
) {
386 env
->pc
= (int32_t)value
;
392 static void riscv_cpu_synchronize_from_tb(CPUState
*cs
,
393 const TranslationBlock
*tb
)
395 RISCVCPU
*cpu
= RISCV_CPU(cs
);
396 CPURISCVState
*env
= &cpu
->env
;
397 RISCVMXL xl
= FIELD_EX32(tb
->flags
, TB_FLAGS
, XL
);
399 if (xl
== MXL_RV32
) {
400 env
->pc
= (int32_t)tb
->pc
;
406 static bool riscv_cpu_has_work(CPUState
*cs
)
408 #ifndef CONFIG_USER_ONLY
409 RISCVCPU
*cpu
= RISCV_CPU(cs
);
410 CPURISCVState
*env
= &cpu
->env
;
412 * Definition of the WFI instruction requires it to ignore the privilege
413 * mode and delegation registers, but respect individual enables
415 return riscv_cpu_all_pending(env
) != 0;
421 void restore_state_to_opc(CPURISCVState
*env
, TranslationBlock
*tb
,
424 RISCVMXL xl
= FIELD_EX32(tb
->flags
, TB_FLAGS
, XL
);
425 if (xl
== MXL_RV32
) {
426 env
->pc
= (int32_t)data
[0];
433 static void riscv_cpu_reset(DeviceState
*dev
)
435 #ifndef CONFIG_USER_ONLY
439 CPUState
*cs
= CPU(dev
);
440 RISCVCPU
*cpu
= RISCV_CPU(cs
);
441 RISCVCPUClass
*mcc
= RISCV_CPU_GET_CLASS(cpu
);
442 CPURISCVState
*env
= &cpu
->env
;
444 mcc
->parent_reset(dev
);
445 #ifndef CONFIG_USER_ONLY
446 env
->misa_mxl
= env
->misa_mxl_max
;
448 env
->mstatus
&= ~(MSTATUS_MIE
| MSTATUS_MPRV
);
449 if (env
->misa_mxl
> MXL_RV32
) {
451 * The reset status of SXL/UXL is undefined, but mstatus is WARL
452 * and we must ensure that the value after init is valid for read.
454 env
->mstatus
= set_field(env
->mstatus
, MSTATUS64_SXL
, env
->misa_mxl
);
455 env
->mstatus
= set_field(env
->mstatus
, MSTATUS64_UXL
, env
->misa_mxl
);
456 if (riscv_has_ext(env
, RVH
)) {
457 env
->vsstatus
= set_field(env
->vsstatus
,
458 MSTATUS64_SXL
, env
->misa_mxl
);
459 env
->vsstatus
= set_field(env
->vsstatus
,
460 MSTATUS64_UXL
, env
->misa_mxl
);
461 env
->mstatus_hs
= set_field(env
->mstatus_hs
,
462 MSTATUS64_SXL
, env
->misa_mxl
);
463 env
->mstatus_hs
= set_field(env
->mstatus_hs
,
464 MSTATUS64_UXL
, env
->misa_mxl
);
468 env
->miclaim
= MIP_SGEIP
;
469 env
->pc
= env
->resetvec
;
471 env
->two_stage_lookup
= false;
473 /* Initialized default priorities of local interrupts. */
474 for (i
= 0; i
< ARRAY_SIZE(env
->miprio
); i
++) {
475 iprio
= riscv_cpu_default_priority(i
);
476 env
->miprio
[i
] = (i
== IRQ_M_EXT
) ? 0 : iprio
;
477 env
->siprio
[i
] = (i
== IRQ_S_EXT
) ? 0 : iprio
;
481 while (!riscv_cpu_hviprio_index2irq(i
, &irq
, &rdzero
)) {
483 env
->hviprio
[irq
] = env
->miprio
[irq
];
487 /* mmte is supposed to have pm.current hardwired to 1 */
488 env
->mmte
|= (PM_EXT_INITIAL
| MMTE_M_PM_CURRENT
);
490 env
->xl
= riscv_cpu_mxl(env
);
491 riscv_cpu_update_mask(env
);
492 cs
->exception_index
= RISCV_EXCP_NONE
;
494 set_default_nan_mode(1, &env
->fp_status
);
496 #ifndef CONFIG_USER_ONLY
497 if (riscv_feature(env
, RISCV_FEATURE_DEBUG
)) {
498 riscv_trigger_init(env
);
502 kvm_riscv_reset_vcpu(cpu
);
507 static void riscv_cpu_disas_set_info(CPUState
*s
, disassemble_info
*info
)
509 RISCVCPU
*cpu
= RISCV_CPU(s
);
511 switch (riscv_cpu_mxl(&cpu
->env
)) {
513 info
->print_insn
= print_insn_riscv32
;
516 info
->print_insn
= print_insn_riscv64
;
519 info
->print_insn
= print_insn_riscv128
;
522 g_assert_not_reached();
526 static void riscv_cpu_realize(DeviceState
*dev
, Error
**errp
)
528 CPUState
*cs
= CPU(dev
);
529 RISCVCPU
*cpu
= RISCV_CPU(dev
);
530 CPURISCVState
*env
= &cpu
->env
;
531 RISCVCPUClass
*mcc
= RISCV_CPU_GET_CLASS(dev
);
532 CPUClass
*cc
= CPU_CLASS(mcc
);
533 int priv_version
= -1;
534 Error
*local_err
= NULL
;
536 cpu_exec_realizefn(cs
, &local_err
);
537 if (local_err
!= NULL
) {
538 error_propagate(errp
, local_err
);
542 if (cpu
->cfg
.priv_spec
) {
543 if (!g_strcmp0(cpu
->cfg
.priv_spec
, "v1.12.0")) {
544 priv_version
= PRIV_VERSION_1_12_0
;
545 } else if (!g_strcmp0(cpu
->cfg
.priv_spec
, "v1.11.0")) {
546 priv_version
= PRIV_VERSION_1_11_0
;
547 } else if (!g_strcmp0(cpu
->cfg
.priv_spec
, "v1.10.0")) {
548 priv_version
= PRIV_VERSION_1_10_0
;
551 "Unsupported privilege spec version '%s'",
557 if (priv_version
>= PRIV_VERSION_1_10_0
) {
558 set_priv_version(env
, priv_version
);
562 riscv_set_feature(env
, RISCV_FEATURE_MMU
);
566 riscv_set_feature(env
, RISCV_FEATURE_PMP
);
569 * Enhanced PMP should only be available
570 * on harts with PMP support
573 riscv_set_feature(env
, RISCV_FEATURE_EPMP
);
578 riscv_set_feature(env
, RISCV_FEATURE_AIA
);
581 if (cpu
->cfg
.debug
) {
582 riscv_set_feature(env
, RISCV_FEATURE_DEBUG
);
585 set_resetvec(env
, cpu
->cfg
.resetvec
);
587 /* Validate that MISA_MXL is set properly. */
588 switch (env
->misa_mxl_max
) {
589 #ifdef TARGET_RISCV64
592 cc
->gdb_core_xml_file
= "riscv-64bit-cpu.xml";
596 cc
->gdb_core_xml_file
= "riscv-32bit-cpu.xml";
599 g_assert_not_reached();
601 assert(env
->misa_mxl_max
== env
->misa_mxl
);
603 /* If only MISA_EXT is unset for misa, then set it from properties */
604 if (env
->misa_ext
== 0) {
607 /* Do some ISA extension error checking */
608 if (cpu
->cfg
.ext_g
&& !(cpu
->cfg
.ext_i
&& cpu
->cfg
.ext_m
&&
609 cpu
->cfg
.ext_a
&& cpu
->cfg
.ext_f
&&
611 cpu
->cfg
.ext_icsr
&& cpu
->cfg
.ext_ifencei
)) {
612 warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
613 cpu
->cfg
.ext_i
= true;
614 cpu
->cfg
.ext_m
= true;
615 cpu
->cfg
.ext_a
= true;
616 cpu
->cfg
.ext_f
= true;
617 cpu
->cfg
.ext_d
= true;
618 cpu
->cfg
.ext_icsr
= true;
619 cpu
->cfg
.ext_ifencei
= true;
622 if (cpu
->cfg
.ext_m
&& cpu
->cfg
.ext_zmmul
) {
623 warn_report("Zmmul will override M");
624 cpu
->cfg
.ext_m
= false;
627 if (cpu
->cfg
.ext_i
&& cpu
->cfg
.ext_e
) {
629 "I and E extensions are incompatible");
633 if (!cpu
->cfg
.ext_i
&& !cpu
->cfg
.ext_e
) {
635 "Either I or E extension must be set");
639 if (cpu
->cfg
.ext_f
&& !cpu
->cfg
.ext_icsr
) {
640 error_setg(errp
, "F extension requires Zicsr");
644 if ((cpu
->cfg
.ext_zfh
|| cpu
->cfg
.ext_zfhmin
) && !cpu
->cfg
.ext_f
) {
645 error_setg(errp
, "Zfh/Zfhmin extensions require F extension");
649 if (cpu
->cfg
.ext_d
&& !cpu
->cfg
.ext_f
) {
650 error_setg(errp
, "D extension requires F extension");
654 if (cpu
->cfg
.ext_v
&& !cpu
->cfg
.ext_d
) {
655 error_setg(errp
, "V extension requires D extension");
659 if ((cpu
->cfg
.ext_zve32f
|| cpu
->cfg
.ext_zve64f
) && !cpu
->cfg
.ext_f
) {
660 error_setg(errp
, "Zve32f/Zve64f extensions require F extension");
664 /* Set the ISA extensions, checks should have happened above */
665 if (cpu
->cfg
.ext_zdinx
|| cpu
->cfg
.ext_zhinx
||
666 cpu
->cfg
.ext_zhinxmin
) {
667 cpu
->cfg
.ext_zfinx
= true;
670 if (cpu
->cfg
.ext_zfinx
) {
671 if (!cpu
->cfg
.ext_icsr
) {
672 error_setg(errp
, "Zfinx extension requires Zicsr");
675 if (cpu
->cfg
.ext_f
) {
677 "Zfinx cannot be supported together with F extension");
682 if (cpu
->cfg
.ext_zk
) {
683 cpu
->cfg
.ext_zkn
= true;
684 cpu
->cfg
.ext_zkr
= true;
685 cpu
->cfg
.ext_zkt
= true;
688 if (cpu
->cfg
.ext_zkn
) {
689 cpu
->cfg
.ext_zbkb
= true;
690 cpu
->cfg
.ext_zbkc
= true;
691 cpu
->cfg
.ext_zbkx
= true;
692 cpu
->cfg
.ext_zkne
= true;
693 cpu
->cfg
.ext_zknd
= true;
694 cpu
->cfg
.ext_zknh
= true;
697 if (cpu
->cfg
.ext_zks
) {
698 cpu
->cfg
.ext_zbkb
= true;
699 cpu
->cfg
.ext_zbkc
= true;
700 cpu
->cfg
.ext_zbkx
= true;
701 cpu
->cfg
.ext_zksed
= true;
702 cpu
->cfg
.ext_zksh
= true;
705 if (cpu
->cfg
.ext_i
) {
708 if (cpu
->cfg
.ext_e
) {
711 if (cpu
->cfg
.ext_m
) {
714 if (cpu
->cfg
.ext_a
) {
717 if (cpu
->cfg
.ext_f
) {
720 if (cpu
->cfg
.ext_d
) {
723 if (cpu
->cfg
.ext_c
) {
726 if (cpu
->cfg
.ext_s
) {
729 if (cpu
->cfg
.ext_u
) {
732 if (cpu
->cfg
.ext_h
) {
735 if (cpu
->cfg
.ext_v
) {
736 int vext_version
= VEXT_VERSION_1_00_0
;
738 if (!is_power_of_2(cpu
->cfg
.vlen
)) {
740 "Vector extension VLEN must be power of 2");
743 if (cpu
->cfg
.vlen
> RV_VLEN_MAX
|| cpu
->cfg
.vlen
< 128) {
745 "Vector extension implementation only supports VLEN "
746 "in the range [128, %d]", RV_VLEN_MAX
);
749 if (!is_power_of_2(cpu
->cfg
.elen
)) {
751 "Vector extension ELEN must be power of 2");
754 if (cpu
->cfg
.elen
> 64 || cpu
->cfg
.vlen
< 8) {
756 "Vector extension implementation only supports ELEN "
757 "in the range [8, 64]");
760 if (cpu
->cfg
.vext_spec
) {
761 if (!g_strcmp0(cpu
->cfg
.vext_spec
, "v1.0")) {
762 vext_version
= VEXT_VERSION_1_00_0
;
765 "Unsupported vector spec version '%s'",
770 qemu_log("vector version is not specified, "
771 "use the default value v1.0\n");
773 set_vext_version(env
, vext_version
);
775 if (cpu
->cfg
.ext_j
) {
779 set_misa(env
, env
->misa_mxl
, ext
);
782 riscv_cpu_register_gdb_regs_for_features(cs
);
787 mcc
->parent_realize(dev
, errp
);
790 #ifndef CONFIG_USER_ONLY
791 static void riscv_cpu_set_irq(void *opaque
, int irq
, int level
)
793 RISCVCPU
*cpu
= RISCV_CPU(opaque
);
794 CPURISCVState
*env
= &cpu
->env
;
796 if (irq
< IRQ_LOCAL_MAX
) {
810 kvm_riscv_set_irq(cpu
, irq
, level
);
812 riscv_cpu_update_mip(cpu
, 1 << irq
, BOOL_TO_MASK(level
));
817 kvm_riscv_set_irq(cpu
, irq
, level
);
819 env
->external_seip
= level
;
820 riscv_cpu_update_mip(cpu
, 1 << irq
,
821 BOOL_TO_MASK(level
| env
->software_seip
));
825 g_assert_not_reached();
827 } else if (irq
< (IRQ_LOCAL_MAX
+ IRQ_LOCAL_GUEST_MAX
)) {
828 /* Require H-extension for handling guest local interrupts */
829 if (!riscv_has_ext(env
, RVH
)) {
830 g_assert_not_reached();
833 /* Compute bit position in HGEIP CSR */
834 irq
= irq
- IRQ_LOCAL_MAX
+ 1;
835 if (env
->geilen
< irq
) {
836 g_assert_not_reached();
839 /* Update HGEIP CSR */
840 env
->hgeip
&= ~((target_ulong
)1 << irq
);
842 env
->hgeip
|= (target_ulong
)1 << irq
;
845 /* Update mip.SGEIP bit */
846 riscv_cpu_update_mip(cpu
, MIP_SGEIP
,
847 BOOL_TO_MASK(!!(env
->hgeie
& env
->hgeip
)));
849 g_assert_not_reached();
852 #endif /* CONFIG_USER_ONLY */
854 static void riscv_cpu_init(Object
*obj
)
856 RISCVCPU
*cpu
= RISCV_CPU(obj
);
858 cpu
->cfg
.ext_ifencei
= true;
859 cpu
->cfg
.ext_icsr
= true;
863 cpu_set_cpustate_pointers(cpu
);
865 #ifndef CONFIG_USER_ONLY
866 qdev_init_gpio_in(DEVICE(cpu
), riscv_cpu_set_irq
,
867 IRQ_LOCAL_MAX
+ IRQ_LOCAL_GUEST_MAX
);
868 #endif /* CONFIG_USER_ONLY */
871 static Property riscv_cpu_extensions
[] = {
872 /* Defaults for standard extensions */
873 DEFINE_PROP_BOOL("i", RISCVCPU
, cfg
.ext_i
, true),
874 DEFINE_PROP_BOOL("e", RISCVCPU
, cfg
.ext_e
, false),
875 DEFINE_PROP_BOOL("g", RISCVCPU
, cfg
.ext_g
, false),
876 DEFINE_PROP_BOOL("m", RISCVCPU
, cfg
.ext_m
, true),
877 DEFINE_PROP_BOOL("a", RISCVCPU
, cfg
.ext_a
, true),
878 DEFINE_PROP_BOOL("f", RISCVCPU
, cfg
.ext_f
, true),
879 DEFINE_PROP_BOOL("d", RISCVCPU
, cfg
.ext_d
, true),
880 DEFINE_PROP_BOOL("c", RISCVCPU
, cfg
.ext_c
, true),
881 DEFINE_PROP_BOOL("s", RISCVCPU
, cfg
.ext_s
, true),
882 DEFINE_PROP_BOOL("u", RISCVCPU
, cfg
.ext_u
, true),
883 DEFINE_PROP_BOOL("v", RISCVCPU
, cfg
.ext_v
, false),
884 DEFINE_PROP_BOOL("h", RISCVCPU
, cfg
.ext_h
, true),
885 DEFINE_PROP_UINT8("pmu-num", RISCVCPU
, cfg
.pmu_num
, 16),
886 DEFINE_PROP_BOOL("Zifencei", RISCVCPU
, cfg
.ext_ifencei
, true),
887 DEFINE_PROP_BOOL("Zicsr", RISCVCPU
, cfg
.ext_icsr
, true),
888 DEFINE_PROP_BOOL("Zfh", RISCVCPU
, cfg
.ext_zfh
, false),
889 DEFINE_PROP_BOOL("Zfhmin", RISCVCPU
, cfg
.ext_zfhmin
, false),
890 DEFINE_PROP_BOOL("Zve32f", RISCVCPU
, cfg
.ext_zve32f
, false),
891 DEFINE_PROP_BOOL("Zve64f", RISCVCPU
, cfg
.ext_zve64f
, false),
892 DEFINE_PROP_BOOL("mmu", RISCVCPU
, cfg
.mmu
, true),
893 DEFINE_PROP_BOOL("pmp", RISCVCPU
, cfg
.pmp
, true),
895 DEFINE_PROP_STRING("priv_spec", RISCVCPU
, cfg
.priv_spec
),
896 DEFINE_PROP_STRING("vext_spec", RISCVCPU
, cfg
.vext_spec
),
897 DEFINE_PROP_UINT16("vlen", RISCVCPU
, cfg
.vlen
, 128),
898 DEFINE_PROP_UINT16("elen", RISCVCPU
, cfg
.elen
, 64),
900 DEFINE_PROP_BOOL("svinval", RISCVCPU
, cfg
.ext_svinval
, false),
901 DEFINE_PROP_BOOL("svnapot", RISCVCPU
, cfg
.ext_svnapot
, false),
902 DEFINE_PROP_BOOL("svpbmt", RISCVCPU
, cfg
.ext_svpbmt
, false),
904 DEFINE_PROP_BOOL("zba", RISCVCPU
, cfg
.ext_zba
, true),
905 DEFINE_PROP_BOOL("zbb", RISCVCPU
, cfg
.ext_zbb
, true),
906 DEFINE_PROP_BOOL("zbc", RISCVCPU
, cfg
.ext_zbc
, true),
907 DEFINE_PROP_BOOL("zbkb", RISCVCPU
, cfg
.ext_zbkb
, false),
908 DEFINE_PROP_BOOL("zbkc", RISCVCPU
, cfg
.ext_zbkc
, false),
909 DEFINE_PROP_BOOL("zbkx", RISCVCPU
, cfg
.ext_zbkx
, false),
910 DEFINE_PROP_BOOL("zbs", RISCVCPU
, cfg
.ext_zbs
, true),
911 DEFINE_PROP_BOOL("zk", RISCVCPU
, cfg
.ext_zk
, false),
912 DEFINE_PROP_BOOL("zkn", RISCVCPU
, cfg
.ext_zkn
, false),
913 DEFINE_PROP_BOOL("zknd", RISCVCPU
, cfg
.ext_zknd
, false),
914 DEFINE_PROP_BOOL("zkne", RISCVCPU
, cfg
.ext_zkne
, false),
915 DEFINE_PROP_BOOL("zknh", RISCVCPU
, cfg
.ext_zknh
, false),
916 DEFINE_PROP_BOOL("zkr", RISCVCPU
, cfg
.ext_zkr
, false),
917 DEFINE_PROP_BOOL("zks", RISCVCPU
, cfg
.ext_zks
, false),
918 DEFINE_PROP_BOOL("zksed", RISCVCPU
, cfg
.ext_zksed
, false),
919 DEFINE_PROP_BOOL("zksh", RISCVCPU
, cfg
.ext_zksh
, false),
920 DEFINE_PROP_BOOL("zkt", RISCVCPU
, cfg
.ext_zkt
, false),
922 DEFINE_PROP_BOOL("zdinx", RISCVCPU
, cfg
.ext_zdinx
, false),
923 DEFINE_PROP_BOOL("zfinx", RISCVCPU
, cfg
.ext_zfinx
, false),
924 DEFINE_PROP_BOOL("zhinx", RISCVCPU
, cfg
.ext_zhinx
, false),
925 DEFINE_PROP_BOOL("zhinxmin", RISCVCPU
, cfg
.ext_zhinxmin
, false),
927 /* Vendor-specific custom extensions */
928 DEFINE_PROP_BOOL("xventanacondops", RISCVCPU
, cfg
.ext_XVentanaCondOps
, false),
930 /* These are experimental so mark with 'x-' */
931 DEFINE_PROP_BOOL("x-j", RISCVCPU
, cfg
.ext_j
, false),
932 DEFINE_PROP_BOOL("x-zmmul", RISCVCPU
, cfg
.ext_zmmul
, false),
934 DEFINE_PROP_BOOL("x-epmp", RISCVCPU
, cfg
.epmp
, false),
935 DEFINE_PROP_BOOL("x-aia", RISCVCPU
, cfg
.aia
, false),
937 DEFINE_PROP_END_OF_LIST(),
940 static void register_cpu_props(DeviceState
*dev
)
944 for (prop
= riscv_cpu_extensions
; prop
&& prop
->name
; prop
++) {
945 qdev_property_add_static(dev
, prop
);
949 static Property riscv_cpu_properties
[] = {
950 DEFINE_PROP_BOOL("debug", RISCVCPU
, cfg
.debug
, true),
952 DEFINE_PROP_UINT32("mvendorid", RISCVCPU
, cfg
.mvendorid
, 0),
953 DEFINE_PROP_UINT64("marchid", RISCVCPU
, cfg
.marchid
, RISCV_CPU_MARCHID
),
954 DEFINE_PROP_UINT64("mimpid", RISCVCPU
, cfg
.mimpid
, RISCV_CPU_MIMPID
),
956 DEFINE_PROP_UINT64("resetvec", RISCVCPU
, cfg
.resetvec
, DEFAULT_RSTVEC
),
958 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU
, cfg
.short_isa_string
, false),
960 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU
, cfg
.rvv_ta_all_1s
, false),
961 DEFINE_PROP_END_OF_LIST(),
964 static gchar
*riscv_gdb_arch_name(CPUState
*cs
)
966 RISCVCPU
*cpu
= RISCV_CPU(cs
);
967 CPURISCVState
*env
= &cpu
->env
;
969 switch (riscv_cpu_mxl(env
)) {
971 return g_strdup("riscv:rv32");
974 return g_strdup("riscv:rv64");
976 g_assert_not_reached();
980 static const char *riscv_gdb_get_dynamic_xml(CPUState
*cs
, const char *xmlname
)
982 RISCVCPU
*cpu
= RISCV_CPU(cs
);
984 if (strcmp(xmlname
, "riscv-csr.xml") == 0) {
985 return cpu
->dyn_csr_xml
;
986 } else if (strcmp(xmlname
, "riscv-vector.xml") == 0) {
987 return cpu
->dyn_vreg_xml
;
993 #ifndef CONFIG_USER_ONLY
994 #include "hw/core/sysemu-cpu-ops.h"
996 static const struct SysemuCPUOps riscv_sysemu_ops
= {
997 .get_phys_page_debug
= riscv_cpu_get_phys_page_debug
,
998 .write_elf64_note
= riscv_cpu_write_elf64_note
,
999 .write_elf32_note
= riscv_cpu_write_elf32_note
,
1000 .legacy_vmsd
= &vmstate_riscv_cpu
,
1004 #include "hw/core/tcg-cpu-ops.h"
1006 static const struct TCGCPUOps riscv_tcg_ops
= {
1007 .initialize
= riscv_translate_init
,
1008 .synchronize_from_tb
= riscv_cpu_synchronize_from_tb
,
1010 #ifndef CONFIG_USER_ONLY
1011 .tlb_fill
= riscv_cpu_tlb_fill
,
1012 .cpu_exec_interrupt
= riscv_cpu_exec_interrupt
,
1013 .do_interrupt
= riscv_cpu_do_interrupt
,
1014 .do_transaction_failed
= riscv_cpu_do_transaction_failed
,
1015 .do_unaligned_access
= riscv_cpu_do_unaligned_access
,
1016 .debug_excp_handler
= riscv_cpu_debug_excp_handler
,
1017 .debug_check_breakpoint
= riscv_cpu_debug_check_breakpoint
,
1018 .debug_check_watchpoint
= riscv_cpu_debug_check_watchpoint
,
1019 #endif /* !CONFIG_USER_ONLY */
1022 static void riscv_cpu_class_init(ObjectClass
*c
, void *data
)
1024 RISCVCPUClass
*mcc
= RISCV_CPU_CLASS(c
);
1025 CPUClass
*cc
= CPU_CLASS(c
);
1026 DeviceClass
*dc
= DEVICE_CLASS(c
);
1028 device_class_set_parent_realize(dc
, riscv_cpu_realize
,
1029 &mcc
->parent_realize
);
1031 device_class_set_parent_reset(dc
, riscv_cpu_reset
, &mcc
->parent_reset
);
1033 cc
->class_by_name
= riscv_cpu_class_by_name
;
1034 cc
->has_work
= riscv_cpu_has_work
;
1035 cc
->dump_state
= riscv_cpu_dump_state
;
1036 cc
->set_pc
= riscv_cpu_set_pc
;
1037 cc
->gdb_read_register
= riscv_cpu_gdb_read_register
;
1038 cc
->gdb_write_register
= riscv_cpu_gdb_write_register
;
1039 cc
->gdb_num_core_regs
= 33;
1040 cc
->gdb_stop_before_watchpoint
= true;
1041 cc
->disas_set_info
= riscv_cpu_disas_set_info
;
1042 #ifndef CONFIG_USER_ONLY
1043 cc
->sysemu_ops
= &riscv_sysemu_ops
;
1045 cc
->gdb_arch_name
= riscv_gdb_arch_name
;
1046 cc
->gdb_get_dynamic_xml
= riscv_gdb_get_dynamic_xml
;
1047 cc
->tcg_ops
= &riscv_tcg_ops
;
1049 device_class_set_props(dc
, riscv_cpu_properties
);
/* Build an isa_ext_data entry: stringified extension name + its cfg flag. */
#define ISA_EDATA_ENTRY(name, prop) {#name, cpu->cfg.prop}
1054 static void riscv_isa_string_ext(RISCVCPU
*cpu
, char **isa_str
, int max_str_len
)
1056 char *old
= *isa_str
;
1057 char *new = *isa_str
;
1061 * Here are the ordering rules of extension naming defined by RISC-V
1063 * 1. All extensions should be separated from other multi-letter extensions
1065 * 2. The first letter following the 'Z' conventionally indicates the most
1066 * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
1067 * If multiple 'Z' extensions are named, they should be ordered first
1068 * by category, then alphabetically within a category.
1069 * 3. Standard supervisor-level extensions (starts with 'S') should be
1070 * listed after standard unprivileged extensions. If multiple
1071 * supervisor-level extensions are listed, they should be ordered
1073 * 4. Non-standard extensions (starts with 'X') must be listed after all
1074 * standard extensions. They must be separated from other multi-letter
1075 * extensions by an underscore.
1077 struct isa_ext_data isa_edata_arr
[] = {
1078 ISA_EDATA_ENTRY(zicsr
, ext_icsr
),
1079 ISA_EDATA_ENTRY(zifencei
, ext_ifencei
),
1080 ISA_EDATA_ENTRY(zmmul
, ext_zmmul
),
1081 ISA_EDATA_ENTRY(zfh
, ext_zfh
),
1082 ISA_EDATA_ENTRY(zfhmin
, ext_zfhmin
),
1083 ISA_EDATA_ENTRY(zfinx
, ext_zfinx
),
1084 ISA_EDATA_ENTRY(zdinx
, ext_zdinx
),
1085 ISA_EDATA_ENTRY(zba
, ext_zba
),
1086 ISA_EDATA_ENTRY(zbb
, ext_zbb
),
1087 ISA_EDATA_ENTRY(zbc
, ext_zbc
),
1088 ISA_EDATA_ENTRY(zbkb
, ext_zbkb
),
1089 ISA_EDATA_ENTRY(zbkc
, ext_zbkc
),
1090 ISA_EDATA_ENTRY(zbkx
, ext_zbkx
),
1091 ISA_EDATA_ENTRY(zbs
, ext_zbs
),
1092 ISA_EDATA_ENTRY(zk
, ext_zk
),
1093 ISA_EDATA_ENTRY(zkn
, ext_zkn
),
1094 ISA_EDATA_ENTRY(zknd
, ext_zknd
),
1095 ISA_EDATA_ENTRY(zkne
, ext_zkne
),
1096 ISA_EDATA_ENTRY(zknh
, ext_zknh
),
1097 ISA_EDATA_ENTRY(zkr
, ext_zkr
),
1098 ISA_EDATA_ENTRY(zks
, ext_zks
),
1099 ISA_EDATA_ENTRY(zksed
, ext_zksed
),
1100 ISA_EDATA_ENTRY(zksh
, ext_zksh
),
1101 ISA_EDATA_ENTRY(zkt
, ext_zkt
),
1102 ISA_EDATA_ENTRY(zve32f
, ext_zve32f
),
1103 ISA_EDATA_ENTRY(zve64f
, ext_zve64f
),
1104 ISA_EDATA_ENTRY(zhinx
, ext_zhinx
),
1105 ISA_EDATA_ENTRY(zhinxmin
, ext_zhinxmin
),
1106 ISA_EDATA_ENTRY(svinval
, ext_svinval
),
1107 ISA_EDATA_ENTRY(svnapot
, ext_svnapot
),
1108 ISA_EDATA_ENTRY(svpbmt
, ext_svpbmt
),
1111 for (i
= 0; i
< ARRAY_SIZE(isa_edata_arr
); i
++) {
1112 if (isa_edata_arr
[i
].enabled
) {
1113 new = g_strconcat(old
, "_", isa_edata_arr
[i
].name
, NULL
);
1122 char *riscv_isa_string(RISCVCPU
*cpu
)
1125 const size_t maxlen
= sizeof("rv128") + sizeof(riscv_single_letter_exts
);
1126 char *isa_str
= g_new(char, maxlen
);
1127 char *p
= isa_str
+ snprintf(isa_str
, maxlen
, "rv%d", TARGET_LONG_BITS
);
1128 for (i
= 0; i
< sizeof(riscv_single_letter_exts
) - 1; i
++) {
1129 if (cpu
->env
.misa_ext
& RV(riscv_single_letter_exts
[i
])) {
1130 *p
++ = qemu_tolower(riscv_single_letter_exts
[i
]);
1134 if (!cpu
->cfg
.short_isa_string
) {
1135 riscv_isa_string_ext(cpu
, &isa_str
, maxlen
);
1140 static gint
riscv_cpu_list_compare(gconstpointer a
, gconstpointer b
)
1142 ObjectClass
*class_a
= (ObjectClass
*)a
;
1143 ObjectClass
*class_b
= (ObjectClass
*)b
;
1144 const char *name_a
, *name_b
;
1146 name_a
= object_class_get_name(class_a
);
1147 name_b
= object_class_get_name(class_b
);
1148 return strcmp(name_a
, name_b
);
1151 static void riscv_cpu_list_entry(gpointer data
, gpointer user_data
)
1153 const char *typename
= object_class_get_name(OBJECT_CLASS(data
));
1154 int len
= strlen(typename
) - strlen(RISCV_CPU_TYPE_SUFFIX
);
1156 qemu_printf("%.*s\n", len
, typename
);
1159 void riscv_cpu_list(void)
1163 list
= object_class_get_list(TYPE_RISCV_CPU
, false);
1164 list
= g_slist_sort(list
, riscv_cpu_list_compare
);
1165 g_slist_foreach(list
, riscv_cpu_list_entry
, NULL
);
1169 #define DEFINE_CPU(type_name, initfn) \
1171 .name = type_name, \
1172 .parent = TYPE_RISCV_CPU, \
1173 .instance_init = initfn \
1176 static const TypeInfo riscv_cpu_type_infos
[] = {
1178 .name
= TYPE_RISCV_CPU
,
1180 .instance_size
= sizeof(RISCVCPU
),
1181 .instance_align
= __alignof__(RISCVCPU
),
1182 .instance_init
= riscv_cpu_init
,
1184 .class_size
= sizeof(RISCVCPUClass
),
1185 .class_init
= riscv_cpu_class_init
,
1187 DEFINE_CPU(TYPE_RISCV_CPU_ANY
, riscv_any_cpu_init
),
1188 #if defined(CONFIG_KVM)
1189 DEFINE_CPU(TYPE_RISCV_CPU_HOST
, riscv_host_cpu_init
),
1191 #if defined(TARGET_RISCV32)
1192 DEFINE_CPU(TYPE_RISCV_CPU_BASE32
, rv32_base_cpu_init
),
1193 DEFINE_CPU(TYPE_RISCV_CPU_IBEX
, rv32_ibex_cpu_init
),
1194 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31
, rv32_sifive_e_cpu_init
),
1195 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34
, rv32_imafcu_nommu_cpu_init
),
1196 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34
, rv32_sifive_u_cpu_init
),
1197 #elif defined(TARGET_RISCV64)
1198 DEFINE_CPU(TYPE_RISCV_CPU_BASE64
, rv64_base_cpu_init
),
1199 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51
, rv64_sifive_e_cpu_init
),
1200 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54
, rv64_sifive_u_cpu_init
),
1201 DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C
, rv64_sifive_u_cpu_init
),
1202 DEFINE_CPU(TYPE_RISCV_CPU_BASE128
, rv128_base_cpu_init
),
1206 DEFINE_TYPES(riscv_cpu_type_infos
)