/*
 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
 * Copyright (c) 2017-2018 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/ctype.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/kvm.h"
#include "kvm_riscv.h"
/* RISC-V CPU definitions */

/*
 * Single-letter extension name for each MISA bit; riscv_isa_string() walks
 * this table to build the canonical ISA string.  Exactly 26 letters, one per
 * bit, deliberately not NUL-terminated (sizeof is used as the count).
 */
static const char riscv_exts[26] = "IEMAFDQCLBJTPVNSUHKORWXYZG";
/* "xN/ABI" display names of the 32 integer registers, indexed by number. */
const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
    "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
    "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
    "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
};
/*
 * "h"-suffixed display names for the integer registers, indexed by number.
 * NOTE(review): presumably names the upper halves of the registers when a
 * wider-than-native view is dumped — confirm against the users of this table.
 */
const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
    "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
    "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
    "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
    "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h",  "x31h/t6h"
};
/* "fN/ABI" display names of the 32 floating-point registers, by number. */
const char * const riscv_fpr_regnames[] = {
    "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
    "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
    "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
    "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
    "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};
/*
 * Human-readable names for synchronous exception causes, indexed by cause
 * number; consumed by riscv_cpu_get_trap_name().
 * NOTE(review): this extraction is truncated — most initializer entries and
 * the closing "};" (original lines 66-90) are missing, so the block does not
 * compile as-is.  The surviving lines are preserved byte-for-byte.
 */
65 static const char * const riscv_excp_names
[] = {
68 "illegal_instruction",
86 "guest_exec_page_fault",
87 "guest_load_page_fault",
89 "guest_store_page_fault",
/*
 * Human-readable names for interrupt causes, indexed by cause number;
 * consumed by riscv_cpu_get_trap_name() when @async is true.
 * NOTE(review): the entire initializer body and closing "};" are missing
 * from this extraction; the declaration is preserved byte-for-byte.
 */
92 static const char * const riscv_intr_names
[] = {
111 const char *riscv_cpu_get_trap_name(target_ulong cause
, bool async
)
114 return (cause
< ARRAY_SIZE(riscv_intr_names
)) ?
115 riscv_intr_names
[cause
] : "(unknown)";
117 return (cause
< ARRAY_SIZE(riscv_excp_names
)) ?
118 riscv_excp_names
[cause
] : "(unknown)";
122 static void set_misa(CPURISCVState
*env
, RISCVMXL mxl
, uint32_t ext
)
124 env
->misa_mxl_max
= env
->misa_mxl
= mxl
;
125 env
->misa_ext_mask
= env
->misa_ext
= ext
;
128 static void set_priv_version(CPURISCVState
*env
, int priv_ver
)
130 env
->priv_ver
= priv_ver
;
133 static void set_vext_version(CPURISCVState
*env
, int vext_ver
)
135 env
->vext_ver
= vext_ver
;
138 static void set_resetvec(CPURISCVState
*env
, target_ulong resetvec
)
140 #ifndef CONFIG_USER_ONLY
141 env
->resetvec
= resetvec
;
145 static void riscv_any_cpu_init(Object
*obj
)
147 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
148 #if defined(TARGET_RISCV32)
149 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVU
);
150 #elif defined(TARGET_RISCV64)
151 set_misa(env
, MXL_RV64
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVU
);
153 set_priv_version(env
, PRIV_VERSION_1_11_0
);
156 #if defined(TARGET_RISCV64)
157 static void rv64_base_cpu_init(Object
*obj
)
159 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
160 /* We set this in the realise function */
161 set_misa(env
, MXL_RV64
, 0);
164 static void rv64_sifive_u_cpu_init(Object
*obj
)
166 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
167 set_misa(env
, MXL_RV64
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVS
| RVU
);
168 set_priv_version(env
, PRIV_VERSION_1_10_0
);
171 static void rv64_sifive_e_cpu_init(Object
*obj
)
173 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
174 set_misa(env
, MXL_RV64
, RVI
| RVM
| RVA
| RVC
| RVU
);
175 set_priv_version(env
, PRIV_VERSION_1_10_0
);
176 qdev_prop_set_bit(DEVICE(obj
), "mmu", false);
179 static void rv128_base_cpu_init(Object
*obj
)
181 if (qemu_tcg_mttcg_enabled()) {
182 /* Missing 128-bit aligned atomics */
183 error_report("128-bit RISC-V currently does not work with Multi "
184 "Threaded TCG. Please use: -accel tcg,thread=single");
187 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
188 /* We set this in the realise function */
189 set_misa(env
, MXL_RV128
, 0);
192 static void rv32_base_cpu_init(Object
*obj
)
194 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
195 /* We set this in the realise function */
196 set_misa(env
, MXL_RV32
, 0);
199 static void rv32_sifive_u_cpu_init(Object
*obj
)
201 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
202 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVS
| RVU
);
203 set_priv_version(env
, PRIV_VERSION_1_10_0
);
206 static void rv32_sifive_e_cpu_init(Object
*obj
)
208 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
209 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVC
| RVU
);
210 set_priv_version(env
, PRIV_VERSION_1_10_0
);
211 qdev_prop_set_bit(DEVICE(obj
), "mmu", false);
214 static void rv32_ibex_cpu_init(Object
*obj
)
216 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
217 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVC
| RVU
);
218 set_priv_version(env
, PRIV_VERSION_1_10_0
);
219 qdev_prop_set_bit(DEVICE(obj
), "mmu", false);
220 qdev_prop_set_bit(DEVICE(obj
), "x-epmp", true);
223 static void rv32_imafcu_nommu_cpu_init(Object
*obj
)
225 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
226 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVF
| RVC
| RVU
);
227 set_priv_version(env
, PRIV_VERSION_1_10_0
);
228 set_resetvec(env
, DEFAULT_RSTVEC
);
229 qdev_prop_set_bit(DEVICE(obj
), "mmu", false);
233 #if defined(CONFIG_KVM)
234 static void riscv_host_cpu_init(Object
*obj
)
236 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
237 #if defined(TARGET_RISCV32)
238 set_misa(env
, MXL_RV32
, 0);
239 #elif defined(TARGET_RISCV64)
240 set_misa(env
, MXL_RV64
, 0);
245 static ObjectClass
*riscv_cpu_class_by_name(const char *cpu_model
)
251 cpuname
= g_strsplit(cpu_model
, ",", 1);
252 typename
= g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname
[0]);
253 oc
= object_class_by_name(typename
);
256 if (!oc
|| !object_class_dynamic_cast(oc
, TYPE_RISCV_CPU
) ||
257 object_class_is_abstract(oc
)) {
/*
 * Dump CPU state (virtualization flag, pc, a set of CSRs, the integer
 * registers and — when CPU_DUMP_FPU is requested — the FP registers) to @f.
 * CSR presence filtering is delegated to riscv_csrrw_debug()/csr.c.
 * NOTE(review): this extraction is truncated — the dump_csrs[] initializer
 * (original lines 278-313) and several braces are missing, so the block
 * does not compile as-is.  Surviving lines are preserved byte-for-byte.
 */
263 static void riscv_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
265 RISCVCPU
*cpu
= RISCV_CPU(cs
);
266 CPURISCVState
*env
= &cpu
->env
;
269 #if !defined(CONFIG_USER_ONLY)
270 if (riscv_has_ext(env
, RVH
)) {
271 qemu_fprintf(f
, " %s %d\n", "V = ", riscv_cpu_virt_enabled(env
));
274 qemu_fprintf(f
, " %s " TARGET_FMT_lx
"\n", "pc ", env
->pc
);
275 #ifndef CONFIG_USER_ONLY
/* NOTE(review): dump_csrs[] entries were lost in extraction. */
277 static const int dump_csrs
[] = {
314 for (int i
= 0; i
< ARRAY_SIZE(dump_csrs
); ++i
) {
315 int csrno
= dump_csrs
[i
];
316 target_ulong val
= 0;
317 RISCVException res
= riscv_csrrw_debug(env
, csrno
, &val
, 0, 0);
320 * Rely on the smode, hmode, etc, predicates within csr.c
321 * to do the filtering of the registers that are present.
323 if (res
== RISCV_EXCP_NONE
) {
324 qemu_fprintf(f
, " %-8s " TARGET_FMT_lx
"\n",
325 csr_ops
[csrno
].name
, val
);
/* Integer register file, one "name value" pair per register. */
331 for (i
= 0; i
< 32; i
++) {
332 qemu_fprintf(f
, " %-8s " TARGET_FMT_lx
,
333 riscv_int_regnames
[i
], env
->gpr
[i
]);
335 qemu_fprintf(f
, "\n");
/* FP register file only on explicit request via CPU_DUMP_FPU. */
338 if (flags
& CPU_DUMP_FPU
) {
339 for (i
= 0; i
< 32; i
++) {
340 qemu_fprintf(f
, " %-8s %016" PRIx64
,
341 riscv_fpr_regnames
[i
], env
->fpr
[i
]);
343 qemu_fprintf(f
, "\n");
349 static void riscv_cpu_set_pc(CPUState
*cs
, vaddr value
)
351 RISCVCPU
*cpu
= RISCV_CPU(cs
);
352 CPURISCVState
*env
= &cpu
->env
;
354 if (env
->xl
== MXL_RV32
) {
355 env
->pc
= (int32_t)value
;
361 static void riscv_cpu_synchronize_from_tb(CPUState
*cs
,
362 const TranslationBlock
*tb
)
364 RISCVCPU
*cpu
= RISCV_CPU(cs
);
365 CPURISCVState
*env
= &cpu
->env
;
366 RISCVMXL xl
= FIELD_EX32(tb
->flags
, TB_FLAGS
, XL
);
368 if (xl
== MXL_RV32
) {
369 env
->pc
= (int32_t)tb
->pc
;
375 static bool riscv_cpu_has_work(CPUState
*cs
)
377 #ifndef CONFIG_USER_ONLY
378 RISCVCPU
*cpu
= RISCV_CPU(cs
);
379 CPURISCVState
*env
= &cpu
->env
;
381 * Definition of the WFI instruction requires it to ignore the privilege
382 * mode and delegation registers, but respect individual enables
384 return (env
->mip
& env
->mie
) != 0;
390 void restore_state_to_opc(CPURISCVState
*env
, TranslationBlock
*tb
,
393 RISCVMXL xl
= FIELD_EX32(tb
->flags
, TB_FLAGS
, XL
);
394 if (xl
== MXL_RV32
) {
395 env
->pc
= (int32_t)data
[0];
/*
 * DeviceClass::reset handler: bring the hart back to its architectural
 * reset state (misa_mxl, mstatus SXL/UXL, pc = resetvec, default local
 * interrupt priorities, pointer-masking mmte, fp NaN mode, and a KVM vcpu
 * reset when applicable).
 * NOTE(review): this extraction is truncated — local declarations (i, irq,
 * iprio, rdzero), several statements and most braces/#endifs are missing,
 * so the block does not compile as-is.  Surviving lines are preserved
 * byte-for-byte.
 */
401 static void riscv_cpu_reset(DeviceState
*dev
)
403 #ifndef CONFIG_USER_ONLY
407 CPUState
*cs
= CPU(dev
);
408 RISCVCPU
*cpu
= RISCV_CPU(cs
);
409 RISCVCPUClass
*mcc
= RISCV_CPU_GET_CLASS(cpu
);
410 CPURISCVState
*env
= &cpu
->env
;
412 mcc
->parent_reset(dev
);
413 #ifndef CONFIG_USER_ONLY
414 env
->misa_mxl
= env
->misa_mxl_max
;
416 env
->mstatus
&= ~(MSTATUS_MIE
| MSTATUS_MPRV
);
417 if (env
->misa_mxl
> MXL_RV32
) {
419 * The reset status of SXL/UXL is undefined, but mstatus is WARL
420 * and we must ensure that the value after init is valid for read.
422 env
->mstatus
= set_field(env
->mstatus
, MSTATUS64_SXL
, env
->misa_mxl
);
423 env
->mstatus
= set_field(env
->mstatus
, MSTATUS64_UXL
, env
->misa_mxl
);
424 if (riscv_has_ext(env
, RVH
)) {
425 env
->vsstatus
= set_field(env
->vsstatus
,
426 MSTATUS64_SXL
, env
->misa_mxl
);
427 env
->vsstatus
= set_field(env
->vsstatus
,
428 MSTATUS64_UXL
, env
->misa_mxl
);
429 env
->mstatus_hs
= set_field(env
->mstatus_hs
,
430 MSTATUS64_SXL
, env
->misa_mxl
);
431 env
->mstatus_hs
= set_field(env
->mstatus_hs
,
432 MSTATUS64_UXL
, env
->misa_mxl
);
436 env
->miclaim
= MIP_SGEIP
;
437 env
->pc
= env
->resetvec
;
438 env
->two_stage_lookup
= false;
440 /* Initialized default priorities of local interrupts. */
441 for (i
= 0; i
< ARRAY_SIZE(env
->miprio
); i
++) {
442 iprio
= riscv_cpu_default_priority(i
);
443 env
->miprio
[i
] = (i
== IRQ_M_EXT
) ? 0 : iprio
;
444 env
->siprio
[i
] = (i
== IRQ_S_EXT
) ? 0 : iprio
;
/* Seed the hypervisor guest-interrupt priorities from the M-level ones. */
448 while (!riscv_cpu_hviprio_index2irq(i
, &irq
, &rdzero
)) {
450 env
->hviprio
[irq
] = env
->miprio
[irq
];
454 /* mmte is supposed to have pm.current hardwired to 1 */
455 env
->mmte
|= (PM_EXT_INITIAL
| MMTE_M_PM_CURRENT
);
457 env
->xl
= riscv_cpu_mxl(env
);
458 riscv_cpu_update_mask(env
);
459 cs
->exception_index
= RISCV_EXCP_NONE
;
461 set_default_nan_mode(1, &env
->fp_status
);
463 #ifndef CONFIG_USER_ONLY
465 kvm_riscv_reset_vcpu(cpu
);
470 static void riscv_cpu_disas_set_info(CPUState
*s
, disassemble_info
*info
)
472 RISCVCPU
*cpu
= RISCV_CPU(s
);
474 switch (riscv_cpu_mxl(&cpu
->env
)) {
476 info
->print_insn
= print_insn_riscv32
;
479 info
->print_insn
= print_insn_riscv64
;
482 info
->print_insn
= print_insn_riscv128
;
485 g_assert_not_reached();
/*
 * DeviceClass::realize handler: validate the configured privilege/vector
 * spec versions, set optional features (MMU/PMP/ePMP/AIA), the reset
 * vector and the GDB XML, cross-check the ISA extension properties
 * (I vs E, G implies IMAFD, vector VLEN/ELEN constraints, Zfinx conflicts),
 * build the final MISA word, register GDB registers and chain to the
 * parent realize.
 * NOTE(review): this extraction is truncated — many error_setg() calls,
 * feature-test conditions, "ext |= RV?" statements, case labels and braces
 * are missing, so the block does not compile as-is.  Surviving lines are
 * preserved byte-for-byte.
 */
489 static void riscv_cpu_realize(DeviceState
*dev
, Error
**errp
)
491 CPUState
*cs
= CPU(dev
);
492 RISCVCPU
*cpu
= RISCV_CPU(dev
);
493 CPURISCVState
*env
= &cpu
->env
;
494 RISCVCPUClass
*mcc
= RISCV_CPU_GET_CLASS(dev
);
495 CPUClass
*cc
= CPU_CLASS(mcc
);
496 int priv_version
= 0;
497 Error
*local_err
= NULL
;
499 cpu_exec_realizefn(cs
, &local_err
);
500 if (local_err
!= NULL
) {
501 error_propagate(errp
, local_err
);
/* Parse the user-supplied privilege-spec version string, if any. */
505 if (cpu
->cfg
.priv_spec
) {
506 if (!g_strcmp0(cpu
->cfg
.priv_spec
, "v1.11.0")) {
507 priv_version
= PRIV_VERSION_1_11_0
;
508 } else if (!g_strcmp0(cpu
->cfg
.priv_spec
, "v1.10.0")) {
509 priv_version
= PRIV_VERSION_1_10_0
;
512 "Unsupported privilege spec version '%s'",
519 set_priv_version(env
, priv_version
);
520 } else if (!env
->priv_ver
) {
521 set_priv_version(env
, PRIV_VERSION_1_11_0
);
525 riscv_set_feature(env
, RISCV_FEATURE_MMU
);
529 riscv_set_feature(env
, RISCV_FEATURE_PMP
);
532 * Enhanced PMP should only be available
533 * on harts with PMP support
536 riscv_set_feature(env
, RISCV_FEATURE_EPMP
);
541 riscv_set_feature(env
, RISCV_FEATURE_AIA
);
544 set_resetvec(env
, cpu
->cfg
.resetvec
);
546 /* Validate that MISA_MXL is set properly. */
547 switch (env
->misa_mxl_max
) {
548 #ifdef TARGET_RISCV64
551 cc
->gdb_core_xml_file
= "riscv-64bit-cpu.xml";
555 cc
->gdb_core_xml_file
= "riscv-32bit-cpu.xml";
558 g_assert_not_reached();
560 assert(env
->misa_mxl_max
== env
->misa_mxl
);
562 /* If only MISA_EXT is unset for misa, then set it from properties */
563 if (env
->misa_ext
== 0) {
566 /* Do some ISA extension error checking */
567 if (cpu
->cfg
.ext_i
&& cpu
->cfg
.ext_e
) {
569 "I and E extensions are incompatible");
573 if (!cpu
->cfg
.ext_i
&& !cpu
->cfg
.ext_e
) {
575 "Either I or E extension must be set");
579 if (cpu
->cfg
.ext_g
&& !(cpu
->cfg
.ext_i
& cpu
->cfg
.ext_m
&
580 cpu
->cfg
.ext_a
& cpu
->cfg
.ext_f
&
582 warn_report("Setting G will also set IMAFD");
583 cpu
->cfg
.ext_i
= true;
584 cpu
->cfg
.ext_m
= true;
585 cpu
->cfg
.ext_a
= true;
586 cpu
->cfg
.ext_f
= true;
587 cpu
->cfg
.ext_d
= true;
/* The *inx extensions all imply the base Zfinx. */
590 if (cpu
->cfg
.ext_zdinx
|| cpu
->cfg
.ext_zhinx
||
591 cpu
->cfg
.ext_zhinxmin
) {
592 cpu
->cfg
.ext_zfinx
= true;
595 /* Set the ISA extensions, checks should have happened above */
596 if (cpu
->cfg
.ext_i
) {
599 if (cpu
->cfg
.ext_e
) {
602 if (cpu
->cfg
.ext_m
) {
605 if (cpu
->cfg
.ext_a
) {
608 if (cpu
->cfg
.ext_f
) {
611 if (cpu
->cfg
.ext_d
) {
614 if (cpu
->cfg
.ext_c
) {
617 if (cpu
->cfg
.ext_s
) {
620 if (cpu
->cfg
.ext_u
) {
623 if (cpu
->cfg
.ext_h
) {
626 if (cpu
->cfg
.ext_v
) {
627 int vext_version
= VEXT_VERSION_1_00_0
;
629 if (!is_power_of_2(cpu
->cfg
.vlen
)) {
631 "Vector extension VLEN must be power of 2");
634 if (cpu
->cfg
.vlen
> RV_VLEN_MAX
|| cpu
->cfg
.vlen
< 128) {
636 "Vector extension implementation only supports VLEN "
637 "in the range [128, %d]", RV_VLEN_MAX
);
640 if (!is_power_of_2(cpu
->cfg
.elen
)) {
642 "Vector extension ELEN must be power of 2");
645 if (cpu
->cfg
.elen
> 64 || cpu
->cfg
.vlen
< 8) {
647 "Vector extension implementation only supports ELEN "
648 "in the range [8, 64]");
651 if (cpu
->cfg
.vext_spec
) {
652 if (!g_strcmp0(cpu
->cfg
.vext_spec
, "v1.0")) {
653 vext_version
= VEXT_VERSION_1_00_0
;
656 "Unsupported vector spec version '%s'",
661 qemu_log("vector version is not specified, "
662 "use the default value v1.0\n");
664 set_vext_version(env
, vext_version
);
666 if ((cpu
->cfg
.ext_zve32f
|| cpu
->cfg
.ext_zve64f
) && !cpu
->cfg
.ext_f
) {
667 error_setg(errp
, "Zve32f/Zve64f extension depends upon RVF.");
670 if (cpu
->cfg
.ext_j
) {
673 if (cpu
->cfg
.ext_zfinx
&& ((ext
& (RVF
| RVD
)) || cpu
->cfg
.ext_zfh
||
674 cpu
->cfg
.ext_zfhmin
)) {
676 "'Zfinx' cannot be supported together with 'F', 'D', 'Zfh',"
681 set_misa(env
, env
->misa_mxl
, ext
);
684 riscv_cpu_register_gdb_regs_for_features(cs
);
689 mcc
->parent_realize(dev
, errp
);
692 #ifndef CONFIG_USER_ONLY
/*
 * GPIO input handler for hart interrupt lines.  Local interrupts
 * (irq < IRQ_LOCAL_MAX) are routed into mip (via KVM when enabled);
 * guest external interrupts update the HGEIP CSR and the mip.SGEIP bit.
 * NOTE(review): this extraction is truncated — the switch/case skeleton for
 * the local-interrupt numbers (original lines 699-718) and several braces
 * are missing, so the block does not compile as-is.  Surviving lines are
 * preserved byte-for-byte.
 */
693 static void riscv_cpu_set_irq(void *opaque
, int irq
, int level
)
695 RISCVCPU
*cpu
= RISCV_CPU(opaque
);
696 CPURISCVState
*env
= &cpu
->env
;
698 if (irq
< IRQ_LOCAL_MAX
) {
713 kvm_riscv_set_irq(cpu
, irq
, level
);
715 riscv_cpu_update_mip(cpu
, 1 << irq
, BOOL_TO_MASK(level
));
719 g_assert_not_reached();
721 } else if (irq
< (IRQ_LOCAL_MAX
+ IRQ_LOCAL_GUEST_MAX
)) {
722 /* Require H-extension for handling guest local interrupts */
723 if (!riscv_has_ext(env
, RVH
)) {
724 g_assert_not_reached();
727 /* Compute bit position in HGEIP CSR */
728 irq
= irq
- IRQ_LOCAL_MAX
+ 1;
729 if (env
->geilen
< irq
) {
730 g_assert_not_reached();
733 /* Update HGEIP CSR */
734 env
->hgeip
&= ~((target_ulong
)1 << irq
);
736 env
->hgeip
|= (target_ulong
)1 << irq
;
739 /* Update mip.SGEIP bit */
740 riscv_cpu_update_mip(cpu
, MIP_SGEIP
,
741 BOOL_TO_MASK(!!(env
->hgeie
& env
->hgeip
)));
743 g_assert_not_reached();
746 #endif /* CONFIG_USER_ONLY */
748 static void riscv_cpu_init(Object
*obj
)
750 RISCVCPU
*cpu
= RISCV_CPU(obj
);
752 cpu_set_cpustate_pointers(cpu
);
754 #ifndef CONFIG_USER_ONLY
755 qdev_init_gpio_in(DEVICE(cpu
), riscv_cpu_set_irq
,
756 IRQ_LOCAL_MAX
+ IRQ_LOCAL_GUEST_MAX
);
757 #endif /* CONFIG_USER_ONLY */
760 static Property riscv_cpu_properties
[] = {
761 /* Defaults for standard extensions */
762 DEFINE_PROP_BOOL("i", RISCVCPU
, cfg
.ext_i
, true),
763 DEFINE_PROP_BOOL("e", RISCVCPU
, cfg
.ext_e
, false),
764 DEFINE_PROP_BOOL("g", RISCVCPU
, cfg
.ext_g
, true),
765 DEFINE_PROP_BOOL("m", RISCVCPU
, cfg
.ext_m
, true),
766 DEFINE_PROP_BOOL("a", RISCVCPU
, cfg
.ext_a
, true),
767 DEFINE_PROP_BOOL("f", RISCVCPU
, cfg
.ext_f
, true),
768 DEFINE_PROP_BOOL("d", RISCVCPU
, cfg
.ext_d
, true),
769 DEFINE_PROP_BOOL("c", RISCVCPU
, cfg
.ext_c
, true),
770 DEFINE_PROP_BOOL("s", RISCVCPU
, cfg
.ext_s
, true),
771 DEFINE_PROP_BOOL("u", RISCVCPU
, cfg
.ext_u
, true),
772 DEFINE_PROP_BOOL("v", RISCVCPU
, cfg
.ext_v
, false),
773 DEFINE_PROP_BOOL("h", RISCVCPU
, cfg
.ext_h
, true),
774 DEFINE_PROP_BOOL("Counters", RISCVCPU
, cfg
.ext_counters
, true),
775 DEFINE_PROP_BOOL("Zifencei", RISCVCPU
, cfg
.ext_ifencei
, true),
776 DEFINE_PROP_BOOL("Zicsr", RISCVCPU
, cfg
.ext_icsr
, true),
777 DEFINE_PROP_BOOL("Zfh", RISCVCPU
, cfg
.ext_zfh
, false),
778 DEFINE_PROP_BOOL("Zfhmin", RISCVCPU
, cfg
.ext_zfhmin
, false),
779 DEFINE_PROP_BOOL("Zve32f", RISCVCPU
, cfg
.ext_zve32f
, false),
780 DEFINE_PROP_BOOL("Zve64f", RISCVCPU
, cfg
.ext_zve64f
, false),
781 DEFINE_PROP_BOOL("mmu", RISCVCPU
, cfg
.mmu
, true),
782 DEFINE_PROP_BOOL("pmp", RISCVCPU
, cfg
.pmp
, true),
784 DEFINE_PROP_STRING("priv_spec", RISCVCPU
, cfg
.priv_spec
),
785 DEFINE_PROP_STRING("vext_spec", RISCVCPU
, cfg
.vext_spec
),
786 DEFINE_PROP_UINT16("vlen", RISCVCPU
, cfg
.vlen
, 128),
787 DEFINE_PROP_UINT16("elen", RISCVCPU
, cfg
.elen
, 64),
789 DEFINE_PROP_BOOL("svinval", RISCVCPU
, cfg
.ext_svinval
, false),
790 DEFINE_PROP_BOOL("svnapot", RISCVCPU
, cfg
.ext_svnapot
, false),
791 DEFINE_PROP_BOOL("svpbmt", RISCVCPU
, cfg
.ext_svpbmt
, false),
793 DEFINE_PROP_BOOL("zba", RISCVCPU
, cfg
.ext_zba
, true),
794 DEFINE_PROP_BOOL("zbb", RISCVCPU
, cfg
.ext_zbb
, true),
795 DEFINE_PROP_BOOL("zbc", RISCVCPU
, cfg
.ext_zbc
, true),
796 DEFINE_PROP_BOOL("zbs", RISCVCPU
, cfg
.ext_zbs
, true),
798 DEFINE_PROP_BOOL("zdinx", RISCVCPU
, cfg
.ext_zdinx
, false),
799 DEFINE_PROP_BOOL("zfinx", RISCVCPU
, cfg
.ext_zfinx
, false),
800 DEFINE_PROP_BOOL("zhinx", RISCVCPU
, cfg
.ext_zhinx
, false),
801 DEFINE_PROP_BOOL("zhinxmin", RISCVCPU
, cfg
.ext_zhinxmin
, false),
803 /* Vendor-specific custom extensions */
804 DEFINE_PROP_BOOL("xventanacondops", RISCVCPU
, cfg
.ext_XVentanaCondOps
, false),
806 /* These are experimental so mark with 'x-' */
807 DEFINE_PROP_BOOL("x-j", RISCVCPU
, cfg
.ext_j
, false),
809 DEFINE_PROP_BOOL("x-epmp", RISCVCPU
, cfg
.epmp
, false),
810 DEFINE_PROP_BOOL("x-aia", RISCVCPU
, cfg
.aia
, false),
812 DEFINE_PROP_UINT64("resetvec", RISCVCPU
, cfg
.resetvec
, DEFAULT_RSTVEC
),
813 DEFINE_PROP_END_OF_LIST(),
816 static gchar
*riscv_gdb_arch_name(CPUState
*cs
)
818 RISCVCPU
*cpu
= RISCV_CPU(cs
);
819 CPURISCVState
*env
= &cpu
->env
;
821 switch (riscv_cpu_mxl(env
)) {
823 return g_strdup("riscv:rv32");
826 return g_strdup("riscv:rv64");
828 g_assert_not_reached();
832 static const char *riscv_gdb_get_dynamic_xml(CPUState
*cs
, const char *xmlname
)
834 RISCVCPU
*cpu
= RISCV_CPU(cs
);
836 if (strcmp(xmlname
, "riscv-csr.xml") == 0) {
837 return cpu
->dyn_csr_xml
;
838 } else if (strcmp(xmlname
, "riscv-vector.xml") == 0) {
839 return cpu
->dyn_vreg_xml
;
845 #ifndef CONFIG_USER_ONLY
846 #include "hw/core/sysemu-cpu-ops.h"
848 static const struct SysemuCPUOps riscv_sysemu_ops
= {
849 .get_phys_page_debug
= riscv_cpu_get_phys_page_debug
,
850 .write_elf64_note
= riscv_cpu_write_elf64_note
,
851 .write_elf32_note
= riscv_cpu_write_elf32_note
,
852 .legacy_vmsd
= &vmstate_riscv_cpu
,
856 #include "hw/core/tcg-cpu-ops.h"
858 static const struct TCGCPUOps riscv_tcg_ops
= {
859 .initialize
= riscv_translate_init
,
860 .synchronize_from_tb
= riscv_cpu_synchronize_from_tb
,
862 #ifndef CONFIG_USER_ONLY
863 .tlb_fill
= riscv_cpu_tlb_fill
,
864 .cpu_exec_interrupt
= riscv_cpu_exec_interrupt
,
865 .do_interrupt
= riscv_cpu_do_interrupt
,
866 .do_transaction_failed
= riscv_cpu_do_transaction_failed
,
867 .do_unaligned_access
= riscv_cpu_do_unaligned_access
,
868 #endif /* !CONFIG_USER_ONLY */
871 static void riscv_cpu_class_init(ObjectClass
*c
, void *data
)
873 RISCVCPUClass
*mcc
= RISCV_CPU_CLASS(c
);
874 CPUClass
*cc
= CPU_CLASS(c
);
875 DeviceClass
*dc
= DEVICE_CLASS(c
);
877 device_class_set_parent_realize(dc
, riscv_cpu_realize
,
878 &mcc
->parent_realize
);
880 device_class_set_parent_reset(dc
, riscv_cpu_reset
, &mcc
->parent_reset
);
882 cc
->class_by_name
= riscv_cpu_class_by_name
;
883 cc
->has_work
= riscv_cpu_has_work
;
884 cc
->dump_state
= riscv_cpu_dump_state
;
885 cc
->set_pc
= riscv_cpu_set_pc
;
886 cc
->gdb_read_register
= riscv_cpu_gdb_read_register
;
887 cc
->gdb_write_register
= riscv_cpu_gdb_write_register
;
888 cc
->gdb_num_core_regs
= 33;
889 cc
->gdb_stop_before_watchpoint
= true;
890 cc
->disas_set_info
= riscv_cpu_disas_set_info
;
891 #ifndef CONFIG_USER_ONLY
892 cc
->sysemu_ops
= &riscv_sysemu_ops
;
894 cc
->gdb_arch_name
= riscv_gdb_arch_name
;
895 cc
->gdb_get_dynamic_xml
= riscv_gdb_get_dynamic_xml
;
896 cc
->tcg_ops
= &riscv_tcg_ops
;
898 device_class_set_props(dc
, riscv_cpu_properties
);
901 char *riscv_isa_string(RISCVCPU
*cpu
)
904 const size_t maxlen
= sizeof("rv128") + sizeof(riscv_exts
) + 1;
905 char *isa_str
= g_new(char, maxlen
);
906 char *p
= isa_str
+ snprintf(isa_str
, maxlen
, "rv%d", TARGET_LONG_BITS
);
907 for (i
= 0; i
< sizeof(riscv_exts
); i
++) {
908 if (cpu
->env
.misa_ext
& RV(riscv_exts
[i
])) {
909 *p
++ = qemu_tolower(riscv_exts
[i
]);
916 static gint
riscv_cpu_list_compare(gconstpointer a
, gconstpointer b
)
918 ObjectClass
*class_a
= (ObjectClass
*)a
;
919 ObjectClass
*class_b
= (ObjectClass
*)b
;
920 const char *name_a
, *name_b
;
922 name_a
= object_class_get_name(class_a
);
923 name_b
= object_class_get_name(class_b
);
924 return strcmp(name_a
, name_b
);
927 static void riscv_cpu_list_entry(gpointer data
, gpointer user_data
)
929 const char *typename
= object_class_get_name(OBJECT_CLASS(data
));
930 int len
= strlen(typename
) - strlen(RISCV_CPU_TYPE_SUFFIX
);
932 qemu_printf("%.*s\n", len
, typename
);
935 void riscv_cpu_list(void)
939 list
= object_class_get_list(TYPE_RISCV_CPU
, false);
940 list
= g_slist_sort(list
, riscv_cpu_list_compare
);
941 g_slist_foreach(list
, riscv_cpu_list_entry
, NULL
);
/*
 * TypeInfo boilerplate for a concrete CPU model: a named child of
 * TYPE_RISCV_CPU whose only specialisation is its instance_init.
 */
#define DEFINE_CPU(type_name, initfn)      \
    {                                      \
        .name = type_name,                 \
        .parent = TYPE_RISCV_CPU,          \
        .instance_init = initfn            \
    }
952 static const TypeInfo riscv_cpu_type_infos
[] = {
954 .name
= TYPE_RISCV_CPU
,
956 .instance_size
= sizeof(RISCVCPU
),
957 .instance_align
= __alignof__(RISCVCPU
),
958 .instance_init
= riscv_cpu_init
,
960 .class_size
= sizeof(RISCVCPUClass
),
961 .class_init
= riscv_cpu_class_init
,
963 DEFINE_CPU(TYPE_RISCV_CPU_ANY
, riscv_any_cpu_init
),
964 #if defined(CONFIG_KVM)
965 DEFINE_CPU(TYPE_RISCV_CPU_HOST
, riscv_host_cpu_init
),
967 #if defined(TARGET_RISCV32)
968 DEFINE_CPU(TYPE_RISCV_CPU_BASE32
, rv32_base_cpu_init
),
969 DEFINE_CPU(TYPE_RISCV_CPU_IBEX
, rv32_ibex_cpu_init
),
970 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31
, rv32_sifive_e_cpu_init
),
971 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34
, rv32_imafcu_nommu_cpu_init
),
972 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34
, rv32_sifive_u_cpu_init
),
973 #elif defined(TARGET_RISCV64)
974 DEFINE_CPU(TYPE_RISCV_CPU_BASE64
, rv64_base_cpu_init
),
975 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51
, rv64_sifive_e_cpu_init
),
976 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54
, rv64_sifive_u_cpu_init
),
977 DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C
, rv64_sifive_u_cpu_init
),
978 DEFINE_CPU(TYPE_RISCV_CPU_BASE128
, rv128_base_cpu_init
),
982 DEFINE_TYPES(riscv_cpu_type_infos
)