4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
25 #include "cpu_vendorid.h"
27 #include "internals.h"
28 #include "time_helper.h"
29 #include "exec/exec-all.h"
30 #include "qapi/error.h"
31 #include "qapi/visitor.h"
32 #include "qemu/error-report.h"
33 #include "hw/qdev-properties.h"
34 #include "migration/vmstate.h"
35 #include "fpu/softfloat-helpers.h"
36 #include "sysemu/kvm.h"
37 #include "kvm_riscv.h"
40 /* RISC-V CPU definitions */
/*
 * Synthesize marchid/mimpid from the QEMU version triple so guests can
 * identify the emulator revision.  NOTE(review): the MICRO component line
 * was lost in extraction and is restored here — confirm against upstream.
 */
#define RISCV_CPU_MARCHID ((QEMU_VERSION_MAJOR << 16) | \
                           (QEMU_VERSION_MINOR << 8)  | \
                           (QEMU_VERSION_MICRO))
#define RISCV_CPU_MIMPID  RISCV_CPU_MARCHID
/* Canonical ordering of single-letter extensions in a generated ISA string */
static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";
52 int ext_enable_offset
;
/*
 * Build one isa_ext_data record: stringified extension name, minimum
 * privileged-spec version, and the byte offset of the matching enable
 * flag inside struct RISCVCPUConfig.
 */
#define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
    {#_name, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}
/*
 * Here are the ordering rules of extension naming defined by RISC-V
 * specification :
 * 1. All extensions should be separated from other multi-letter extensions
 *    by an underscore.
 * 2. The first letter following the 'Z' conventionally indicates the most
 *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
 *    If multiple 'Z' extensions are named, they should be ordered first
 *    by category, then alphabetically within a category.
 * 3. Standard supervisor-level extensions (starts with 'S') should be
 *    listed after standard unprivileged extensions.  If multiple
 *    supervisor-level extensions are listed, they should be ordered
 *    alphabetically.
 * 4. Non-standard extensions (starts with 'X') must be listed after all
 *    standard extensions. They must be separated from other multi-letter
 *    extensions by an underscore.
 *
 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
 * instead.
 */
78 static const struct isa_ext_data isa_edata_arr
[] = {
79 ISA_EXT_DATA_ENTRY(zicbom
, PRIV_VERSION_1_12_0
, ext_icbom
),
80 ISA_EXT_DATA_ENTRY(zicboz
, PRIV_VERSION_1_12_0
, ext_icboz
),
81 ISA_EXT_DATA_ENTRY(zicond
, PRIV_VERSION_1_12_0
, ext_zicond
),
82 ISA_EXT_DATA_ENTRY(zicsr
, PRIV_VERSION_1_10_0
, ext_icsr
),
83 ISA_EXT_DATA_ENTRY(zifencei
, PRIV_VERSION_1_10_0
, ext_ifencei
),
84 ISA_EXT_DATA_ENTRY(zihintpause
, PRIV_VERSION_1_10_0
, ext_zihintpause
),
85 ISA_EXT_DATA_ENTRY(zawrs
, PRIV_VERSION_1_12_0
, ext_zawrs
),
86 ISA_EXT_DATA_ENTRY(zfh
, PRIV_VERSION_1_11_0
, ext_zfh
),
87 ISA_EXT_DATA_ENTRY(zfhmin
, PRIV_VERSION_1_11_0
, ext_zfhmin
),
88 ISA_EXT_DATA_ENTRY(zfinx
, PRIV_VERSION_1_12_0
, ext_zfinx
),
89 ISA_EXT_DATA_ENTRY(zdinx
, PRIV_VERSION_1_12_0
, ext_zdinx
),
90 ISA_EXT_DATA_ENTRY(zca
, PRIV_VERSION_1_12_0
, ext_zca
),
91 ISA_EXT_DATA_ENTRY(zcb
, PRIV_VERSION_1_12_0
, ext_zcb
),
92 ISA_EXT_DATA_ENTRY(zcf
, PRIV_VERSION_1_12_0
, ext_zcf
),
93 ISA_EXT_DATA_ENTRY(zcd
, PRIV_VERSION_1_12_0
, ext_zcd
),
94 ISA_EXT_DATA_ENTRY(zce
, PRIV_VERSION_1_12_0
, ext_zce
),
95 ISA_EXT_DATA_ENTRY(zcmp
, PRIV_VERSION_1_12_0
, ext_zcmp
),
96 ISA_EXT_DATA_ENTRY(zcmt
, PRIV_VERSION_1_12_0
, ext_zcmt
),
97 ISA_EXT_DATA_ENTRY(zba
, PRIV_VERSION_1_12_0
, ext_zba
),
98 ISA_EXT_DATA_ENTRY(zbb
, PRIV_VERSION_1_12_0
, ext_zbb
),
99 ISA_EXT_DATA_ENTRY(zbc
, PRIV_VERSION_1_12_0
, ext_zbc
),
100 ISA_EXT_DATA_ENTRY(zbkb
, PRIV_VERSION_1_12_0
, ext_zbkb
),
101 ISA_EXT_DATA_ENTRY(zbkc
, PRIV_VERSION_1_12_0
, ext_zbkc
),
102 ISA_EXT_DATA_ENTRY(zbkx
, PRIV_VERSION_1_12_0
, ext_zbkx
),
103 ISA_EXT_DATA_ENTRY(zbs
, PRIV_VERSION_1_12_0
, ext_zbs
),
104 ISA_EXT_DATA_ENTRY(zk
, PRIV_VERSION_1_12_0
, ext_zk
),
105 ISA_EXT_DATA_ENTRY(zkn
, PRIV_VERSION_1_12_0
, ext_zkn
),
106 ISA_EXT_DATA_ENTRY(zknd
, PRIV_VERSION_1_12_0
, ext_zknd
),
107 ISA_EXT_DATA_ENTRY(zkne
, PRIV_VERSION_1_12_0
, ext_zkne
),
108 ISA_EXT_DATA_ENTRY(zknh
, PRIV_VERSION_1_12_0
, ext_zknh
),
109 ISA_EXT_DATA_ENTRY(zkr
, PRIV_VERSION_1_12_0
, ext_zkr
),
110 ISA_EXT_DATA_ENTRY(zks
, PRIV_VERSION_1_12_0
, ext_zks
),
111 ISA_EXT_DATA_ENTRY(zksed
, PRIV_VERSION_1_12_0
, ext_zksed
),
112 ISA_EXT_DATA_ENTRY(zksh
, PRIV_VERSION_1_12_0
, ext_zksh
),
113 ISA_EXT_DATA_ENTRY(zkt
, PRIV_VERSION_1_12_0
, ext_zkt
),
114 ISA_EXT_DATA_ENTRY(zve32f
, PRIV_VERSION_1_10_0
, ext_zve32f
),
115 ISA_EXT_DATA_ENTRY(zve64f
, PRIV_VERSION_1_10_0
, ext_zve64f
),
116 ISA_EXT_DATA_ENTRY(zve64d
, PRIV_VERSION_1_10_0
, ext_zve64d
),
117 ISA_EXT_DATA_ENTRY(zvfh
, PRIV_VERSION_1_12_0
, ext_zvfh
),
118 ISA_EXT_DATA_ENTRY(zvfhmin
, PRIV_VERSION_1_12_0
, ext_zvfhmin
),
119 ISA_EXT_DATA_ENTRY(zhinx
, PRIV_VERSION_1_12_0
, ext_zhinx
),
120 ISA_EXT_DATA_ENTRY(zhinxmin
, PRIV_VERSION_1_12_0
, ext_zhinxmin
),
121 ISA_EXT_DATA_ENTRY(smaia
, PRIV_VERSION_1_12_0
, ext_smaia
),
122 ISA_EXT_DATA_ENTRY(ssaia
, PRIV_VERSION_1_12_0
, ext_ssaia
),
123 ISA_EXT_DATA_ENTRY(sscofpmf
, PRIV_VERSION_1_12_0
, ext_sscofpmf
),
124 ISA_EXT_DATA_ENTRY(sstc
, PRIV_VERSION_1_12_0
, ext_sstc
),
125 ISA_EXT_DATA_ENTRY(svadu
, PRIV_VERSION_1_12_0
, ext_svadu
),
126 ISA_EXT_DATA_ENTRY(svinval
, PRIV_VERSION_1_12_0
, ext_svinval
),
127 ISA_EXT_DATA_ENTRY(svnapot
, PRIV_VERSION_1_12_0
, ext_svnapot
),
128 ISA_EXT_DATA_ENTRY(svpbmt
, PRIV_VERSION_1_12_0
, ext_svpbmt
),
129 ISA_EXT_DATA_ENTRY(xtheadba
, PRIV_VERSION_1_11_0
, ext_xtheadba
),
130 ISA_EXT_DATA_ENTRY(xtheadbb
, PRIV_VERSION_1_11_0
, ext_xtheadbb
),
131 ISA_EXT_DATA_ENTRY(xtheadbs
, PRIV_VERSION_1_11_0
, ext_xtheadbs
),
132 ISA_EXT_DATA_ENTRY(xtheadcmo
, PRIV_VERSION_1_11_0
, ext_xtheadcmo
),
133 ISA_EXT_DATA_ENTRY(xtheadcondmov
, PRIV_VERSION_1_11_0
, ext_xtheadcondmov
),
134 ISA_EXT_DATA_ENTRY(xtheadfmemidx
, PRIV_VERSION_1_11_0
, ext_xtheadfmemidx
),
135 ISA_EXT_DATA_ENTRY(xtheadfmv
, PRIV_VERSION_1_11_0
, ext_xtheadfmv
),
136 ISA_EXT_DATA_ENTRY(xtheadmac
, PRIV_VERSION_1_11_0
, ext_xtheadmac
),
137 ISA_EXT_DATA_ENTRY(xtheadmemidx
, PRIV_VERSION_1_11_0
, ext_xtheadmemidx
),
138 ISA_EXT_DATA_ENTRY(xtheadmempair
, PRIV_VERSION_1_11_0
, ext_xtheadmempair
),
139 ISA_EXT_DATA_ENTRY(xtheadsync
, PRIV_VERSION_1_11_0
, ext_xtheadsync
),
140 ISA_EXT_DATA_ENTRY(xventanacondops
, PRIV_VERSION_1_12_0
, ext_XVentanaCondOps
),
143 static bool isa_ext_is_enabled(RISCVCPU
*cpu
,
144 const struct isa_ext_data
*edata
)
146 bool *ext_enabled
= (void *)&cpu
->cfg
+ edata
->ext_enable_offset
;
151 static void isa_ext_update_enabled(RISCVCPU
*cpu
,
152 const struct isa_ext_data
*edata
, bool en
)
154 bool *ext_enabled
= (void *)&cpu
->cfg
+ edata
->ext_enable_offset
;
/* ABI names of the 32 integer registers ("xN/abi") for debug/disas output */
const char * const riscv_int_regnames[] = {
    "x0/zero", "x1/ra",  "x2/sp",  "x3/gp",  "x4/tp",  "x5/t0",   "x6/t1",
    "x7/t2",   "x8/s0",  "x9/s1",  "x10/a0", "x11/a1", "x12/a2",  "x13/a3",
    "x14/a4",  "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3",  "x20/s4",
    "x21/s5",  "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
    "x28/t3",  "x29/t4", "x30/t5", "x31/t6"
};
/* High-half names of the integer registers, used by RV128 state dumps */
const char * const riscv_int_regnamesh[] = {
    "x0h/zeroh", "x1h/rah",  "x2h/sph",   "x3h/gph",   "x4h/tph",  "x5h/t0h",
    "x6h/t1h",   "x7h/t2h",  "x8h/s0h",   "x9h/s1h",   "x10h/a0h", "x11h/a1h",
    "x12h/a2h",  "x13h/a3h", "x14h/a4h",  "x15h/a5h",  "x16h/a6h", "x17h/a7h",
    "x18h/s2h",  "x19h/s3h", "x20h/s4h",  "x21h/s5h",  "x22h/s6h", "x23h/s7h",
    "x24h/s8h",  "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
    "x30h/t5h",  "x31h/t6h"
};
/* ABI names of the 32 floating-point registers ("fN/abi") */
const char * const riscv_fpr_regnames[] = {
    "f0/ft0",   "f1/ft1",  "f2/ft2",   "f3/ft3",   "f4/ft4",  "f5/ft5",
    "f6/ft6",   "f7/ft7",  "f8/fs0",   "f9/fs1",   "f10/fa0", "f11/fa1",
    "f12/fa2",  "f13/fa3", "f14/fa4",  "f15/fa5",  "f16/fa6", "f17/fa7",
    "f18/fs2",  "f19/fs3", "f20/fs4",  "f21/fs5",  "f22/fs6", "f23/fs7",
    "f24/fs8",  "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
    "f30/ft10", "f31/ft11"
};
185 static const char * const riscv_excp_names
[] = {
188 "illegal_instruction",
206 "guest_exec_page_fault",
207 "guest_load_page_fault",
209 "guest_store_page_fault",
212 static const char * const riscv_intr_names
[] = {
231 static void riscv_cpu_add_user_properties(Object
*obj
);
233 const char *riscv_cpu_get_trap_name(target_ulong cause
, bool async
)
236 return (cause
< ARRAY_SIZE(riscv_intr_names
)) ?
237 riscv_intr_names
[cause
] : "(unknown)";
239 return (cause
< ARRAY_SIZE(riscv_excp_names
)) ?
240 riscv_excp_names
[cause
] : "(unknown)";
244 static void set_misa(CPURISCVState
*env
, RISCVMXL mxl
, uint32_t ext
)
246 env
->misa_mxl_max
= env
->misa_mxl
= mxl
;
247 env
->misa_ext_mask
= env
->misa_ext
= ext
;
250 #ifndef CONFIG_USER_ONLY
251 static uint8_t satp_mode_from_str(const char *satp_mode_str
)
253 if (!strncmp(satp_mode_str
, "mbare", 5)) {
254 return VM_1_10_MBARE
;
257 if (!strncmp(satp_mode_str
, "sv32", 4)) {
261 if (!strncmp(satp_mode_str
, "sv39", 4)) {
265 if (!strncmp(satp_mode_str
, "sv48", 4)) {
269 if (!strncmp(satp_mode_str
, "sv57", 4)) {
273 if (!strncmp(satp_mode_str
, "sv64", 4)) {
277 g_assert_not_reached();
/*
 * Return the index of the highest set bit in @map, i.e. the largest
 * satp mode encoded in the bitmap.
 */
uint8_t satp_mode_max_from_map(uint32_t map)
{
    /* map here has at least one bit set, so no problem with clz */
    return 31 - __builtin_clz(map);
}
286 const char *satp_mode_str(uint8_t satp_mode
, bool is_32_bit
)
310 g_assert_not_reached();
313 static void set_satp_mode_max_supported(RISCVCPU
*cpu
,
316 bool rv32
= riscv_cpu_mxl(&cpu
->env
) == MXL_RV32
;
317 const bool *valid_vm
= rv32
? valid_vm_1_10_32
: valid_vm_1_10_64
;
319 for (int i
= 0; i
<= satp_mode
; ++i
) {
321 cpu
->cfg
.satp_mode
.supported
|= (1 << i
);
326 /* Set the satp mode to the max supported */
327 static void set_satp_mode_default_map(RISCVCPU
*cpu
)
329 cpu
->cfg
.satp_mode
.map
= cpu
->cfg
.satp_mode
.supported
;
333 static void riscv_any_cpu_init(Object
*obj
)
335 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
336 #if defined(TARGET_RISCV32)
337 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVU
);
338 #elif defined(TARGET_RISCV64)
339 set_misa(env
, MXL_RV64
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVU
);
342 #ifndef CONFIG_USER_ONLY
343 set_satp_mode_max_supported(RISCV_CPU(obj
),
344 riscv_cpu_mxl(&RISCV_CPU(obj
)->env
) == MXL_RV32
?
345 VM_1_10_SV32
: VM_1_10_SV57
);
348 env
->priv_ver
= PRIV_VERSION_LATEST
;
351 #if defined(TARGET_RISCV64)
352 static void rv64_base_cpu_init(Object
*obj
)
354 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
355 /* We set this in the realise function */
356 set_misa(env
, MXL_RV64
, 0);
357 riscv_cpu_add_user_properties(obj
);
358 /* Set latest version of privileged specification */
359 env
->priv_ver
= PRIV_VERSION_LATEST
;
360 #ifndef CONFIG_USER_ONLY
361 set_satp_mode_max_supported(RISCV_CPU(obj
), VM_1_10_SV57
);
365 static void rv64_sifive_u_cpu_init(Object
*obj
)
367 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
368 set_misa(env
, MXL_RV64
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVS
| RVU
);
369 env
->priv_ver
= PRIV_VERSION_1_10_0
;
370 #ifndef CONFIG_USER_ONLY
371 set_satp_mode_max_supported(RISCV_CPU(obj
), VM_1_10_SV39
);
375 static void rv64_sifive_e_cpu_init(Object
*obj
)
377 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
378 RISCVCPU
*cpu
= RISCV_CPU(obj
);
380 set_misa(env
, MXL_RV64
, RVI
| RVM
| RVA
| RVC
| RVU
);
381 env
->priv_ver
= PRIV_VERSION_1_10_0
;
382 cpu
->cfg
.mmu
= false;
383 #ifndef CONFIG_USER_ONLY
384 set_satp_mode_max_supported(cpu
, VM_1_10_MBARE
);
388 static void rv64_thead_c906_cpu_init(Object
*obj
)
390 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
391 RISCVCPU
*cpu
= RISCV_CPU(obj
);
393 set_misa(env
, MXL_RV64
, RVG
| RVC
| RVS
| RVU
);
394 env
->priv_ver
= PRIV_VERSION_1_11_0
;
396 cpu
->cfg
.ext_zfh
= true;
398 cpu
->cfg
.ext_xtheadba
= true;
399 cpu
->cfg
.ext_xtheadbb
= true;
400 cpu
->cfg
.ext_xtheadbs
= true;
401 cpu
->cfg
.ext_xtheadcmo
= true;
402 cpu
->cfg
.ext_xtheadcondmov
= true;
403 cpu
->cfg
.ext_xtheadfmemidx
= true;
404 cpu
->cfg
.ext_xtheadmac
= true;
405 cpu
->cfg
.ext_xtheadmemidx
= true;
406 cpu
->cfg
.ext_xtheadmempair
= true;
407 cpu
->cfg
.ext_xtheadsync
= true;
409 cpu
->cfg
.mvendorid
= THEAD_VENDOR_ID
;
410 #ifndef CONFIG_USER_ONLY
411 set_satp_mode_max_supported(cpu
, VM_1_10_SV39
);
415 static void rv64_veyron_v1_cpu_init(Object
*obj
)
417 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
418 RISCVCPU
*cpu
= RISCV_CPU(obj
);
420 set_misa(env
, MXL_RV64
, RVG
| RVC
| RVS
| RVU
| RVH
);
421 env
->priv_ver
= PRIV_VERSION_1_12_0
;
423 /* Enable ISA extensions */
425 cpu
->cfg
.ext_icbom
= true;
426 cpu
->cfg
.cbom_blocksize
= 64;
427 cpu
->cfg
.cboz_blocksize
= 64;
428 cpu
->cfg
.ext_icboz
= true;
429 cpu
->cfg
.ext_smaia
= true;
430 cpu
->cfg
.ext_ssaia
= true;
431 cpu
->cfg
.ext_sscofpmf
= true;
432 cpu
->cfg
.ext_sstc
= true;
433 cpu
->cfg
.ext_svinval
= true;
434 cpu
->cfg
.ext_svnapot
= true;
435 cpu
->cfg
.ext_svpbmt
= true;
436 cpu
->cfg
.ext_smstateen
= true;
437 cpu
->cfg
.ext_zba
= true;
438 cpu
->cfg
.ext_zbb
= true;
439 cpu
->cfg
.ext_zbc
= true;
440 cpu
->cfg
.ext_zbs
= true;
441 cpu
->cfg
.ext_XVentanaCondOps
= true;
443 cpu
->cfg
.mvendorid
= VEYRON_V1_MVENDORID
;
444 cpu
->cfg
.marchid
= VEYRON_V1_MARCHID
;
445 cpu
->cfg
.mimpid
= VEYRON_V1_MIMPID
;
447 #ifndef CONFIG_USER_ONLY
448 set_satp_mode_max_supported(cpu
, VM_1_10_SV48
);
452 static void rv128_base_cpu_init(Object
*obj
)
454 if (qemu_tcg_mttcg_enabled()) {
455 /* Missing 128-bit aligned atomics */
456 error_report("128-bit RISC-V currently does not work with Multi "
457 "Threaded TCG. Please use: -accel tcg,thread=single");
460 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
461 /* We set this in the realise function */
462 set_misa(env
, MXL_RV128
, 0);
463 riscv_cpu_add_user_properties(obj
);
464 /* Set latest version of privileged specification */
465 env
->priv_ver
= PRIV_VERSION_LATEST
;
466 #ifndef CONFIG_USER_ONLY
467 set_satp_mode_max_supported(RISCV_CPU(obj
), VM_1_10_SV57
);
471 static void rv32_base_cpu_init(Object
*obj
)
473 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
474 /* We set this in the realise function */
475 set_misa(env
, MXL_RV32
, 0);
476 riscv_cpu_add_user_properties(obj
);
477 /* Set latest version of privileged specification */
478 env
->priv_ver
= PRIV_VERSION_LATEST
;
479 #ifndef CONFIG_USER_ONLY
480 set_satp_mode_max_supported(RISCV_CPU(obj
), VM_1_10_SV32
);
484 static void rv32_sifive_u_cpu_init(Object
*obj
)
486 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
487 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVF
| RVD
| RVC
| RVS
| RVU
);
488 env
->priv_ver
= PRIV_VERSION_1_10_0
;
489 #ifndef CONFIG_USER_ONLY
490 set_satp_mode_max_supported(RISCV_CPU(obj
), VM_1_10_SV32
);
494 static void rv32_sifive_e_cpu_init(Object
*obj
)
496 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
497 RISCVCPU
*cpu
= RISCV_CPU(obj
);
499 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVC
| RVU
);
500 env
->priv_ver
= PRIV_VERSION_1_10_0
;
501 cpu
->cfg
.mmu
= false;
502 #ifndef CONFIG_USER_ONLY
503 set_satp_mode_max_supported(cpu
, VM_1_10_MBARE
);
507 static void rv32_ibex_cpu_init(Object
*obj
)
509 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
510 RISCVCPU
*cpu
= RISCV_CPU(obj
);
512 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVC
| RVU
);
513 env
->priv_ver
= PRIV_VERSION_1_11_0
;
514 cpu
->cfg
.mmu
= false;
515 #ifndef CONFIG_USER_ONLY
516 set_satp_mode_max_supported(cpu
, VM_1_10_MBARE
);
518 cpu
->cfg
.epmp
= true;
521 static void rv32_imafcu_nommu_cpu_init(Object
*obj
)
523 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
524 RISCVCPU
*cpu
= RISCV_CPU(obj
);
526 set_misa(env
, MXL_RV32
, RVI
| RVM
| RVA
| RVF
| RVC
| RVU
);
527 env
->priv_ver
= PRIV_VERSION_1_10_0
;
528 cpu
->cfg
.mmu
= false;
529 #ifndef CONFIG_USER_ONLY
530 set_satp_mode_max_supported(cpu
, VM_1_10_MBARE
);
535 #if defined(CONFIG_KVM)
536 static void riscv_host_cpu_init(Object
*obj
)
538 CPURISCVState
*env
= &RISCV_CPU(obj
)->env
;
539 #if defined(TARGET_RISCV32)
540 set_misa(env
, MXL_RV32
, 0);
541 #elif defined(TARGET_RISCV64)
542 set_misa(env
, MXL_RV64
, 0);
544 riscv_cpu_add_user_properties(obj
);
548 static ObjectClass
*riscv_cpu_class_by_name(const char *cpu_model
)
554 cpuname
= g_strsplit(cpu_model
, ",", 1);
555 typename
= g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname
[0]);
556 oc
= object_class_by_name(typename
);
559 if (!oc
|| !object_class_dynamic_cast(oc
, TYPE_RISCV_CPU
) ||
560 object_class_is_abstract(oc
)) {
566 static void riscv_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
568 RISCVCPU
*cpu
= RISCV_CPU(cs
);
569 CPURISCVState
*env
= &cpu
->env
;
572 #if !defined(CONFIG_USER_ONLY)
573 if (riscv_has_ext(env
, RVH
)) {
574 qemu_fprintf(f
, " %s %d\n", "V = ", env
->virt_enabled
);
577 qemu_fprintf(f
, " %s " TARGET_FMT_lx
"\n", "pc ", env
->pc
);
578 #ifndef CONFIG_USER_ONLY
580 static const int dump_csrs
[] = {
585 * CSR_SSTATUS is intentionally omitted here as its value
586 * can be figured out by looking at CSR_MSTATUS
621 for (int i
= 0; i
< ARRAY_SIZE(dump_csrs
); ++i
) {
622 int csrno
= dump_csrs
[i
];
623 target_ulong val
= 0;
624 RISCVException res
= riscv_csrrw_debug(env
, csrno
, &val
, 0, 0);
627 * Rely on the smode, hmode, etc, predicates within csr.c
628 * to do the filtering of the registers that are present.
630 if (res
== RISCV_EXCP_NONE
) {
631 qemu_fprintf(f
, " %-8s " TARGET_FMT_lx
"\n",
632 csr_ops
[csrno
].name
, val
);
638 for (i
= 0; i
< 32; i
++) {
639 qemu_fprintf(f
, " %-8s " TARGET_FMT_lx
,
640 riscv_int_regnames
[i
], env
->gpr
[i
]);
642 qemu_fprintf(f
, "\n");
645 if (flags
& CPU_DUMP_FPU
) {
646 for (i
= 0; i
< 32; i
++) {
647 qemu_fprintf(f
, " %-8s %016" PRIx64
,
648 riscv_fpr_regnames
[i
], env
->fpr
[i
]);
650 qemu_fprintf(f
, "\n");
656 static void riscv_cpu_set_pc(CPUState
*cs
, vaddr value
)
658 RISCVCPU
*cpu
= RISCV_CPU(cs
);
659 CPURISCVState
*env
= &cpu
->env
;
661 if (env
->xl
== MXL_RV32
) {
662 env
->pc
= (int32_t)value
;
668 static vaddr
riscv_cpu_get_pc(CPUState
*cs
)
670 RISCVCPU
*cpu
= RISCV_CPU(cs
);
671 CPURISCVState
*env
= &cpu
->env
;
673 /* Match cpu_get_tb_cpu_state. */
674 if (env
->xl
== MXL_RV32
) {
675 return env
->pc
& UINT32_MAX
;
680 static void riscv_cpu_synchronize_from_tb(CPUState
*cs
,
681 const TranslationBlock
*tb
)
683 RISCVCPU
*cpu
= RISCV_CPU(cs
);
684 CPURISCVState
*env
= &cpu
->env
;
685 RISCVMXL xl
= FIELD_EX32(tb
->flags
, TB_FLAGS
, XL
);
687 tcg_debug_assert(!(cs
->tcg_cflags
& CF_PCREL
));
689 if (xl
== MXL_RV32
) {
690 env
->pc
= (int32_t) tb
->pc
;
696 static bool riscv_cpu_has_work(CPUState
*cs
)
698 #ifndef CONFIG_USER_ONLY
699 RISCVCPU
*cpu
= RISCV_CPU(cs
);
700 CPURISCVState
*env
= &cpu
->env
;
702 * Definition of the WFI instruction requires it to ignore the privilege
703 * mode and delegation registers, but respect individual enables
705 return riscv_cpu_all_pending(env
) != 0;
711 static void riscv_restore_state_to_opc(CPUState
*cs
,
712 const TranslationBlock
*tb
,
713 const uint64_t *data
)
715 RISCVCPU
*cpu
= RISCV_CPU(cs
);
716 CPURISCVState
*env
= &cpu
->env
;
717 RISCVMXL xl
= FIELD_EX32(tb
->flags
, TB_FLAGS
, XL
);
719 if (xl
== MXL_RV32
) {
720 env
->pc
= (int32_t)data
[0];
727 static void riscv_cpu_reset_hold(Object
*obj
)
729 #ifndef CONFIG_USER_ONLY
733 CPUState
*cs
= CPU(obj
);
734 RISCVCPU
*cpu
= RISCV_CPU(cs
);
735 RISCVCPUClass
*mcc
= RISCV_CPU_GET_CLASS(cpu
);
736 CPURISCVState
*env
= &cpu
->env
;
738 if (mcc
->parent_phases
.hold
) {
739 mcc
->parent_phases
.hold(obj
);
741 #ifndef CONFIG_USER_ONLY
742 env
->misa_mxl
= env
->misa_mxl_max
;
744 env
->mstatus
&= ~(MSTATUS_MIE
| MSTATUS_MPRV
);
745 if (env
->misa_mxl
> MXL_RV32
) {
747 * The reset status of SXL/UXL is undefined, but mstatus is WARL
748 * and we must ensure that the value after init is valid for read.
750 env
->mstatus
= set_field(env
->mstatus
, MSTATUS64_SXL
, env
->misa_mxl
);
751 env
->mstatus
= set_field(env
->mstatus
, MSTATUS64_UXL
, env
->misa_mxl
);
752 if (riscv_has_ext(env
, RVH
)) {
753 env
->vsstatus
= set_field(env
->vsstatus
,
754 MSTATUS64_SXL
, env
->misa_mxl
);
755 env
->vsstatus
= set_field(env
->vsstatus
,
756 MSTATUS64_UXL
, env
->misa_mxl
);
757 env
->mstatus_hs
= set_field(env
->mstatus_hs
,
758 MSTATUS64_SXL
, env
->misa_mxl
);
759 env
->mstatus_hs
= set_field(env
->mstatus_hs
,
760 MSTATUS64_UXL
, env
->misa_mxl
);
764 env
->miclaim
= MIP_SGEIP
;
765 env
->pc
= env
->resetvec
;
767 env
->two_stage_lookup
= false;
769 env
->menvcfg
= (cpu
->cfg
.ext_svpbmt
? MENVCFG_PBMTE
: 0) |
770 (cpu
->cfg
.ext_svadu
? MENVCFG_HADE
: 0);
771 env
->henvcfg
= (cpu
->cfg
.ext_svpbmt
? HENVCFG_PBMTE
: 0) |
772 (cpu
->cfg
.ext_svadu
? HENVCFG_HADE
: 0);
774 /* Initialized default priorities of local interrupts. */
775 for (i
= 0; i
< ARRAY_SIZE(env
->miprio
); i
++) {
776 iprio
= riscv_cpu_default_priority(i
);
777 env
->miprio
[i
] = (i
== IRQ_M_EXT
) ? 0 : iprio
;
778 env
->siprio
[i
] = (i
== IRQ_S_EXT
) ? 0 : iprio
;
782 while (!riscv_cpu_hviprio_index2irq(i
, &irq
, &rdzero
)) {
784 env
->hviprio
[irq
] = env
->miprio
[irq
];
788 /* mmte is supposed to have pm.current hardwired to 1 */
789 env
->mmte
|= (EXT_STATUS_INITIAL
| MMTE_M_PM_CURRENT
);
791 env
->xl
= riscv_cpu_mxl(env
);
792 riscv_cpu_update_mask(env
);
793 cs
->exception_index
= RISCV_EXCP_NONE
;
795 set_default_nan_mode(1, &env
->fp_status
);
797 #ifndef CONFIG_USER_ONLY
798 if (cpu
->cfg
.debug
) {
799 riscv_trigger_init(env
);
803 kvm_riscv_reset_vcpu(cpu
);
808 static void riscv_cpu_disas_set_info(CPUState
*s
, disassemble_info
*info
)
810 RISCVCPU
*cpu
= RISCV_CPU(s
);
812 switch (riscv_cpu_mxl(&cpu
->env
)) {
814 info
->print_insn
= print_insn_riscv32
;
817 info
->print_insn
= print_insn_riscv64
;
820 info
->print_insn
= print_insn_riscv128
;
823 g_assert_not_reached();
827 static void riscv_cpu_validate_v(CPURISCVState
*env
, RISCVCPUConfig
*cfg
,
830 int vext_version
= VEXT_VERSION_1_00_0
;
832 if (!is_power_of_2(cfg
->vlen
)) {
833 error_setg(errp
, "Vector extension VLEN must be power of 2");
836 if (cfg
->vlen
> RV_VLEN_MAX
|| cfg
->vlen
< 128) {
838 "Vector extension implementation only supports VLEN "
839 "in the range [128, %d]", RV_VLEN_MAX
);
842 if (!is_power_of_2(cfg
->elen
)) {
843 error_setg(errp
, "Vector extension ELEN must be power of 2");
846 if (cfg
->elen
> 64 || cfg
->elen
< 8) {
848 "Vector extension implementation only supports ELEN "
849 "in the range [8, 64]");
852 if (cfg
->vext_spec
) {
853 if (!g_strcmp0(cfg
->vext_spec
, "v1.0")) {
854 vext_version
= VEXT_VERSION_1_00_0
;
856 error_setg(errp
, "Unsupported vector spec version '%s'",
861 qemu_log("vector version is not specified, "
862 "use the default value v1.0\n");
864 env
->vext_ver
= vext_version
;
868 * Check consistency between chosen extensions while setting
869 * cpu->cfg accordingly.
871 static void riscv_cpu_validate_set_extensions(RISCVCPU
*cpu
, Error
**errp
)
873 CPURISCVState
*env
= &cpu
->env
;
874 Error
*local_err
= NULL
;
876 /* Do some ISA extension error checking */
877 if (riscv_has_ext(env
, RVG
) &&
878 !(riscv_has_ext(env
, RVI
) && riscv_has_ext(env
, RVM
) &&
879 riscv_has_ext(env
, RVA
) && riscv_has_ext(env
, RVF
) &&
880 riscv_has_ext(env
, RVD
) &&
881 cpu
->cfg
.ext_icsr
&& cpu
->cfg
.ext_ifencei
)) {
882 warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
883 cpu
->cfg
.ext_icsr
= true;
884 cpu
->cfg
.ext_ifencei
= true;
886 env
->misa_ext
|= RVI
| RVM
| RVA
| RVF
| RVD
;
887 env
->misa_ext_mask
= env
->misa_ext
;
890 if (riscv_has_ext(env
, RVI
) && riscv_has_ext(env
, RVE
)) {
892 "I and E extensions are incompatible");
896 if (!riscv_has_ext(env
, RVI
) && !riscv_has_ext(env
, RVE
)) {
898 "Either I or E extension must be set");
902 if (riscv_has_ext(env
, RVS
) && !riscv_has_ext(env
, RVU
)) {
904 "Setting S extension without U extension is illegal");
908 if (riscv_has_ext(env
, RVH
) && !riscv_has_ext(env
, RVI
)) {
910 "H depends on an I base integer ISA with 32 x registers");
914 if (riscv_has_ext(env
, RVH
) && !riscv_has_ext(env
, RVS
)) {
915 error_setg(errp
, "H extension implicitly requires S-mode");
919 if (riscv_has_ext(env
, RVF
) && !cpu
->cfg
.ext_icsr
) {
920 error_setg(errp
, "F extension requires Zicsr");
924 if ((cpu
->cfg
.ext_zawrs
) && !riscv_has_ext(env
, RVA
)) {
925 error_setg(errp
, "Zawrs extension requires A extension");
929 if (cpu
->cfg
.ext_zfh
) {
930 cpu
->cfg
.ext_zfhmin
= true;
933 if (cpu
->cfg
.ext_zfhmin
&& !riscv_has_ext(env
, RVF
)) {
934 error_setg(errp
, "Zfh/Zfhmin extensions require F extension");
938 if (riscv_has_ext(env
, RVD
) && !riscv_has_ext(env
, RVF
)) {
939 error_setg(errp
, "D extension requires F extension");
943 if (riscv_has_ext(env
, RVV
)) {
944 riscv_cpu_validate_v(env
, &cpu
->cfg
, &local_err
);
945 if (local_err
!= NULL
) {
946 error_propagate(errp
, local_err
);
950 /* The V vector extension depends on the Zve64d extension */
951 cpu
->cfg
.ext_zve64d
= true;
954 /* The Zve64d extension depends on the Zve64f extension */
955 if (cpu
->cfg
.ext_zve64d
) {
956 cpu
->cfg
.ext_zve64f
= true;
959 /* The Zve64f extension depends on the Zve32f extension */
960 if (cpu
->cfg
.ext_zve64f
) {
961 cpu
->cfg
.ext_zve32f
= true;
964 if (cpu
->cfg
.ext_zve64d
&& !riscv_has_ext(env
, RVD
)) {
965 error_setg(errp
, "Zve64d/V extensions require D extension");
969 if (cpu
->cfg
.ext_zve32f
&& !riscv_has_ext(env
, RVF
)) {
970 error_setg(errp
, "Zve32f/Zve64f extensions require F extension");
974 if (cpu
->cfg
.ext_zvfh
) {
975 cpu
->cfg
.ext_zvfhmin
= true;
978 if (cpu
->cfg
.ext_zvfhmin
&& !cpu
->cfg
.ext_zve32f
) {
979 error_setg(errp
, "Zvfh/Zvfhmin extensions require Zve32f extension");
983 if (cpu
->cfg
.ext_zvfh
&& !cpu
->cfg
.ext_zfhmin
) {
984 error_setg(errp
, "Zvfh extensions requires Zfhmin extension");
988 /* Set the ISA extensions, checks should have happened above */
989 if (cpu
->cfg
.ext_zhinx
) {
990 cpu
->cfg
.ext_zhinxmin
= true;
993 if ((cpu
->cfg
.ext_zdinx
|| cpu
->cfg
.ext_zhinxmin
) && !cpu
->cfg
.ext_zfinx
) {
994 error_setg(errp
, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
998 if (cpu
->cfg
.ext_zfinx
) {
999 if (!cpu
->cfg
.ext_icsr
) {
1000 error_setg(errp
, "Zfinx extension requires Zicsr");
1003 if (riscv_has_ext(env
, RVF
)) {
1005 "Zfinx cannot be supported together with F extension");
1010 if (cpu
->cfg
.ext_zce
) {
1011 cpu
->cfg
.ext_zca
= true;
1012 cpu
->cfg
.ext_zcb
= true;
1013 cpu
->cfg
.ext_zcmp
= true;
1014 cpu
->cfg
.ext_zcmt
= true;
1015 if (riscv_has_ext(env
, RVF
) && env
->misa_mxl_max
== MXL_RV32
) {
1016 cpu
->cfg
.ext_zcf
= true;
1020 if (riscv_has_ext(env
, RVC
)) {
1021 cpu
->cfg
.ext_zca
= true;
1022 if (riscv_has_ext(env
, RVF
) && env
->misa_mxl_max
== MXL_RV32
) {
1023 cpu
->cfg
.ext_zcf
= true;
1025 if (riscv_has_ext(env
, RVD
)) {
1026 cpu
->cfg
.ext_zcd
= true;
1030 if (env
->misa_mxl_max
!= MXL_RV32
&& cpu
->cfg
.ext_zcf
) {
1031 error_setg(errp
, "Zcf extension is only relevant to RV32");
1035 if (!riscv_has_ext(env
, RVF
) && cpu
->cfg
.ext_zcf
) {
1036 error_setg(errp
, "Zcf extension requires F extension");
1040 if (!riscv_has_ext(env
, RVD
) && cpu
->cfg
.ext_zcd
) {
1041 error_setg(errp
, "Zcd extension requires D extension");
1045 if ((cpu
->cfg
.ext_zcf
|| cpu
->cfg
.ext_zcd
|| cpu
->cfg
.ext_zcb
||
1046 cpu
->cfg
.ext_zcmp
|| cpu
->cfg
.ext_zcmt
) && !cpu
->cfg
.ext_zca
) {
1047 error_setg(errp
, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
1052 if (cpu
->cfg
.ext_zcd
&& (cpu
->cfg
.ext_zcmp
|| cpu
->cfg
.ext_zcmt
)) {
1053 error_setg(errp
, "Zcmp/Zcmt extensions are incompatible with "
1058 if (cpu
->cfg
.ext_zcmt
&& !cpu
->cfg
.ext_icsr
) {
1059 error_setg(errp
, "Zcmt extension requires Zicsr extension");
1063 if (cpu
->cfg
.ext_zk
) {
1064 cpu
->cfg
.ext_zkn
= true;
1065 cpu
->cfg
.ext_zkr
= true;
1066 cpu
->cfg
.ext_zkt
= true;
1069 if (cpu
->cfg
.ext_zkn
) {
1070 cpu
->cfg
.ext_zbkb
= true;
1071 cpu
->cfg
.ext_zbkc
= true;
1072 cpu
->cfg
.ext_zbkx
= true;
1073 cpu
->cfg
.ext_zkne
= true;
1074 cpu
->cfg
.ext_zknd
= true;
1075 cpu
->cfg
.ext_zknh
= true;
1078 if (cpu
->cfg
.ext_zks
) {
1079 cpu
->cfg
.ext_zbkb
= true;
1080 cpu
->cfg
.ext_zbkc
= true;
1081 cpu
->cfg
.ext_zbkx
= true;
1082 cpu
->cfg
.ext_zksed
= true;
1083 cpu
->cfg
.ext_zksh
= true;
1087 #ifndef CONFIG_USER_ONLY
1088 static void riscv_cpu_satp_mode_finalize(RISCVCPU
*cpu
, Error
**errp
)
1090 bool rv32
= riscv_cpu_mxl(&cpu
->env
) == MXL_RV32
;
1091 uint8_t satp_mode_map_max
;
1092 uint8_t satp_mode_supported_max
=
1093 satp_mode_max_from_map(cpu
->cfg
.satp_mode
.supported
);
1095 if (cpu
->cfg
.satp_mode
.map
== 0) {
1096 if (cpu
->cfg
.satp_mode
.init
== 0) {
1097 /* If unset by the user, we fallback to the default satp mode. */
1098 set_satp_mode_default_map(cpu
);
1101 * Find the lowest level that was disabled and then enable the
1102 * first valid level below which can be found in
1103 * valid_vm_1_10_32/64.
1105 for (int i
= 1; i
< 16; ++i
) {
1106 if ((cpu
->cfg
.satp_mode
.init
& (1 << i
)) &&
1107 (cpu
->cfg
.satp_mode
.supported
& (1 << i
))) {
1108 for (int j
= i
- 1; j
>= 0; --j
) {
1109 if (cpu
->cfg
.satp_mode
.supported
& (1 << j
)) {
1110 cpu
->cfg
.satp_mode
.map
|= (1 << j
);
1120 satp_mode_map_max
= satp_mode_max_from_map(cpu
->cfg
.satp_mode
.map
);
1122 /* Make sure the user asked for a supported configuration (HW and qemu) */
1123 if (satp_mode_map_max
> satp_mode_supported_max
) {
1124 error_setg(errp
, "satp_mode %s is higher than hw max capability %s",
1125 satp_mode_str(satp_mode_map_max
, rv32
),
1126 satp_mode_str(satp_mode_supported_max
, rv32
));
1131 * Make sure the user did not ask for an invalid configuration as per
1132 * the specification.
1135 for (int i
= satp_mode_map_max
- 1; i
>= 0; --i
) {
1136 if (!(cpu
->cfg
.satp_mode
.map
& (1 << i
)) &&
1137 (cpu
->cfg
.satp_mode
.init
& (1 << i
)) &&
1138 (cpu
->cfg
.satp_mode
.supported
& (1 << i
))) {
1139 error_setg(errp
, "cannot disable %s satp mode if %s "
1140 "is enabled", satp_mode_str(i
, false),
1141 satp_mode_str(satp_mode_map_max
, false));
1147 /* Finally expand the map so that all valid modes are set */
1148 for (int i
= satp_mode_map_max
- 1; i
>= 0; --i
) {
1149 if (cpu
->cfg
.satp_mode
.supported
& (1 << i
)) {
1150 cpu
->cfg
.satp_mode
.map
|= (1 << i
);
1156 static void riscv_cpu_finalize_features(RISCVCPU
*cpu
, Error
**errp
)
1158 #ifndef CONFIG_USER_ONLY
1159 Error
*local_err
= NULL
;
1161 riscv_cpu_satp_mode_finalize(cpu
, &local_err
);
1162 if (local_err
!= NULL
) {
1163 error_propagate(errp
, local_err
);
1169 static void riscv_cpu_validate_misa_priv(CPURISCVState
*env
, Error
**errp
)
1171 if (riscv_has_ext(env
, RVH
) && env
->priv_ver
< PRIV_VERSION_1_12_0
) {
1172 error_setg(errp
, "H extension requires priv spec 1.12.0");
/*
 * DeviceClass::realize implementation for RISC-V CPUs.
 *
 * Ordering here matters: the privilege-spec version must be resolved before
 * extensions are validated against it, and extension validation must happen
 * before features (satp modes, PMU, timers) are finalized.  Any step that
 * fails propagates its error to @errp and aborts realization.
 */
static void riscv_cpu_realize(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    RISCVCPU *cpu = RISCV_CPU(dev);
    CPURISCVState *env = &cpu->env;
    RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
    CPUClass *cc = CPU_CLASS(mcc);
    int i, priv_version = -1;
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    /* Map the user-supplied "priv_spec" string to an internal constant. */
    if (cpu->cfg.priv_spec) {
        if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
            priv_version = PRIV_VERSION_1_12_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
            priv_version = PRIV_VERSION_1_11_0;
        } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
            priv_version = PRIV_VERSION_1_10_0;
        } else {
            error_setg(errp,
                       "Unsupported privilege spec version '%s'",
                       cpu->cfg.priv_spec);
            return;
        }
    }

    /* priv_version stays -1 (keep the model's default) if no string given. */
    if (priv_version >= PRIV_VERSION_1_10_0) {
        env->priv_ver = priv_version;
    }

    riscv_cpu_validate_misa_priv(env, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    /* Force disable extensions if priv spec version does not match */
    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
            (env->priv_ver < isa_edata_arr[i].min_version)) {
            isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
#ifndef CONFIG_USER_ONLY
            warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
                        " because privilege spec version does not match",
                        isa_edata_arr[i].name, env->mhartid);
#else
            warn_report("disabling %s extension because "
                        "privilege spec version does not match",
                        isa_edata_arr[i].name);
#endif
        }
    }

    if (cpu->cfg.epmp && !cpu->cfg.pmp) {
        /*
         * Enhanced PMP should only be available
         * on harts with PMP support
         */
        error_setg(errp, "Invalid configuration: EPMP requires PMP support");
        return;
    }

#ifndef CONFIG_USER_ONLY
    /* The Sstc extension provides the stimecmp/vstimecmp timers. */
    if (cpu->cfg.ext_sstc) {
        riscv_timer_init(cpu);
    }
#endif /* CONFIG_USER_ONLY */

    /* Validate that MISA_MXL is set properly. */
    switch (env->misa_mxl_max) {
#ifdef TARGET_RISCV64
    case MXL_RV64:
    case MXL_RV128:
        cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
        break;
#endif
    case MXL_RV32:
        cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
        break;
    default:
        g_assert_not_reached();
    }
    assert(env->misa_mxl_max == env->misa_mxl);

    riscv_cpu_validate_set_extensions(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    if (cpu->cfg.pmu_num) {
        /* The overflow timer is only needed with Sscofpmf. */
        if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) {
            cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          riscv_pmu_timer_cb, cpu);
        }
    }
#endif

    riscv_cpu_finalize_features(cpu, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    riscv_cpu_register_gdb_regs_for_features(cs);

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    mcc->parent_realize(dev, errp);
}
1296 #ifndef CONFIG_USER_ONLY
/*
 * QOM property getter for one satp mode (e.g. "sv39").
 * @name is the satp mode string; @opaque points at the per-CPU RISCVSATPMap.
 * Reports whether the corresponding bit is set in the enabled-mode map.
 */
static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    value = satp_map->map & (1 << satp);

    visit_type_bool(v, name, &value, errp);
}
/*
 * QOM property setter for one satp mode.
 * Updates the mode bit in the map and records in 'init' that the user
 * explicitly configured this mode (so finalize can tell defaults apart
 * from user choices).
 */
static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    RISCVSATPMap *satp_map = opaque;
    uint8_t satp = satp_mode_from_str(name);
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    satp_map->map = deposit32(satp_map->map, satp, 1, value);
    satp_map->init |= 1 << satp;
}
/*
 * Register the satp-mode boolean properties on @obj.
 * RV32 only has sv32; RV64 exposes sv39/sv48/sv57/sv64.  All properties
 * share the same getter/setter pair and operate on cpu->cfg.satp_mode.
 */
static void riscv_add_satp_mode_properties(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    if (cpu->env.misa_mxl == MXL_RV32) {
        object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    } else {
        object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
        object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
                            cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
    }
}
/*
 * GPIO input handler: route an incoming interrupt line to the hart.
 *
 * irq < IRQ_LOCAL_MAX               -> standard local interrupt (mip bits);
 * irq < IRQ_LOCAL_MAX +
 *       IRQ_LOCAL_GUEST_MAX         -> guest external interrupt (HGEIP),
 *                                      requires the H extension;
 * anything else is a wiring bug (assert).
 *
 * NOTE(review): the extraction dropped the switch case labels for the
 * first branch; the two visible arms are the generic mip update and the
 * IRQ_S_EXT special case that also tracks external_seip.
 */
static void riscv_cpu_set_irq(void *opaque, int irq, int level)
{
    RISCVCPU *cpu = RISCV_CPU(opaque);
    CPURISCVState *env = &cpu->env;

    if (irq < IRQ_LOCAL_MAX) {
        switch (irq) {
        case IRQ_U_SOFT:
        case IRQ_S_SOFT:
        case IRQ_VS_SOFT:
        case IRQ_M_SOFT:
        case IRQ_U_TIMER:
        case IRQ_S_TIMER:
        case IRQ_VS_TIMER:
        case IRQ_M_TIMER:
        case IRQ_U_EXT:
        case IRQ_VS_EXT:
        case IRQ_M_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
            }
            break;
        case IRQ_S_EXT:
            if (kvm_enabled()) {
                kvm_riscv_set_irq(cpu, irq, level);
            } else {
                /*
                 * S-mode external interrupts can also be raised by software
                 * (software_seip); keep the line level separately so the
                 * two sources can be OR-ed into mip.SEIP.
                 */
                env->external_seip = level;
                riscv_cpu_update_mip(env, 1 << irq,
                                     BOOL_TO_MASK(level | env->software_seip));
            }
            break;
        default:
            g_assert_not_reached();
        }
    } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
        /* Require H-extension for handling guest local interrupts */
        if (!riscv_has_ext(env, RVH)) {
            g_assert_not_reached();
        }

        /* Compute bit position in HGEIP CSR */
        irq = irq - IRQ_LOCAL_MAX + 1;
        if (env->geilen < irq) {
            g_assert_not_reached();
        }

        /* Update HGEIP CSR */
        env->hgeip &= ~((target_ulong)1 << irq);
        if (level) {
            env->hgeip |= (target_ulong)1 << irq;
        }

        /* Update mip.SGEIP bit */
        riscv_cpu_update_mip(env, MIP_SGEIP,
                             BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
    } else {
        g_assert_not_reached();
    }
}
1404 #endif /* CONFIG_USER_ONLY */
/*
 * Instance init for every RISC-V CPU object: set the extension defaults
 * shared by all models (Zifencei, Zicsr, MMU, PMP) and wire up the
 * interrupt input lines for system emulation.
 */
static void riscv_cpu_init(Object *obj)
{
    RISCVCPU *cpu = RISCV_CPU(obj);

    cpu->cfg.ext_ifencei = true;
    cpu->cfg.ext_icsr = true;
    cpu->cfg.mmu = true;
    cpu->cfg.pmp = true;

    cpu_set_cpustate_pointers(cpu);

#ifndef CONFIG_USER_ONLY
    /* One GPIO input per local interrupt plus guest external interrupts. */
    qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq,
                      IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
#endif /* CONFIG_USER_ONLY */
}
/* Descriptor for one single-letter MISA extension exposed as a property. */
typedef struct RISCVCPUMisaExtConfig {
    const char *name;          /* property name, e.g. "a", "x-j" */
    const char *description;   /* human-readable property description */
    target_ulong misa_bit;     /* corresponding RV* bit in misa */
    bool enabled;              /* default value applied at property-add time */
} RISCVCPUMisaExtConfig;
/*
 * QOM setter for a MISA extension property: set or clear the extension's
 * bit in both misa_ext and misa_ext_mask (the mask tracks which bits are
 * writable/known for this CPU).
 */
static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    if (!visit_type_bool(v, name, &value, errp)) {
        return;
    }

    if (value) {
        env->misa_ext |= misa_bit;
        env->misa_ext_mask |= misa_bit;
    } else {
        env->misa_ext &= ~misa_bit;
        env->misa_ext_mask &= ~misa_bit;
    }
}
/*
 * QOM getter for a MISA extension property: report whether the extension's
 * bit is currently set in misa_ext.
 */
static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
    target_ulong misa_bit = misa_ext_cfg->misa_bit;
    RISCVCPU *cpu = RISCV_CPU(obj);
    CPURISCVState *env = &cpu->env;
    bool value;

    value = env->misa_ext & misa_bit;

    visit_type_bool(v, name, &value, errp);
}
/*
 * Table of single-letter MISA extensions exposed as user-facing boolean
 * properties.  'enabled' is the default applied when the property is added;
 * experimental extensions carry an "x-" prefix in the property name.
 */
static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
    {.name = "a", .description = "Atomic instructions",
     .misa_bit = RVA, .enabled = true},
    {.name = "c", .description = "Compressed instructions",
     .misa_bit = RVC, .enabled = true},
    {.name = "d", .description = "Double-precision float point",
     .misa_bit = RVD, .enabled = true},
    {.name = "f", .description = "Single-precision float point",
     .misa_bit = RVF, .enabled = true},
    {.name = "i", .description = "Base integer instruction set",
     .misa_bit = RVI, .enabled = true},
    {.name = "e", .description = "Base integer instruction set (embedded)",
     .misa_bit = RVE, .enabled = false},
    {.name = "m", .description = "Integer multiplication and division",
     .misa_bit = RVM, .enabled = true},
    {.name = "s", .description = "Supervisor-level instructions",
     .misa_bit = RVS, .enabled = true},
    {.name = "u", .description = "User-level instructions",
     .misa_bit = RVU, .enabled = true},
    {.name = "h", .description = "Hypervisor",
     .misa_bit = RVH, .enabled = true},
    {.name = "x-j", .description = "Dynamic translated languages",
     .misa_bit = RVJ, .enabled = false},
    {.name = "v", .description = "Vector operations",
     .misa_bit = RVV, .enabled = false},
    {.name = "g", .description = "General purpose (IMAFD_Zicsr_Zifencei)",
     .misa_bit = RVG, .enabled = false},
};
/*
 * Register one boolean property per entry in misa_ext_cfgs on @cpu_obj.
 * Setting the default via object_property_set_bool() goes through the
 * setter, so misa_ext/misa_ext_mask are updated to the table defaults.
 */
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
        const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];

        object_property_add(cpu_obj, misa_cfg->name, "bool",
                            cpu_get_misa_ext_cfg,
                            cpu_set_misa_ext_cfg,
                            NULL, (void *)misa_cfg);
        object_property_set_description(cpu_obj, misa_cfg->name,
                                        misa_cfg->description);
        object_property_set_bool(cpu_obj, misa_cfg->name,
                                 misa_cfg->enabled, NULL);
    }
}
/*
 * User-configurable extension properties and their defaults, added to
 * dynamic CPU models by riscv_cpu_add_user_properties().  Grouped as:
 * standard extensions, spec/vector geometry, S-mode extensions, bitmanip,
 * crypto (Zk*), *inx float-in-integer, cache-block ops, Zc*, vendor
 * extensions, and experimental ("x-") extensions.
 */
static Property riscv_cpu_extensions[] = {
    /* Defaults for standard extensions */
    DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),
    DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false),
    DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
    DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
    DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
    DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true),
    DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
    DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
    DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
    DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false),
    DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false),
    DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
    DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
    DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true),

    DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
    DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
    DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
    DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),

    DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true),

    DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
    DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false),
    DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false),

    DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
    DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
    DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
    DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false),
    DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false),
    DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false),
    DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
    DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false),
    DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false),
    DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false),
    DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false),
    DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false),
    DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false),
    DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false),
    DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false),
    DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false),
    DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false),

    DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false),
    DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false),
    DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false),
    DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false),

    DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true),
    DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
    DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true),
    DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),

    DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false),

    DEFINE_PROP_BOOL("zca", RISCVCPU, cfg.ext_zca, false),
    DEFINE_PROP_BOOL("zcb", RISCVCPU, cfg.ext_zcb, false),
    DEFINE_PROP_BOOL("zcd", RISCVCPU, cfg.ext_zcd, false),
    DEFINE_PROP_BOOL("zce", RISCVCPU, cfg.ext_zce, false),
    DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false),
    DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false),
    DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false),

    /* Vendor-specific custom extensions */
    DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false),
    DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false),
    DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false),
    DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false),
    DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false),
    DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false),
    DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false),
    DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false),
    DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false),
    DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false),
    DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false),
    DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),

    /* These are experimental so mark with 'x-' */
    DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false),
    DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
    DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false),
    DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false),

    DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false),
    DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false),

    DEFINE_PROP_END_OF_LIST(),
};
/*
 * Add CPU properties with user-facing flags.
 *
 * This will overwrite existing env->misa_ext values with the
 * defaults set via riscv_cpu_add_misa_properties().
 */
static void riscv_cpu_add_user_properties(Object *obj)
{
    Property *prop;
    DeviceState *dev = DEVICE(obj);

    riscv_cpu_add_misa_properties(obj);

    /* Register every entry of the extension property table. */
    for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
        qdev_property_add_static(dev, prop);
    }

#ifndef CONFIG_USER_ONLY
    riscv_add_satp_mode_properties(obj);
#endif
}
/*
 * Properties common to every RISC-V CPU model (set in class_init, unlike
 * the per-model extension properties above).
 */
static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

    DEFINE_PROP_UINT32("mvendorid", RISCVCPU, cfg.mvendorid, 0),
    DEFINE_PROP_UINT64("marchid", RISCVCPU, cfg.marchid, RISCV_CPU_MARCHID),
    DEFINE_PROP_UINT64("mimpid", RISCVCPU, cfg.mimpid, RISCV_CPU_MIMPID),

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with -x and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};
/*
 * Return the gdb architecture name for this CPU; caller owns the string.
 * RV128 is reported as "riscv:rv64" (gdb has no rv128 architecture name).
 */
static gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return g_strdup("riscv:rv32");
    case MXL_RV64:
    case MXL_RV128:
        return g_strdup("riscv:rv64");
    default:
        g_assert_not_reached();
    }
}
1669 static const char *riscv_gdb_get_dynamic_xml(CPUState
*cs
, const char *xmlname
)
1671 RISCVCPU
*cpu
= RISCV_CPU(cs
);
1673 if (strcmp(xmlname
, "riscv-csr.xml") == 0) {
1674 return cpu
->dyn_csr_xml
;
1675 } else if (strcmp(xmlname
, "riscv-vector.xml") == 0) {
1676 return cpu
->dyn_vreg_xml
;
1682 #ifndef CONFIG_USER_ONLY
1683 static int64_t riscv_get_arch_id(CPUState
*cs
)
1685 RISCVCPU
*cpu
= RISCV_CPU(cs
);
1687 return cpu
->env
.mhartid
;
1690 #include "hw/core/sysemu-cpu-ops.h"
/* System-emulation hooks: physical page lookup, ELF notes, migration. */
static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
1700 #include "hw/core/tcg-cpu-ops.h"
/* TCG hooks: translation, TLB handling, interrupts and debug support. */
static const struct TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};
/*
 * Class init for TYPE_RISCV_CPU: chain realize/reset to the parent class
 * and install the CPUClass hooks (gdb, disassembly, TCG/sysemu ops) plus
 * the common property list.
 */
static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    /* 32 GPRs + pc; CSRs/FPRs/vector regs come from dynamic XML. */
    cc->gdb_num_core_regs = 33;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
    cc->tcg_ops = &riscv_tcg_ops;

    device_class_set_props(dc, riscv_cpu_properties);
}
/*
 * Append every enabled multi-letter extension to *isa_str, separated by
 * '_', in the canonical order of isa_edata_arr.  *isa_str is replaced by
 * a newly allocated string; the old one is freed.
 *
 * NOTE(review): the extraction dropped the second parameter line; by the
 * caller (riscv_isa_string passes maxlen) it is a maximum-length argument,
 * unused in this body — confirm against upstream.
 */
static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    char *old = *isa_str;
    char *new = *isa_str;
    int i;

    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
            new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}
/*
 * Build the ISA string for @cpu, e.g. "rv64imafdc".  Starts with
 * "rv<XLEN>", appends the enabled single-letter extensions in canonical
 * order, and (unless short-isa-string is set) the multi-letter extensions.
 * Returns a newly allocated string owned by the caller.
 */
char *riscv_isa_string(RISCVCPU *cpu)
{
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}
1789 static gint
riscv_cpu_list_compare(gconstpointer a
, gconstpointer b
)
1791 ObjectClass
*class_a
= (ObjectClass
*)a
;
1792 ObjectClass
*class_b
= (ObjectClass
*)b
;
1793 const char *name_a
, *name_b
;
1795 name_a
= object_class_get_name(class_a
);
1796 name_b
= object_class_get_name(class_b
);
1797 return strcmp(name_a
, name_b
);
1800 static void riscv_cpu_list_entry(gpointer data
, gpointer user_data
)
1802 const char *typename
= object_class_get_name(OBJECT_CLASS(data
));
1803 int len
= strlen(typename
) - strlen(RISCV_CPU_TYPE_SUFFIX
);
1805 qemu_printf("%.*s\n", len
, typename
);
/*
 * Print the sorted list of available RISC-V CPU models (for "-cpu help").
 */
void riscv_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_RISCV_CPU, false);
    list = g_slist_sort(list, riscv_cpu_list_compare);
    g_slist_foreach(list, riscv_cpu_list_entry, NULL);
    g_slist_free(list);
}
/* TypeInfo initializer for a fixed (non-configurable) CPU model. */
#define DEFINE_CPU(type_name, initfn)      \
    {                                      \
        .name = type_name,                 \
        .parent = TYPE_RISCV_CPU,          \
        .instance_init = initfn            \
    }

/* TypeInfo initializer for a dynamic (user-configurable) CPU model. */
#define DEFINE_DYNAMIC_CPU(type_name, initfn) \
    {                                         \
        .name = type_name,                    \
        .parent = TYPE_RISCV_DYNAMIC_CPU,     \
        .instance_init = initfn               \
    }
/*
 * QOM type registrations: the abstract base types followed by the concrete
 * CPU models for the configured target (RV32 or RV64) and, with KVM, the
 * "host" passthrough model.
 */
static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof__(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init),
#if defined(CONFIG_KVM)
    DEFINE_CPU(TYPE_RISCV_CPU_HOST, riscv_host_cpu_init),
#endif
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)