target/riscv/cpu.c
1 /*
2 * QEMU RISC-V CPU
3 *
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "pmu.h"
27 #include "internals.h"
28 #include "time_helper.h"
29 #include "exec/exec-all.h"
30 #include "qapi/error.h"
31 #include "qapi/visitor.h"
32 #include "qemu/error-report.h"
33 #include "hw/qdev-properties.h"
34 #include "migration/vmstate.h"
35 #include "fpu/softfloat-helpers.h"
36 #include "sysemu/kvm.h"
37 #include "sysemu/tcg.h"
38 #include "kvm_riscv.h"
39 #include "tcg/tcg.h"
41 /* RISC-V CPU definitions */
42 static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";
44 struct isa_ext_data {
45 const char *name;
46 int min_version;
47 int ext_enable_offset;
48 };
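/*
 * Each isa_edata_arr[] entry below, built with ISA_EXT_DATA_ENTRY(), records
 * an extension's ISA-string name, the minimum privileged-spec version that
 * permits it, and the offsetof() of its enable flag within RISCVCPUConfig;
 * isa_ext_is_enabled() and isa_ext_update_enabled() use that offset to read
 * and write the flag generically.
 */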
50 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
51 {#_name, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}
53 /*
54 * From vector_helper.c
55 * Note that vector data is stored in host-endian 64-bit chunks,
56 * so addressing bytes needs a host-endian fixup.
57 */
58 #if HOST_BIG_ENDIAN
59 #define BYTE(x) ((x) ^ 7)
60 #else
61 #define BYTE(x) (x)
62 #endif
65 * Here are the ordering rules of extension naming defined by RISC-V
66 * specification :
67 * 1. All extensions should be separated from other multi-letter extensions
68 * by an underscore.
69 * 2. The first letter following the 'Z' conventionally indicates the most
70 * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
71 * If multiple 'Z' extensions are named, they should be ordered first
72 * by category, then alphabetically within a category.
73 * 3. Standard supervisor-level extensions (starts with 'S') should be
74 * listed after standard unprivileged extensions. If multiple
75 * supervisor-level extensions are listed, they should be ordered
76 * alphabetically.
77 * 4. Non-standard extensions (starts with 'X') must be listed after all
78 * standard extensions. They must be separated from other multi-letter
79 * extensions by an underscore.
81 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
82 * instead.
84 static const struct isa_ext_data isa_edata_arr[] = {
85 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_icbom),
86 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_icboz),
87 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
88 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr),
89 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei),
90 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
91 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
92 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
93 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
94 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
95 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
96 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
97 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
98 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
99 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
100 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
101 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
102 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
103 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
104 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
105 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
106 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
107 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
108 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
109 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
110 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
111 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
112 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
113 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
114 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
115 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
116 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
117 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
118 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
119 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
120 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
121 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
122 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
123 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
124 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
125 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
126 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
127 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
128 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
129 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
130 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
131 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
132 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
133 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
134 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
135 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
136 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
137 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
138 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
139 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
140 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
141 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
142 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
143 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
144 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
145 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
146 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
147 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
148 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
149 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
150 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
151 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
152 };
154 static bool isa_ext_is_enabled(RISCVCPU *cpu,
155 const struct isa_ext_data *edata)
157 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;
159 return *ext_enabled;
162 static void isa_ext_update_enabled(RISCVCPU *cpu,
163 const struct isa_ext_data *edata, bool en)
165 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;
167 *ext_enabled = en;
170 const char * const riscv_int_regnames[] = {
171 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
172 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
173 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
174 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
175 "x28/t3", "x29/t4", "x30/t5", "x31/t6"
178 const char * const riscv_int_regnamesh[] = {
179 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
180 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
181 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
182 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
183 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
184 "x30h/t5h", "x31h/t6h"
187 const char * const riscv_fpr_regnames[] = {
188 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
189 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
190 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
191 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
192 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
193 "f30/ft10", "f31/ft11"
196 const char * const riscv_rvv_regnames[] = {
197 "v0", "v1", "v2", "v3", "v4", "v5", "v6",
198 "v7", "v8", "v9", "v10", "v11", "v12", "v13",
199 "v14", "v15", "v16", "v17", "v18", "v19", "v20",
200 "v21", "v22", "v23", "v24", "v25", "v26", "v27",
201 "v28", "v29", "v30", "v31"
204 static const char * const riscv_excp_names[] = {
205 "misaligned_fetch",
206 "fault_fetch",
207 "illegal_instruction",
208 "breakpoint",
209 "misaligned_load",
210 "fault_load",
211 "misaligned_store",
212 "fault_store",
213 "user_ecall",
214 "supervisor_ecall",
215 "hypervisor_ecall",
216 "machine_ecall",
217 "exec_page_fault",
218 "load_page_fault",
219 "reserved",
220 "store_page_fault",
221 "reserved",
222 "reserved",
223 "reserved",
224 "reserved",
225 "guest_exec_page_fault",
226 "guest_load_page_fault",
227 "reserved",
228 "guest_store_page_fault",
231 static const char * const riscv_intr_names[] = {
232 "u_software",
233 "s_software",
234 "vs_software",
235 "m_software",
236 "u_timer",
237 "s_timer",
238 "vs_timer",
239 "m_timer",
240 "u_external",
241 "s_external",
242 "vs_external",
243 "m_external",
244 "reserved",
245 "reserved",
246 "reserved",
247 "reserved"
250 static void riscv_cpu_add_user_properties(Object *obj);
252 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
254 if (async) {
255 return (cause < ARRAY_SIZE(riscv_intr_names)) ?
256 riscv_intr_names[cause] : "(unknown)";
257 } else {
258 return (cause < ARRAY_SIZE(riscv_excp_names)) ?
259 riscv_excp_names[cause] : "(unknown)";
263 static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
265 env->misa_mxl_max = env->misa_mxl = mxl;
266 env->misa_ext_mask = env->misa_ext = ext;
269 #ifndef CONFIG_USER_ONLY
270 static uint8_t satp_mode_from_str(const char *satp_mode_str)
272 if (!strncmp(satp_mode_str, "mbare", 5)) {
273 return VM_1_10_MBARE;
276 if (!strncmp(satp_mode_str, "sv32", 4)) {
277 return VM_1_10_SV32;
280 if (!strncmp(satp_mode_str, "sv39", 4)) {
281 return VM_1_10_SV39;
284 if (!strncmp(satp_mode_str, "sv48", 4)) {
285 return VM_1_10_SV48;
288 if (!strncmp(satp_mode_str, "sv57", 4)) {
289 return VM_1_10_SV57;
292 if (!strncmp(satp_mode_str, "sv64", 4)) {
293 return VM_1_10_SV64;
296 g_assert_not_reached();
299 uint8_t satp_mode_max_from_map(uint32_t map)
301 /* map here has at least one bit set, so no problem with clz */
302 return 31 - __builtin_clz(map);
305 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
307 if (is_32_bit) {
308 switch (satp_mode) {
309 case VM_1_10_SV32:
310 return "sv32";
311 case VM_1_10_MBARE:
312 return "none";
314 } else {
315 switch (satp_mode) {
316 case VM_1_10_SV64:
317 return "sv64";
318 case VM_1_10_SV57:
319 return "sv57";
320 case VM_1_10_SV48:
321 return "sv48";
322 case VM_1_10_SV39:
323 return "sv39";
324 case VM_1_10_MBARE:
325 return "none";
329 g_assert_not_reached();
332 static void set_satp_mode_max_supported(RISCVCPU *cpu,
333 uint8_t satp_mode)
335 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
336 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
338 for (int i = 0; i <= satp_mode; ++i) {
339 if (valid_vm[i]) {
340 cpu->cfg.satp_mode.supported |= (1 << i);
345 /* Set the satp mode to the max supported */
346 static void set_satp_mode_default_map(RISCVCPU *cpu)
348 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
350 #endif
352 static void riscv_any_cpu_init(Object *obj)
354 RISCVCPU *cpu = RISCV_CPU(obj);
355 CPURISCVState *env = &cpu->env;
356 #if defined(TARGET_RISCV32)
357 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
358 #elif defined(TARGET_RISCV64)
359 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
360 #endif
362 #ifndef CONFIG_USER_ONLY
363 set_satp_mode_max_supported(RISCV_CPU(obj),
364 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
365 VM_1_10_SV32 : VM_1_10_SV57);
366 #endif
368 env->priv_ver = PRIV_VERSION_LATEST;
370 /* inherited from parent obj via riscv_cpu_init() */
371 cpu->cfg.ext_ifencei = true;
372 cpu->cfg.ext_icsr = true;
373 cpu->cfg.mmu = true;
374 cpu->cfg.pmp = true;
377 #if defined(TARGET_RISCV64)
378 static void rv64_base_cpu_init(Object *obj)
380 CPURISCVState *env = &RISCV_CPU(obj)->env;
381 /* We set this in the realise function */
382 set_misa(env, MXL_RV64, 0);
383 riscv_cpu_add_user_properties(obj);
384 /* Set latest version of privileged specification */
385 env->priv_ver = PRIV_VERSION_LATEST;
386 #ifndef CONFIG_USER_ONLY
387 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
388 #endif
391 static void rv64_sifive_u_cpu_init(Object *obj)
393 RISCVCPU *cpu = RISCV_CPU(obj);
394 CPURISCVState *env = &cpu->env;
395 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
396 env->priv_ver = PRIV_VERSION_1_10_0;
397 #ifndef CONFIG_USER_ONLY
398 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
399 #endif
401 /* inherited from parent obj via riscv_cpu_init() */
402 cpu->cfg.ext_ifencei = true;
403 cpu->cfg.ext_icsr = true;
404 cpu->cfg.mmu = true;
405 cpu->cfg.pmp = true;
408 static void rv64_sifive_e_cpu_init(Object *obj)
410 CPURISCVState *env = &RISCV_CPU(obj)->env;
411 RISCVCPU *cpu = RISCV_CPU(obj);
413 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
414 env->priv_ver = PRIV_VERSION_1_10_0;
415 #ifndef CONFIG_USER_ONLY
416 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
417 #endif
419 /* inherited from parent obj via riscv_cpu_init() */
420 cpu->cfg.ext_ifencei = true;
421 cpu->cfg.ext_icsr = true;
422 cpu->cfg.pmp = true;
425 static void rv64_thead_c906_cpu_init(Object *obj)
427 CPURISCVState *env = &RISCV_CPU(obj)->env;
428 RISCVCPU *cpu = RISCV_CPU(obj);
430 set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
431 env->priv_ver = PRIV_VERSION_1_11_0;
433 cpu->cfg.ext_zfa = true;
434 cpu->cfg.ext_zfh = true;
435 cpu->cfg.mmu = true;
436 cpu->cfg.ext_xtheadba = true;
437 cpu->cfg.ext_xtheadbb = true;
438 cpu->cfg.ext_xtheadbs = true;
439 cpu->cfg.ext_xtheadcmo = true;
440 cpu->cfg.ext_xtheadcondmov = true;
441 cpu->cfg.ext_xtheadfmemidx = true;
442 cpu->cfg.ext_xtheadmac = true;
443 cpu->cfg.ext_xtheadmemidx = true;
444 cpu->cfg.ext_xtheadmempair = true;
445 cpu->cfg.ext_xtheadsync = true;
447 cpu->cfg.mvendorid = THEAD_VENDOR_ID;
448 #ifndef CONFIG_USER_ONLY
449 set_satp_mode_max_supported(cpu, VM_1_10_SV39);
450 #endif
452 /* inherited from parent obj via riscv_cpu_init() */
453 cpu->cfg.pmp = true;
456 static void rv64_veyron_v1_cpu_init(Object *obj)
458 CPURISCVState *env = &RISCV_CPU(obj)->env;
459 RISCVCPU *cpu = RISCV_CPU(obj);
461 set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
462 env->priv_ver = PRIV_VERSION_1_12_0;
464 /* Enable ISA extensions */
465 cpu->cfg.mmu = true;
466 cpu->cfg.ext_ifencei = true;
467 cpu->cfg.ext_icsr = true;
468 cpu->cfg.pmp = true;
469 cpu->cfg.ext_icbom = true;
470 cpu->cfg.cbom_blocksize = 64;
471 cpu->cfg.cboz_blocksize = 64;
472 cpu->cfg.ext_icboz = true;
473 cpu->cfg.ext_smaia = true;
474 cpu->cfg.ext_ssaia = true;
475 cpu->cfg.ext_sscofpmf = true;
476 cpu->cfg.ext_sstc = true;
477 cpu->cfg.ext_svinval = true;
478 cpu->cfg.ext_svnapot = true;
479 cpu->cfg.ext_svpbmt = true;
480 cpu->cfg.ext_smstateen = true;
481 cpu->cfg.ext_zba = true;
482 cpu->cfg.ext_zbb = true;
483 cpu->cfg.ext_zbc = true;
484 cpu->cfg.ext_zbs = true;
485 cpu->cfg.ext_XVentanaCondOps = true;
487 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
488 cpu->cfg.marchid = VEYRON_V1_MARCHID;
489 cpu->cfg.mimpid = VEYRON_V1_MIMPID;
491 #ifndef CONFIG_USER_ONLY
492 set_satp_mode_max_supported(cpu, VM_1_10_SV48);
493 #endif
496 static void rv128_base_cpu_init(Object *obj)
498 if (qemu_tcg_mttcg_enabled()) {
499 /* Missing 128-bit aligned atomics */
500 error_report("128-bit RISC-V currently does not work with Multi "
501 "Threaded TCG. Please use: -accel tcg,thread=single");
502 exit(EXIT_FAILURE);
504 CPURISCVState *env = &RISCV_CPU(obj)->env;
505 /* We set this in the realise function */
506 set_misa(env, MXL_RV128, 0);
507 riscv_cpu_add_user_properties(obj);
508 /* Set latest version of privileged specification */
509 env->priv_ver = PRIV_VERSION_LATEST;
510 #ifndef CONFIG_USER_ONLY
511 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
512 #endif
514 #else
515 static void rv32_base_cpu_init(Object *obj)
517 CPURISCVState *env = &RISCV_CPU(obj)->env;
518 /* We set this in the realise function */
519 set_misa(env, MXL_RV32, 0);
520 riscv_cpu_add_user_properties(obj);
521 /* Set latest version of privileged specification */
522 env->priv_ver = PRIV_VERSION_LATEST;
523 #ifndef CONFIG_USER_ONLY
524 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
525 #endif
528 static void rv32_sifive_u_cpu_init(Object *obj)
530 RISCVCPU *cpu = RISCV_CPU(obj);
531 CPURISCVState *env = &cpu->env;
532 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
533 env->priv_ver = PRIV_VERSION_1_10_0;
534 #ifndef CONFIG_USER_ONLY
535 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
536 #endif
538 /* inherited from parent obj via riscv_cpu_init() */
539 cpu->cfg.ext_ifencei = true;
540 cpu->cfg.ext_icsr = true;
541 cpu->cfg.mmu = true;
542 cpu->cfg.pmp = true;
545 static void rv32_sifive_e_cpu_init(Object *obj)
547 CPURISCVState *env = &RISCV_CPU(obj)->env;
548 RISCVCPU *cpu = RISCV_CPU(obj);
550 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
551 env->priv_ver = PRIV_VERSION_1_10_0;
552 #ifndef CONFIG_USER_ONLY
553 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
554 #endif
556 /* inherited from parent obj via riscv_cpu_init() */
557 cpu->cfg.ext_ifencei = true;
558 cpu->cfg.ext_icsr = true;
559 cpu->cfg.pmp = true;
562 static void rv32_ibex_cpu_init(Object *obj)
564 CPURISCVState *env = &RISCV_CPU(obj)->env;
565 RISCVCPU *cpu = RISCV_CPU(obj);
567 set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
568 env->priv_ver = PRIV_VERSION_1_11_0;
569 #ifndef CONFIG_USER_ONLY
570 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
571 #endif
572 cpu->cfg.epmp = true;
574 /* inherited from parent obj via riscv_cpu_init() */
575 cpu->cfg.ext_ifencei = true;
576 cpu->cfg.ext_icsr = true;
577 cpu->cfg.pmp = true;
580 static void rv32_imafcu_nommu_cpu_init(Object *obj)
582 CPURISCVState *env = &RISCV_CPU(obj)->env;
583 RISCVCPU *cpu = RISCV_CPU(obj);
585 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
586 env->priv_ver = PRIV_VERSION_1_10_0;
587 #ifndef CONFIG_USER_ONLY
588 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
589 #endif
591 /* inherited from parent obj via riscv_cpu_init() */
592 cpu->cfg.ext_ifencei = true;
593 cpu->cfg.ext_icsr = true;
594 cpu->cfg.pmp = true;
596 #endif
598 #if defined(CONFIG_KVM)
599 static void riscv_host_cpu_init(Object *obj)
601 CPURISCVState *env = &RISCV_CPU(obj)->env;
602 #if defined(TARGET_RISCV32)
603 set_misa(env, MXL_RV32, 0);
604 #elif defined(TARGET_RISCV64)
605 set_misa(env, MXL_RV64, 0);
606 #endif
607 riscv_cpu_add_user_properties(obj);
609 #endif /* CONFIG_KVM */
611 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
613 ObjectClass *oc;
614 char *typename;
615 char **cpuname;
617 cpuname = g_strsplit(cpu_model, ",", 1);
618 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
619 oc = object_class_by_name(typename);
620 g_strfreev(cpuname);
621 g_free(typename);
622 if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
623 object_class_is_abstract(oc)) {
624 return NULL;
626 return oc;
629 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
631 RISCVCPU *cpu = RISCV_CPU(cs);
632 CPURISCVState *env = &cpu->env;
633 int i, j;
634 uint8_t *p;
636 #if !defined(CONFIG_USER_ONLY)
637 if (riscv_has_ext(env, RVH)) {
638 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
640 #endif
641 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
642 #ifndef CONFIG_USER_ONLY
644 static const int dump_csrs[] = {
645 CSR_MHARTID,
646 CSR_MSTATUS,
647 CSR_MSTATUSH,
649 * CSR_SSTATUS is intentionally omitted here as its value
650 * can be figured out by looking at CSR_MSTATUS
652 CSR_HSTATUS,
653 CSR_VSSTATUS,
654 CSR_MIP,
655 CSR_MIE,
656 CSR_MIDELEG,
657 CSR_HIDELEG,
658 CSR_MEDELEG,
659 CSR_HEDELEG,
660 CSR_MTVEC,
661 CSR_STVEC,
662 CSR_VSTVEC,
663 CSR_MEPC,
664 CSR_SEPC,
665 CSR_VSEPC,
666 CSR_MCAUSE,
667 CSR_SCAUSE,
668 CSR_VSCAUSE,
669 CSR_MTVAL,
670 CSR_STVAL,
671 CSR_HTVAL,
672 CSR_MTVAL2,
673 CSR_MSCRATCH,
674 CSR_SSCRATCH,
675 CSR_SATP,
676 CSR_MMTE,
677 CSR_UPMBASE,
678 CSR_UPMMASK,
679 CSR_SPMBASE,
680 CSR_SPMMASK,
681 CSR_MPMBASE,
682 CSR_MPMMASK,
685 for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
686 int csrno = dump_csrs[i];
687 target_ulong val = 0;
688 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
691 * Rely on the smode, hmode, etc, predicates within csr.c
692 * to do the filtering of the registers that are present.
694 if (res == RISCV_EXCP_NONE) {
695 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
696 csr_ops[csrno].name, val);
700 #endif
702 for (i = 0; i < 32; i++) {
703 qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
704 riscv_int_regnames[i], env->gpr[i]);
705 if ((i & 3) == 3) {
706 qemu_fprintf(f, "\n");
709 if (flags & CPU_DUMP_FPU) {
710 for (i = 0; i < 32; i++) {
711 qemu_fprintf(f, " %-8s %016" PRIx64,
712 riscv_fpr_regnames[i], env->fpr[i]);
713 if ((i & 3) == 3) {
714 qemu_fprintf(f, "\n");
718 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
719 static const int dump_rvv_csrs[] = {
720 CSR_VSTART,
721 CSR_VXSAT,
722 CSR_VXRM,
723 CSR_VCSR,
724 CSR_VL,
725 CSR_VTYPE,
726 CSR_VLENB,
728 for (int i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
729 int csrno = dump_rvv_csrs[i];
730 target_ulong val = 0;
731 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
734 * Rely on the smode, hmode, etc, predicates within csr.c
735 * to do the filtering of the registers that are present.
737 if (res == RISCV_EXCP_NONE) {
738 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
739 csr_ops[csrno].name, val);
742 uint16_t vlenb = cpu->cfg.vlen >> 3;
744 for (i = 0; i < 32; i++) {
745 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
746 p = (uint8_t *)env->vreg;
747 for (j = vlenb - 1 ; j >= 0; j--) {
748 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
750 qemu_fprintf(f, "\n");
755 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
757 RISCVCPU *cpu = RISCV_CPU(cs);
758 CPURISCVState *env = &cpu->env;
760 if (env->xl == MXL_RV32) {
761 env->pc = (int32_t)value;
762 } else {
763 env->pc = value;
767 static vaddr riscv_cpu_get_pc(CPUState *cs)
769 RISCVCPU *cpu = RISCV_CPU(cs);
770 CPURISCVState *env = &cpu->env;
772 /* Match cpu_get_tb_cpu_state. */
773 if (env->xl == MXL_RV32) {
774 return env->pc & UINT32_MAX;
776 return env->pc;
779 static void riscv_cpu_synchronize_from_tb(CPUState *cs,
780 const TranslationBlock *tb)
782 if (!(tb_cflags(tb) & CF_PCREL)) {
783 RISCVCPU *cpu = RISCV_CPU(cs);
784 CPURISCVState *env = &cpu->env;
785 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
787 tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
789 if (xl == MXL_RV32) {
790 env->pc = (int32_t) tb->pc;
791 } else {
792 env->pc = tb->pc;
797 static bool riscv_cpu_has_work(CPUState *cs)
799 #ifndef CONFIG_USER_ONLY
800 RISCVCPU *cpu = RISCV_CPU(cs);
801 CPURISCVState *env = &cpu->env;
803 * Definition of the WFI instruction requires it to ignore the privilege
804 * mode and delegation registers, but respect individual enables
806 return riscv_cpu_all_pending(env) != 0;
807 #else
808 return true;
809 #endif
812 static void riscv_restore_state_to_opc(CPUState *cs,
813 const TranslationBlock *tb,
814 const uint64_t *data)
816 RISCVCPU *cpu = RISCV_CPU(cs);
817 CPURISCVState *env = &cpu->env;
818 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
819 target_ulong pc;
821 if (tb_cflags(tb) & CF_PCREL) {
822 pc = (env->pc & TARGET_PAGE_MASK) | data[0];
823 } else {
824 pc = data[0];
827 if (xl == MXL_RV32) {
828 env->pc = (int32_t)pc;
829 } else {
830 env->pc = pc;
832 env->bins = data[1];
835 static void riscv_cpu_reset_hold(Object *obj)
837 #ifndef CONFIG_USER_ONLY
838 uint8_t iprio;
839 int i, irq, rdzero;
840 #endif
841 CPUState *cs = CPU(obj);
842 RISCVCPU *cpu = RISCV_CPU(cs);
843 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
844 CPURISCVState *env = &cpu->env;
846 if (mcc->parent_phases.hold) {
847 mcc->parent_phases.hold(obj);
849 #ifndef CONFIG_USER_ONLY
850 env->misa_mxl = env->misa_mxl_max;
851 env->priv = PRV_M;
852 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
853 if (env->misa_mxl > MXL_RV32) {
855 * The reset status of SXL/UXL is undefined, but mstatus is WARL
856 * and we must ensure that the value after init is valid for read.
858 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
859 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
860 if (riscv_has_ext(env, RVH)) {
861 env->vsstatus = set_field(env->vsstatus,
862 MSTATUS64_SXL, env->misa_mxl);
863 env->vsstatus = set_field(env->vsstatus,
864 MSTATUS64_UXL, env->misa_mxl);
865 env->mstatus_hs = set_field(env->mstatus_hs,
866 MSTATUS64_SXL, env->misa_mxl);
867 env->mstatus_hs = set_field(env->mstatus_hs,
868 MSTATUS64_UXL, env->misa_mxl);
871 env->mcause = 0;
872 env->miclaim = MIP_SGEIP;
873 env->pc = env->resetvec;
874 env->bins = 0;
875 env->two_stage_lookup = false;
877 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
878 (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0);
879 env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
880 (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0);
882 /* Initialized default priorities of local interrupts. */
883 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
884 iprio = riscv_cpu_default_priority(i);
885 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
886 env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
887 env->hviprio[i] = 0;
889 i = 0;
890 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
891 if (!rdzero) {
892 env->hviprio[irq] = env->miprio[irq];
894 i++;
896 /* mmte is supposed to have pm.current hardwired to 1 */
897 env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
898 #endif
899 env->xl = riscv_cpu_mxl(env);
900 riscv_cpu_update_mask(env);
901 cs->exception_index = RISCV_EXCP_NONE;
902 env->load_res = -1;
903 set_default_nan_mode(1, &env->fp_status);
905 #ifndef CONFIG_USER_ONLY
906 if (cpu->cfg.debug) {
907 riscv_trigger_init(env);
910 if (kvm_enabled()) {
911 kvm_riscv_reset_vcpu(cpu);
913 #endif
916 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
918 RISCVCPU *cpu = RISCV_CPU(s);
919 CPURISCVState *env = &cpu->env;
920 info->target_info = &cpu->cfg;
922 switch (env->xl) {
923 case MXL_RV32:
924 info->print_insn = print_insn_riscv32;
925 break;
926 case MXL_RV64:
927 info->print_insn = print_insn_riscv64;
928 break;
929 case MXL_RV128:
930 info->print_insn = print_insn_riscv128;
931 break;
932 default:
933 g_assert_not_reached();
937 static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
938 Error **errp)
940 int vext_version = VEXT_VERSION_1_00_0;
942 if (!is_power_of_2(cfg->vlen)) {
943 error_setg(errp, "Vector extension VLEN must be power of 2");
944 return;
946 if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) {
947 error_setg(errp,
948 "Vector extension implementation only supports VLEN "
949 "in the range [128, %d]", RV_VLEN_MAX);
950 return;
952 if (!is_power_of_2(cfg->elen)) {
953 error_setg(errp, "Vector extension ELEN must be power of 2");
954 return;
956 if (cfg->elen > 64 || cfg->elen < 8) {
957 error_setg(errp,
958 "Vector extension implementation only supports ELEN "
959 "in the range [8, 64]");
960 return;
962 if (cfg->vext_spec) {
963 if (!g_strcmp0(cfg->vext_spec, "v1.0")) {
964 vext_version = VEXT_VERSION_1_00_0;
965 } else {
966 error_setg(errp, "Unsupported vector spec version '%s'",
967 cfg->vext_spec);
968 return;
970 } else {
971 qemu_log("vector version is not specified, "
972 "use the default value v1.0\n");
974 env->vext_ver = vext_version;
977 static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp)
979 CPURISCVState *env = &cpu->env;
980 int priv_version = -1;
982 if (cpu->cfg.priv_spec) {
983 if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
984 priv_version = PRIV_VERSION_1_12_0;
985 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
986 priv_version = PRIV_VERSION_1_11_0;
987 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
988 priv_version = PRIV_VERSION_1_10_0;
989 } else {
990 error_setg(errp,
991 "Unsupported privilege spec version '%s'",
992 cpu->cfg.priv_spec);
993 return;
996 env->priv_ver = priv_version;
1000 static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
1002 CPURISCVState *env = &cpu->env;
1003 int i;
1005 /* Force disable extensions if priv spec version does not match */
1006 for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
1007 if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
1008 (env->priv_ver < isa_edata_arr[i].min_version)) {
1009 isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
1010 #ifndef CONFIG_USER_ONLY
1011 warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
1012 " because privilege spec version does not match",
1013 isa_edata_arr[i].name, env->mhartid);
1014 #else
1015 warn_report("disabling %s extension because "
1016 "privilege spec version does not match",
1017 isa_edata_arr[i].name);
1018 #endif
1023 static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp)
1025 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
1026 CPUClass *cc = CPU_CLASS(mcc);
1027 CPURISCVState *env = &cpu->env;
1029 /* Validate that MISA_MXL is set properly. */
1030 switch (env->misa_mxl_max) {
1031 #ifdef TARGET_RISCV64
1032 case MXL_RV64:
1033 case MXL_RV128:
1034 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
1035 break;
1036 #endif
1037 case MXL_RV32:
1038 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
1039 break;
1040 default:
1041 g_assert_not_reached();
1044 if (env->misa_mxl_max != env->misa_mxl) {
1045 error_setg(errp, "misa_mxl_max must be equal to misa_mxl");
1046 return;
1051 * Check consistency between chosen extensions while setting
1052 * cpu->cfg accordingly.
1054 void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
1056 CPURISCVState *env = &cpu->env;
1057 Error *local_err = NULL;
1059 /* Do some ISA extension error checking */
1060 if (riscv_has_ext(env, RVG) &&
1061 !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) &&
1062 riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) &&
1063 riscv_has_ext(env, RVD) &&
1064 cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) {
1065 warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
1066 cpu->cfg.ext_icsr = true;
1067 cpu->cfg.ext_ifencei = true;
1069 env->misa_ext |= RVI | RVM | RVA | RVF | RVD;
1070 env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD;
1073 if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
1074 error_setg(errp,
1075 "I and E extensions are incompatible");
1076 return;
1079 if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
1080 error_setg(errp,
1081 "Either I or E extension must be set");
1082 return;
1085 if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
1086 error_setg(errp,
1087 "Setting S extension without U extension is illegal");
1088 return;
1091 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
1092 error_setg(errp,
1093 "H depends on an I base integer ISA with 32 x registers");
1094 return;
1097 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
1098 error_setg(errp, "H extension implicitly requires S-mode");
1099 return;
1102 if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_icsr) {
1103 error_setg(errp, "F extension requires Zicsr");
1104 return;
1107 if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
1108 error_setg(errp, "Zawrs extension requires A extension");
1109 return;
1112 if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
1113 error_setg(errp, "Zfa extension requires F extension");
1114 return;
1117 if (cpu->cfg.ext_zfh) {
1118 cpu->cfg.ext_zfhmin = true;
1121 if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
1122 error_setg(errp, "Zfh/Zfhmin extensions require F extension");
1123 return;
1126 if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
1127 error_setg(errp, "Zfbfmin extension depends on F extension");
1128 return;
1131 if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
1132 error_setg(errp, "D extension requires F extension");
1133 return;
1136 if (riscv_has_ext(env, RVV)) {
1137 riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
1138 if (local_err != NULL) {
1139 error_propagate(errp, local_err);
1140 return;
1143 /* The V vector extension depends on the Zve64d extension */
1144 cpu->cfg.ext_zve64d = true;
1147 /* The Zve64d extension depends on the Zve64f extension */
1148 if (cpu->cfg.ext_zve64d) {
1149 cpu->cfg.ext_zve64f = true;
1152 /* The Zve64f extension depends on the Zve32f extension */
1153 if (cpu->cfg.ext_zve64f) {
1154 cpu->cfg.ext_zve32f = true;
1157 if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) {
1158 error_setg(errp, "Zve64d/V extensions require D extension");
1159 return;
1162 if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) {
1163 error_setg(errp, "Zve32f/Zve64f extensions require F extension");
1164 return;
1167 if (cpu->cfg.ext_zvfh) {
1168 cpu->cfg.ext_zvfhmin = true;
1171 if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
1172 error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
1173 return;
1176 if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
1177 error_setg(errp, "Zvfh extension requires Zfhmin extension");
1178 return;
1181 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) {
1182 error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension");
1183 return;
1186 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
1187 error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
1188 return;
1191 if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
1192 error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
1193 return;
1196 /* Set the ISA extensions, checks should have happened above */
1197 if (cpu->cfg.ext_zhinx) {
1198 cpu->cfg.ext_zhinxmin = true;
1201 if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
1202 error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
1203 return;
1206 if (cpu->cfg.ext_zfinx) {
1207 if (!cpu->cfg.ext_icsr) {
1208 error_setg(errp, "Zfinx extension requires Zicsr");
1209 return;
1211 if (riscv_has_ext(env, RVF)) {
1212 error_setg(errp,
1213 "Zfinx cannot be supported together with F extension");
1214 return;
1218 if (cpu->cfg.ext_zce) {
1219 cpu->cfg.ext_zca = true;
1220 cpu->cfg.ext_zcb = true;
1221 cpu->cfg.ext_zcmp = true;
1222 cpu->cfg.ext_zcmt = true;
1223 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
1224 cpu->cfg.ext_zcf = true;
1228 /* zca, zcd and zcf have a PRIV 1.12.0 restriction */
1229 if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
1230 cpu->cfg.ext_zca = true;
1231 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
1232 cpu->cfg.ext_zcf = true;
1234 if (riscv_has_ext(env, RVD)) {
1235 cpu->cfg.ext_zcd = true;
1239 if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
1240 error_setg(errp, "Zcf extension is only relevant to RV32");
1241 return;
1244 if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
1245 error_setg(errp, "Zcf extension requires F extension");
1246 return;
1249 if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
1250 error_setg(errp, "Zcd extension requires D extension");
1251 return;
1254 if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
1255 cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
1256 error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
1257 "extension");
1258 return;
1261 if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
1262 error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
1263 "Zcd extension");
1264 return;
1267 if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) {
1268 error_setg(errp, "Zcmt extension requires Zicsr extension");
1269 return;
1272 if (cpu->cfg.ext_zk) {
1273 cpu->cfg.ext_zkn = true;
1274 cpu->cfg.ext_zkr = true;
1275 cpu->cfg.ext_zkt = true;
1278 if (cpu->cfg.ext_zkn) {
1279 cpu->cfg.ext_zbkb = true;
1280 cpu->cfg.ext_zbkc = true;
1281 cpu->cfg.ext_zbkx = true;
1282 cpu->cfg.ext_zkne = true;
1283 cpu->cfg.ext_zknd = true;
1284 cpu->cfg.ext_zknh = true;
1287 if (cpu->cfg.ext_zks) {
1288 cpu->cfg.ext_zbkb = true;
1289 cpu->cfg.ext_zbkc = true;
1290 cpu->cfg.ext_zbkx = true;
1291 cpu->cfg.ext_zksed = true;
1292 cpu->cfg.ext_zksh = true;
1296 * Disable isa extensions based on priv spec after we
1297 * validated and set everything we need.
1299 riscv_cpu_disable_priv_spec_isa_exts(cpu);
1302 #ifndef CONFIG_USER_ONLY
1303 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
1305 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
1306 uint8_t satp_mode_map_max;
1307 uint8_t satp_mode_supported_max =
1308 satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1310 if (cpu->cfg.satp_mode.map == 0) {
1311 if (cpu->cfg.satp_mode.init == 0) {
1312 /* If unset by the user, we fallback to the default satp mode. */
1313 set_satp_mode_default_map(cpu);
1314 } else {
1316 * Find the lowest level that was disabled and then enable the
1317 * first valid level below which can be found in
1318 * valid_vm_1_10_32/64.
1320 for (int i = 1; i < 16; ++i) {
1321 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1322 (cpu->cfg.satp_mode.supported & (1 << i))) {
1323 for (int j = i - 1; j >= 0; --j) {
1324 if (cpu->cfg.satp_mode.supported & (1 << j)) {
1325 cpu->cfg.satp_mode.map |= (1 << j);
1326 break;
1329 break;
1335 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1337 /* Make sure the user asked for a supported configuration (HW and qemu) */
1338 if (satp_mode_map_max > satp_mode_supported_max) {
1339 error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1340 satp_mode_str(satp_mode_map_max, rv32),
1341 satp_mode_str(satp_mode_supported_max, rv32));
1342 return;
1346 * Make sure the user did not ask for an invalid configuration as per
1347 * the specification.
1349 if (!rv32) {
1350 for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1351 if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1352 (cpu->cfg.satp_mode.init & (1 << i)) &&
1353 (cpu->cfg.satp_mode.supported & (1 << i))) {
1354 error_setg(errp, "cannot disable %s satp mode if %s "
1355 "is enabled", satp_mode_str(i, false),
1356 satp_mode_str(satp_mode_map_max, false));
1357 return;
1362 /* Finally expand the map so that all valid modes are set */
1363 for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1364 if (cpu->cfg.satp_mode.supported & (1 << i)) {
1365 cpu->cfg.satp_mode.map |= (1 << i);
1369 #endif
1371 static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1373 #ifndef CONFIG_USER_ONLY
1374 Error *local_err = NULL;
1376 riscv_cpu_satp_mode_finalize(cpu, &local_err);
1377 if (local_err != NULL) {
1378 error_propagate(errp, local_err);
1379 return;
1381 #endif
1384 static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
1386 if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
1387 error_setg(errp, "H extension requires priv spec 1.12.0");
1388 return;
1392 static void riscv_cpu_realize_tcg(DeviceState *dev, Error **errp)
1394 RISCVCPU *cpu = RISCV_CPU(dev);
1395 CPURISCVState *env = &cpu->env;
1396 Error *local_err = NULL;
1398 riscv_cpu_validate_misa_mxl(cpu, &local_err);
1399 if (local_err != NULL) {
1400 error_propagate(errp, local_err);
1401 return;
1404 riscv_cpu_validate_priv_spec(cpu, &local_err);
1405 if (local_err != NULL) {
1406 error_propagate(errp, local_err);
1407 return;
1410 riscv_cpu_validate_misa_priv(env, &local_err);
1411 if (local_err != NULL) {
1412 error_propagate(errp, local_err);
1413 return;
1416 if (cpu->cfg.epmp && !cpu->cfg.pmp) {
1418 * Enhanced PMP should only be available
1419 * on harts with PMP support
1421 error_setg(errp, "Invalid configuration: EPMP requires PMP support");
1422 return;
1425 riscv_cpu_validate_set_extensions(cpu, &local_err);
1426 if (local_err != NULL) {
1427 error_propagate(errp, local_err);
1428 return;
1431 #ifndef CONFIG_USER_ONLY
1432 CPU(dev)->tcg_cflags |= CF_PCREL;
1434 if (cpu->cfg.ext_sstc) {
1435 riscv_timer_init(cpu);
1438 if (cpu->cfg.pmu_num) {
1439 if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) {
1440 cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1441 riscv_pmu_timer_cb, cpu);
1444 #endif
1447 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1449 CPUState *cs = CPU(dev);
1450 RISCVCPU *cpu = RISCV_CPU(dev);
1451 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1452 Error *local_err = NULL;
1454 cpu_exec_realizefn(cs, &local_err);
1455 if (local_err != NULL) {
1456 error_propagate(errp, local_err);
1457 return;
1460 if (tcg_enabled()) {
1461 riscv_cpu_realize_tcg(dev, &local_err);
1462 if (local_err != NULL) {
1463 error_propagate(errp, local_err);
1464 return;
1468 riscv_cpu_finalize_features(cpu, &local_err);
1469 if (local_err != NULL) {
1470 error_propagate(errp, local_err);
1471 return;
1474 riscv_cpu_register_gdb_regs_for_features(cs);
1476 qemu_init_vcpu(cs);
1477 cpu_reset(cs);
1479 mcc->parent_realize(dev, errp);
1482 #ifndef CONFIG_USER_ONLY
1483 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1484 void *opaque, Error **errp)
1486 RISCVSATPMap *satp_map = opaque;
1487 uint8_t satp = satp_mode_from_str(name);
1488 bool value;
1490 value = satp_map->map & (1 << satp);
1492 visit_type_bool(v, name, &value, errp);
1495 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1496 void *opaque, Error **errp)
1498 RISCVSATPMap *satp_map = opaque;
1499 uint8_t satp = satp_mode_from_str(name);
1500 bool value;
1502 if (!visit_type_bool(v, name, &value, errp)) {
1503 return;
1506 satp_map->map = deposit32(satp_map->map, satp, 1, value);
1507 satp_map->init |= 1 << satp;
1510 static void riscv_add_satp_mode_properties(Object *obj)
1512 RISCVCPU *cpu = RISCV_CPU(obj);
1514 if (cpu->env.misa_mxl == MXL_RV32) {
1515 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1516 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1517 } else {
1518 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1519 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1520 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1521 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1522 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1523 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1524 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1525 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1529 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1531 RISCVCPU *cpu = RISCV_CPU(opaque);
1532 CPURISCVState *env = &cpu->env;
1534 if (irq < IRQ_LOCAL_MAX) {
1535 switch (irq) {
1536 case IRQ_U_SOFT:
1537 case IRQ_S_SOFT:
1538 case IRQ_VS_SOFT:
1539 case IRQ_M_SOFT:
1540 case IRQ_U_TIMER:
1541 case IRQ_S_TIMER:
1542 case IRQ_VS_TIMER:
1543 case IRQ_M_TIMER:
1544 case IRQ_U_EXT:
1545 case IRQ_VS_EXT:
1546 case IRQ_M_EXT:
1547 if (kvm_enabled()) {
1548 kvm_riscv_set_irq(cpu, irq, level);
1549 } else {
1550 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1552 break;
1553 case IRQ_S_EXT:
1554 if (kvm_enabled()) {
1555 kvm_riscv_set_irq(cpu, irq, level);
1556 } else {
1557 env->external_seip = level;
1558 riscv_cpu_update_mip(env, 1 << irq,
1559 BOOL_TO_MASK(level | env->software_seip));
1561 break;
1562 default:
1563 g_assert_not_reached();
1565 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1566 /* Require H-extension for handling guest local interrupts */
1567 if (!riscv_has_ext(env, RVH)) {
1568 g_assert_not_reached();
1571 /* Compute bit position in HGEIP CSR */
1572 irq = irq - IRQ_LOCAL_MAX + 1;
1573 if (env->geilen < irq) {
1574 g_assert_not_reached();
1577 /* Update HGEIP CSR */
1578 env->hgeip &= ~((target_ulong)1 << irq);
1579 if (level) {
1580 env->hgeip |= (target_ulong)1 << irq;
1583 /* Update mip.SGEIP bit */
1584 riscv_cpu_update_mip(env, MIP_SGEIP,
1585 BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1586 } else {
1587 g_assert_not_reached();
1590 #endif /* CONFIG_USER_ONLY */
1592 static void riscv_cpu_init(Object *obj)
1594 RISCVCPU *cpu = RISCV_CPU(obj);
1596 cpu_set_cpustate_pointers(cpu);
1598 #ifndef CONFIG_USER_ONLY
1599 qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq,
1600 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1601 #endif /* CONFIG_USER_ONLY */
1604 typedef struct RISCVCPUMisaExtConfig {
1605 const char *name;
1606 const char *description;
1607 target_ulong misa_bit;
1608 bool enabled;
1609 } RISCVCPUMisaExtConfig;
1611 static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
1612 void *opaque, Error **errp)
1614 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
1615 target_ulong misa_bit = misa_ext_cfg->misa_bit;
1616 RISCVCPU *cpu = RISCV_CPU(obj);
1617 CPURISCVState *env = &cpu->env;
1618 bool value;
1620 if (!visit_type_bool(v, name, &value, errp)) {
1621 return;
1624 if (value) {
1625 env->misa_ext |= misa_bit;
1626 env->misa_ext_mask |= misa_bit;
1627 } else {
1628 env->misa_ext &= ~misa_bit;
1629 env->misa_ext_mask &= ~misa_bit;
1633 static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
1634 void *opaque, Error **errp)
1636 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
1637 target_ulong misa_bit = misa_ext_cfg->misa_bit;
1638 RISCVCPU *cpu = RISCV_CPU(obj);
1639 CPURISCVState *env = &cpu->env;
1640 bool value;
1642 value = env->misa_ext & misa_bit;
1644 visit_type_bool(v, name, &value, errp);
1647 typedef struct misa_ext_info {
1648 const char *name;
1649 const char *description;
1650 } MISAExtInfo;
1652 #define MISA_INFO_IDX(_bit) \
1653 __builtin_ctz(_bit)
1655 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1656 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
1658 static const MISAExtInfo misa_ext_info_arr[] = {
1659 MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1660 MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1661 MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
1662 MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
1663 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1664 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1665 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1666 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1667 MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1668 MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1669 MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
1670 MISA_EXT_INFO(RVV, "v", "Vector operations"),
1671 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1674 static int riscv_validate_misa_info_idx(uint32_t bit)
1676 int idx;
1679 * Our lowest valid input (RVA) is 1 and
1680 * __builtin_ctz() is UB with zero.
1682 g_assert(bit != 0);
1683 idx = MISA_INFO_IDX(bit);
1685 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1686 return idx;
1689 const char *riscv_get_misa_ext_name(uint32_t bit)
1691 int idx = riscv_validate_misa_info_idx(bit);
1692 const char *val = misa_ext_info_arr[idx].name;
1694 g_assert(val != NULL);
1695 return val;
1698 const char *riscv_get_misa_ext_description(uint32_t bit)
1700 int idx = riscv_validate_misa_info_idx(bit);
1701 const char *val = misa_ext_info_arr[idx].description;
1703 g_assert(val != NULL);
1704 return val;
1707 #define MISA_CFG(_bit, _enabled) \
1708 {.misa_bit = _bit, .enabled = _enabled}
1710 static RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
1711 MISA_CFG(RVA, true),
1712 MISA_CFG(RVC, true),
1713 MISA_CFG(RVD, true),
1714 MISA_CFG(RVF, true),
1715 MISA_CFG(RVI, true),
1716 MISA_CFG(RVE, false),
1717 MISA_CFG(RVM, true),
1718 MISA_CFG(RVS, true),
1719 MISA_CFG(RVU, true),
1720 MISA_CFG(RVH, true),
1721 MISA_CFG(RVJ, false),
1722 MISA_CFG(RVV, false),
1723 MISA_CFG(RVG, false),
1726 static void riscv_cpu_add_misa_properties(Object *cpu_obj)
1728 int i;
1730 for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
1731 RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
1732 int bit = misa_cfg->misa_bit;
1734 misa_cfg->name = riscv_get_misa_ext_name(bit);
1735 misa_cfg->description = riscv_get_misa_ext_description(bit);
1737 /* Check if KVM already created the property */
1738 if (object_property_find(cpu_obj, misa_cfg->name)) {
1739 continue;
1742 object_property_add(cpu_obj, misa_cfg->name, "bool",
1743 cpu_get_misa_ext_cfg,
1744 cpu_set_misa_ext_cfg,
1745 NULL, (void *)misa_cfg);
1746 object_property_set_description(cpu_obj, misa_cfg->name,
1747 misa_cfg->description);
1748 object_property_set_bool(cpu_obj, misa_cfg->name,
1749 misa_cfg->enabled, NULL);
1753 static Property riscv_cpu_extensions[] = {
1754 /* Defaults for standard extensions */
1755 DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),
1756 DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false),
1757 DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
1758 DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
1759 DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
1760 DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true),
1761 DEFINE_PROP_BOOL("Zfa", RISCVCPU, cfg.ext_zfa, true),
1762 DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
1763 DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
1764 DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
1765 DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false),
1766 DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false),
1767 DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
1768 DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
1769 DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true),
1771 DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
1772 DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
1773 DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
1774 DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
1776 DEFINE_PROP_BOOL("smstateen", RISCVCPU, cfg.ext_smstateen, false),
1777 DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true),
1778 DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
1779 DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false),
1780 DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false),
1782 DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
1783 DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
1784 DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
1785 DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false),
1786 DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false),
1787 DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false),
1788 DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
1789 DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false),
1790 DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false),
1791 DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false),
1792 DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false),
1793 DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false),
1794 DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false),
1795 DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false),
1796 DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false),
1797 DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false),
1798 DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false),
1800 DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false),
1801 DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false),
1802 DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false),
1803 DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false),
1805 DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true),
1806 DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
1807 DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true),
1808 DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),
1810 DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false),
1812 DEFINE_PROP_BOOL("zca", RISCVCPU, cfg.ext_zca, false),
1813 DEFINE_PROP_BOOL("zcb", RISCVCPU, cfg.ext_zcb, false),
1814 DEFINE_PROP_BOOL("zcd", RISCVCPU, cfg.ext_zcd, false),
1815 DEFINE_PROP_BOOL("zce", RISCVCPU, cfg.ext_zce, false),
1816 DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false),
1817 DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false),
1818 DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false),
1820 /* Vendor-specific custom extensions */
1821 DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false),
1822 DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false),
1823 DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false),
1824 DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false),
1825 DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false),
1826 DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false),
1827 DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false),
1828 DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false),
1829 DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false),
1830 DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false),
1831 DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false),
1832 DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),
1834 /* These are experimental so mark with 'x-' */
1835 DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false),
1837 /* ePMP 0.9.3 */
1838 DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
1839 DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false),
1840 DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false),
1842 DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false),
1843 DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false),
1845 DEFINE_PROP_BOOL("x-zfbfmin", RISCVCPU, cfg.ext_zfbfmin, false),
1846 DEFINE_PROP_BOOL("x-zvfbfmin", RISCVCPU, cfg.ext_zvfbfmin, false),
1847 DEFINE_PROP_BOOL("x-zvfbfwma", RISCVCPU, cfg.ext_zvfbfwma, false),
1849 DEFINE_PROP_END_OF_LIST(),
1853 #ifndef CONFIG_USER_ONLY
1854 static void cpu_set_cfg_unavailable(Object *obj, Visitor *v,
1855 const char *name,
1856 void *opaque, Error **errp)
1858 const char *propname = opaque;
1859 bool value;
1861 if (!visit_type_bool(v, name, &value, errp)) {
1862 return;
1865 if (value) {
1866 error_setg(errp, "extension %s is not available with KVM",
1867 propname);
1870 #endif
1873 * Add CPU properties with user-facing flags.
1875 * This will overwrite existing env->misa_ext values with the
1876 * defaults set via riscv_cpu_add_misa_properties().
1878 static void riscv_cpu_add_user_properties(Object *obj)
1880 Property *prop;
1881 DeviceState *dev = DEVICE(obj);
1883 #ifndef CONFIG_USER_ONLY
1884 riscv_add_satp_mode_properties(obj);
1886 if (kvm_enabled()) {
1887 kvm_riscv_init_user_properties(obj);
1889 #endif
1891 riscv_cpu_add_misa_properties(obj);
1893 for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
1894 #ifndef CONFIG_USER_ONLY
1895 if (kvm_enabled()) {
1896 /* Check if KVM created the property already */
1897 if (object_property_find(obj, prop->name)) {
1898 continue;
1902 * Set the default to disabled for every extension
1903 * unknown to KVM and error out if the user attempts
1904 * to enable any of them.
1906 * We're giving a pass for non-bool properties since they're
1907 * not related to the availability of extensions and can be
1908 * safely ignored as is.
1910 if (prop->info == &qdev_prop_bool) {
1911 object_property_add(obj, prop->name, "bool",
1912 NULL, cpu_set_cfg_unavailable,
1913 NULL, (void *)prop->name);
1914 continue;
1917 #endif
1918 qdev_property_add_static(dev, prop);
static Property riscv_cpu_properties[] = {
    DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),

#ifndef CONFIG_USER_ONLY
    DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif

    DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),

    DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
    DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),

    /*
     * write_misa() is marked as experimental for now so mark
     * it with the 'x-' prefix and default to 'false'.
     */
    DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
    DEFINE_PROP_END_OF_LIST(),
};
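/*
 * Illustrative use of the properties above (values are arbitrary examples):
 * "-cpu rv64,short-isa-string=on,resetvec=0x80000000" shortens the ISA
 * string exposed to the guest and moves the reset vector. "resetvec" is only
 * available in system-mode builds, matching the #ifndef above.
 */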
static gchar *riscv_gdb_arch_name(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        return g_strdup("riscv:rv32");
    case MXL_RV64:
    case MXL_RV128:
        return g_strdup("riscv:rv64");
    default:
        g_assert_not_reached();
    }
}
static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    if (strcmp(xmlname, "riscv-csr.xml") == 0) {
        return cpu->dyn_csr_xml;
    } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
        return cpu->dyn_vreg_xml;
    }

    return NULL;
}
#ifndef CONFIG_USER_ONLY
static int64_t riscv_get_arch_id(CPUState *cs)
{
    RISCVCPU *cpu = RISCV_CPU(cs);

    return cpu->env.mhartid;
}

#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps riscv_sysemu_ops = {
    .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
    .write_elf64_note = riscv_cpu_write_elf64_note,
    .write_elf32_note = riscv_cpu_write_elf32_note,
    .legacy_vmsd = &vmstate_riscv_cpu,
};
#endif
#include "hw/core/tcg-cpu-ops.h"

static const struct TCGCPUOps riscv_tcg_ops = {
    .initialize = riscv_translate_init,
    .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
    .restore_state_to_opc = riscv_restore_state_to_opc,

#ifndef CONFIG_USER_ONLY
    .tlb_fill = riscv_cpu_tlb_fill,
    .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
    .do_interrupt = riscv_cpu_do_interrupt,
    .do_transaction_failed = riscv_cpu_do_transaction_failed,
    .do_unaligned_access = riscv_cpu_do_unaligned_access,
    .debug_excp_handler = riscv_cpu_debug_excp_handler,
    .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
    .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
#endif /* !CONFIG_USER_ONLY */
};
static bool riscv_cpu_is_dynamic(Object *cpu_obj)
{
    return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
}
static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint32_t prev_val = cpu->cfg.mvendorid;
    uint32_t value;

    if (!visit_type_uint32(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mvendorid (0x%x)",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mvendorid = value;
}
static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
                              void *opaque, Error **errp)
{
    uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;

    visit_type_uint32(v, name, &value, errp);
}
static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.mimpid;
    uint64_t value;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    cpu->cfg.mimpid = value;
}
static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
                           void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.mimpid;

    visit_type_uint64(v, name, &value, errp);
}
static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
    RISCVCPU *cpu = RISCV_CPU(obj);
    uint64_t prev_val = cpu->cfg.marchid;
    uint64_t value, invalid_val;
    uint32_t mxlen = 0;

    if (!visit_type_uint64(v, name, &value, errp)) {
        return;
    }

    if (!dynamic_cpu && prev_val != value) {
        error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
                   object_get_typename(obj), prev_val);
        return;
    }

    switch (riscv_cpu_mxl(&cpu->env)) {
    case MXL_RV32:
        mxlen = 32;
        break;
    case MXL_RV64:
    case MXL_RV128:
        mxlen = 64;
        break;
    default:
        g_assert_not_reached();
    }

    invalid_val = 1ULL << (mxlen - 1);

    if (value == invalid_val) {
        error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
                   "and the remaining bits zero", mxlen);
        return;
    }

    cpu->cfg.marchid = value;
}
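/*
 * Per the check above, the only rejected marchid is the value with just the
 * MSB set: 0x80000000 for RV32, 0x8000000000000000 for RV64/RV128 (which
 * report a 64-bit mxlen here). Any other value is accepted on dynamic CPUs;
 * named CPUs additionally refuse any change from their built-in value.
 */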
static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
                            void *opaque, Error **errp)
{
    uint64_t value = RISCV_CPU(obj)->cfg.marchid;

    visit_type_uint64(v, name, &value, errp);
}
static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
    RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);
    ResettableClass *rc = RESETTABLE_CLASS(c);

    device_class_set_parent_realize(dc, riscv_cpu_realize,
                                    &mcc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
                                       &mcc->parent_phases);

    cc->class_by_name = riscv_cpu_class_by_name;
    cc->has_work = riscv_cpu_has_work;
    cc->dump_state = riscv_cpu_dump_state;
    cc->set_pc = riscv_cpu_set_pc;
    cc->get_pc = riscv_cpu_get_pc;
    cc->gdb_read_register = riscv_cpu_gdb_read_register;
    cc->gdb_write_register = riscv_cpu_gdb_write_register;
    cc->gdb_num_core_regs = 33;
    cc->gdb_stop_before_watchpoint = true;
    cc->disas_set_info = riscv_cpu_disas_set_info;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &riscv_sysemu_ops;
    cc->get_arch_id = riscv_get_arch_id;
#endif
    cc->gdb_arch_name = riscv_gdb_arch_name;
    cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
    cc->tcg_ops = &riscv_tcg_ops;

    object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
                              cpu_set_mvendorid, NULL, NULL);

    object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
                              cpu_set_mimpid, NULL, NULL);

    object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
                              cpu_set_marchid, NULL, NULL);

    device_class_set_props(dc, riscv_cpu_properties);
}
static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
                                 int max_str_len)
{
    char *old = *isa_str;
    char *new = *isa_str;
    int i;

    for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
        if (isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
            new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
            g_free(old);
            old = new;
        }
    }

    *isa_str = new;
}
char *riscv_isa_string(RISCVCPU *cpu)
{
    int i;
    const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
    char *isa_str = g_new(char, maxlen);
    char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);

    for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
        if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
            *p++ = qemu_tolower(riscv_single_letter_exts[i]);
        }
    }
    *p = '\0';
    if (!cpu->cfg.short_isa_string) {
        riscv_isa_string_ext(cpu, &isa_str, maxlen);
    }
    return isa_str;
}
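/*
 * Example output (illustrative; the exact string depends on the configured
 * extensions): a CPU with I/M/A/F/D/C set in misa and Zicsr/Zifencei enabled
 * yields something like "rv64imafdc_zicsr_zifencei". With
 * short-isa-string=on the multi-letter suffix is omitted and only
 * "rv64imafdc" is produced.
 */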
static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
{
    ObjectClass *class_a = (ObjectClass *)a;
    ObjectClass *class_b = (ObjectClass *)b;
    const char *name_a, *name_b;

    name_a = object_class_get_name(class_a);
    name_b = object_class_get_name(class_b);
    return strcmp(name_a, name_b);
}
static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
{
    const char *typename = object_class_get_name(OBJECT_CLASS(data));
    int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);

    qemu_printf("%.*s\n", len, typename);
}
void riscv_cpu_list(void)
{
    GSList *list;

    list = object_class_get_list(TYPE_RISCV_CPU, false);
    list = g_slist_sort(list, riscv_cpu_list_compare);
    g_slist_foreach(list, riscv_cpu_list_entry, NULL);
    g_slist_free(list);
}
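/*
 * riscv_cpu_list() is wired up as the target's cpu_list hook, so it is what
 * backs "-cpu help": the registered CPU type names are printed with the
 * RISCV_CPU_TYPE_SUFFIX ("-riscv-cpu") stripped, e.g. "rv64" or "sifive-u54".
 */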
#define DEFINE_CPU(type_name, initfn)      \
    {                                      \
        .name = type_name,                 \
        .parent = TYPE_RISCV_CPU,          \
        .instance_init = initfn            \
    }

#define DEFINE_DYNAMIC_CPU(type_name, initfn) \
    {                                         \
        .name = type_name,                    \
        .parent = TYPE_RISCV_DYNAMIC_CPU,     \
        .instance_init = initfn               \
    }
static const TypeInfo riscv_cpu_type_infos[] = {
    {
        .name = TYPE_RISCV_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(RISCVCPU),
        .instance_align = __alignof__(RISCVCPU),
        .instance_init = riscv_cpu_init,
        .abstract = true,
        .class_size = sizeof(RISCVCPUClass),
        .class_init = riscv_cpu_class_init,
    },
    {
        .name = TYPE_RISCV_DYNAMIC_CPU,
        .parent = TYPE_RISCV_CPU,
        .abstract = true,
    },
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init),
#if defined(CONFIG_KVM)
    DEFINE_CPU(TYPE_RISCV_CPU_HOST, riscv_host_cpu_init),
#endif
#if defined(TARGET_RISCV32)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init),
#elif defined(TARGET_RISCV64)
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init),
    DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init),
    DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init),
#endif
};

DEFINE_TYPES(riscv_cpu_type_infos)