target/riscv: Add Zvknh ISA extension support
[qemu/kevin.git] / target/riscv/cpu.c
1 /*
2 * QEMU RISC-V CPU
4 * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
5 * Copyright (c) 2017-2018 SiFive, Inc.
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License,
9 * version 2 or later, as published by the Free Software Foundation.
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
21 #include "qemu/qemu-print.h"
22 #include "qemu/ctype.h"
23 #include "qemu/log.h"
24 #include "cpu.h"
25 #include "cpu_vendorid.h"
26 #include "pmu.h"
27 #include "internals.h"
28 #include "time_helper.h"
29 #include "exec/exec-all.h"
30 #include "qapi/error.h"
31 #include "qapi/visitor.h"
32 #include "qemu/error-report.h"
33 #include "hw/qdev-properties.h"
34 #include "migration/vmstate.h"
35 #include "fpu/softfloat-helpers.h"
36 #include "sysemu/kvm.h"
37 #include "sysemu/tcg.h"
38 #include "kvm_riscv.h"
39 #include "tcg/tcg.h"
41 /* RISC-V CPU definitions */
42 static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";
44 struct isa_ext_data {
45 const char *name;
46 int min_version;
47 int ext_enable_offset;
50 #define ISA_EXT_DATA_ENTRY(_name, _min_ver, _prop) \
51 {#_name, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}
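/*
 * Note: ISA_EXT_DATA_ENTRY() records an extension name, the minimum
 * privileged-spec version that may expose it, and the offsetof() of its
 * boolean enable flag inside struct RISCVCPUConfig, so the generic
 * helpers below can toggle extensions without naming each field.
 */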
54 * From vector_helper.c
55 * Note that vector data is stored in host-endian 64-bit chunks,
56 * so addressing bytes needs a host-endian fixup.
58 #if HOST_BIG_ENDIAN
59 #define BYTE(x) ((x) ^ 7)
60 #else
61 #define BYTE(x) (x)
62 #endif
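/*
 * For illustration: on a big-endian host BYTE(0) maps to byte 7 of a
 * 64-bit chunk and BYTE(7) to byte 0, so dumping a vector register
 * byte-by-byte (see riscv_cpu_dump_state() below) prints the same
 * values regardless of host endianness.
 */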
65 * Here are the ordering rules of extension naming defined by the RISC-V
66 * specification:
67 * 1. All extensions should be separated from other multi-letter extensions
68 * by an underscore.
69 * 2. The first letter following the 'Z' conventionally indicates the most
70 * closely related alphabetical extension category, IMAFDQLCBKJTPVH.
71 * If multiple 'Z' extensions are named, they should be ordered first
72 * by category, then alphabetically within a category.
73 * 3. Standard supervisor-level extensions (starting with 'S') should be
74 * listed after standard unprivileged extensions. If multiple
75 * supervisor-level extensions are listed, they should be ordered
76 * alphabetically.
77 * 4. Non-standard extensions (starting with 'X') must be listed after all
78 * standard extensions. They must be separated from other multi-letter
79 * extensions by an underscore.
81 * Single letter extensions are checked in riscv_cpu_validate_misa_priv()
82 * instead.
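 * As an illustration of the rules above, a conforming ISA string might
 * look like: rv64imafdc_zicsr_zifencei_zba_zbb_svinval_xtheadba
 * (single letters first, 'Z' extensions grouped by category, supervisor
 * 'S' extensions next, vendor 'X' extensions last).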
84 static const struct isa_ext_data isa_edata_arr[] = {
85 ISA_EXT_DATA_ENTRY(zicbom, PRIV_VERSION_1_12_0, ext_icbom),
86 ISA_EXT_DATA_ENTRY(zicboz, PRIV_VERSION_1_12_0, ext_icboz),
87 ISA_EXT_DATA_ENTRY(zicond, PRIV_VERSION_1_12_0, ext_zicond),
88 ISA_EXT_DATA_ENTRY(zicsr, PRIV_VERSION_1_10_0, ext_icsr),
89 ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei),
90 ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
91 ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul),
92 ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
93 ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
94 ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
95 ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
96 ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
97 ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
98 ISA_EXT_DATA_ENTRY(zdinx, PRIV_VERSION_1_12_0, ext_zdinx),
99 ISA_EXT_DATA_ENTRY(zca, PRIV_VERSION_1_12_0, ext_zca),
100 ISA_EXT_DATA_ENTRY(zcb, PRIV_VERSION_1_12_0, ext_zcb),
101 ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf),
102 ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd),
103 ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce),
104 ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp),
105 ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt),
106 ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba),
107 ISA_EXT_DATA_ENTRY(zbb, PRIV_VERSION_1_12_0, ext_zbb),
108 ISA_EXT_DATA_ENTRY(zbc, PRIV_VERSION_1_12_0, ext_zbc),
109 ISA_EXT_DATA_ENTRY(zbkb, PRIV_VERSION_1_12_0, ext_zbkb),
110 ISA_EXT_DATA_ENTRY(zbkc, PRIV_VERSION_1_12_0, ext_zbkc),
111 ISA_EXT_DATA_ENTRY(zbkx, PRIV_VERSION_1_12_0, ext_zbkx),
112 ISA_EXT_DATA_ENTRY(zbs, PRIV_VERSION_1_12_0, ext_zbs),
113 ISA_EXT_DATA_ENTRY(zk, PRIV_VERSION_1_12_0, ext_zk),
114 ISA_EXT_DATA_ENTRY(zkn, PRIV_VERSION_1_12_0, ext_zkn),
115 ISA_EXT_DATA_ENTRY(zknd, PRIV_VERSION_1_12_0, ext_zknd),
116 ISA_EXT_DATA_ENTRY(zkne, PRIV_VERSION_1_12_0, ext_zkne),
117 ISA_EXT_DATA_ENTRY(zknh, PRIV_VERSION_1_12_0, ext_zknh),
118 ISA_EXT_DATA_ENTRY(zkr, PRIV_VERSION_1_12_0, ext_zkr),
119 ISA_EXT_DATA_ENTRY(zks, PRIV_VERSION_1_12_0, ext_zks),
120 ISA_EXT_DATA_ENTRY(zksed, PRIV_VERSION_1_12_0, ext_zksed),
121 ISA_EXT_DATA_ENTRY(zksh, PRIV_VERSION_1_12_0, ext_zksh),
122 ISA_EXT_DATA_ENTRY(zkt, PRIV_VERSION_1_12_0, ext_zkt),
123 ISA_EXT_DATA_ENTRY(zvbb, PRIV_VERSION_1_12_0, ext_zvbb),
124 ISA_EXT_DATA_ENTRY(zvbc, PRIV_VERSION_1_12_0, ext_zvbc),
125 ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
126 ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
127 ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
128 ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
129 ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
130 ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
131 ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
132 ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
133 ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
134 ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
135 ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
136 ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
137 ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
138 ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, epmp),
139 ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
140 ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
141 ISA_EXT_DATA_ENTRY(sscofpmf, PRIV_VERSION_1_12_0, ext_sscofpmf),
142 ISA_EXT_DATA_ENTRY(sstc, PRIV_VERSION_1_12_0, ext_sstc),
143 ISA_EXT_DATA_ENTRY(svadu, PRIV_VERSION_1_12_0, ext_svadu),
144 ISA_EXT_DATA_ENTRY(svinval, PRIV_VERSION_1_12_0, ext_svinval),
145 ISA_EXT_DATA_ENTRY(svnapot, PRIV_VERSION_1_12_0, ext_svnapot),
146 ISA_EXT_DATA_ENTRY(svpbmt, PRIV_VERSION_1_12_0, ext_svpbmt),
147 ISA_EXT_DATA_ENTRY(xtheadba, PRIV_VERSION_1_11_0, ext_xtheadba),
148 ISA_EXT_DATA_ENTRY(xtheadbb, PRIV_VERSION_1_11_0, ext_xtheadbb),
149 ISA_EXT_DATA_ENTRY(xtheadbs, PRIV_VERSION_1_11_0, ext_xtheadbs),
150 ISA_EXT_DATA_ENTRY(xtheadcmo, PRIV_VERSION_1_11_0, ext_xtheadcmo),
151 ISA_EXT_DATA_ENTRY(xtheadcondmov, PRIV_VERSION_1_11_0, ext_xtheadcondmov),
152 ISA_EXT_DATA_ENTRY(xtheadfmemidx, PRIV_VERSION_1_11_0, ext_xtheadfmemidx),
153 ISA_EXT_DATA_ENTRY(xtheadfmv, PRIV_VERSION_1_11_0, ext_xtheadfmv),
154 ISA_EXT_DATA_ENTRY(xtheadmac, PRIV_VERSION_1_11_0, ext_xtheadmac),
155 ISA_EXT_DATA_ENTRY(xtheadmemidx, PRIV_VERSION_1_11_0, ext_xtheadmemidx),
156 ISA_EXT_DATA_ENTRY(xtheadmempair, PRIV_VERSION_1_11_0, ext_xtheadmempair),
157 ISA_EXT_DATA_ENTRY(xtheadsync, PRIV_VERSION_1_11_0, ext_xtheadsync),
158 ISA_EXT_DATA_ENTRY(xventanacondops, PRIV_VERSION_1_12_0, ext_XVentanaCondOps),
161 static bool isa_ext_is_enabled(RISCVCPU *cpu,
162 const struct isa_ext_data *edata)
164 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;
166 return *ext_enabled;
169 static void isa_ext_update_enabled(RISCVCPU *cpu,
170 const struct isa_ext_data *edata, bool en)
172 bool *ext_enabled = (void *)&cpu->cfg + edata->ext_enable_offset;
174 *ext_enabled = en;
177 const char * const riscv_int_regnames[] = {
178 "x0/zero", "x1/ra", "x2/sp", "x3/gp", "x4/tp", "x5/t0", "x6/t1",
179 "x7/t2", "x8/s0", "x9/s1", "x10/a0", "x11/a1", "x12/a2", "x13/a3",
180 "x14/a4", "x15/a5", "x16/a6", "x17/a7", "x18/s2", "x19/s3", "x20/s4",
181 "x21/s5", "x22/s6", "x23/s7", "x24/s8", "x25/s9", "x26/s10", "x27/s11",
182 "x28/t3", "x29/t4", "x30/t5", "x31/t6"
185 const char * const riscv_int_regnamesh[] = {
186 "x0h/zeroh", "x1h/rah", "x2h/sph", "x3h/gph", "x4h/tph", "x5h/t0h",
187 "x6h/t1h", "x7h/t2h", "x8h/s0h", "x9h/s1h", "x10h/a0h", "x11h/a1h",
188 "x12h/a2h", "x13h/a3h", "x14h/a4h", "x15h/a5h", "x16h/a6h", "x17h/a7h",
189 "x18h/s2h", "x19h/s3h", "x20h/s4h", "x21h/s5h", "x22h/s6h", "x23h/s7h",
190 "x24h/s8h", "x25h/s9h", "x26h/s10h", "x27h/s11h", "x28h/t3h", "x29h/t4h",
191 "x30h/t5h", "x31h/t6h"
194 const char * const riscv_fpr_regnames[] = {
195 "f0/ft0", "f1/ft1", "f2/ft2", "f3/ft3", "f4/ft4", "f5/ft5",
196 "f6/ft6", "f7/ft7", "f8/fs0", "f9/fs1", "f10/fa0", "f11/fa1",
197 "f12/fa2", "f13/fa3", "f14/fa4", "f15/fa5", "f16/fa6", "f17/fa7",
198 "f18/fs2", "f19/fs3", "f20/fs4", "f21/fs5", "f22/fs6", "f23/fs7",
199 "f24/fs8", "f25/fs9", "f26/fs10", "f27/fs11", "f28/ft8", "f29/ft9",
200 "f30/ft10", "f31/ft11"
203 const char * const riscv_rvv_regnames[] = {
204 "v0", "v1", "v2", "v3", "v4", "v5", "v6",
205 "v7", "v8", "v9", "v10", "v11", "v12", "v13",
206 "v14", "v15", "v16", "v17", "v18", "v19", "v20",
207 "v21", "v22", "v23", "v24", "v25", "v26", "v27",
208 "v28", "v29", "v30", "v31"
211 static const char * const riscv_excp_names[] = {
212 "misaligned_fetch",
213 "fault_fetch",
214 "illegal_instruction",
215 "breakpoint",
216 "misaligned_load",
217 "fault_load",
218 "misaligned_store",
219 "fault_store",
220 "user_ecall",
221 "supervisor_ecall",
222 "hypervisor_ecall",
223 "machine_ecall",
224 "exec_page_fault",
225 "load_page_fault",
226 "reserved",
227 "store_page_fault",
228 "reserved",
229 "reserved",
230 "reserved",
231 "reserved",
232 "guest_exec_page_fault",
233 "guest_load_page_fault",
234 "reserved",
235 "guest_store_page_fault",
238 static const char * const riscv_intr_names[] = {
239 "u_software",
240 "s_software",
241 "vs_software",
242 "m_software",
243 "u_timer",
244 "s_timer",
245 "vs_timer",
246 "m_timer",
247 "u_external",
248 "s_external",
249 "vs_external",
250 "m_external",
251 "reserved",
252 "reserved",
253 "reserved",
254 "reserved"
257 static void riscv_cpu_add_user_properties(Object *obj);
259 const char *riscv_cpu_get_trap_name(target_ulong cause, bool async)
261 if (async) {
262 return (cause < ARRAY_SIZE(riscv_intr_names)) ?
263 riscv_intr_names[cause] : "(unknown)";
264 } else {
265 return (cause < ARRAY_SIZE(riscv_excp_names)) ?
266 riscv_excp_names[cause] : "(unknown)";
270 static void set_misa(CPURISCVState *env, RISCVMXL mxl, uint32_t ext)
272 env->misa_mxl_max = env->misa_mxl = mxl;
273 env->misa_ext_mask = env->misa_ext = ext;
276 #ifndef CONFIG_USER_ONLY
277 static uint8_t satp_mode_from_str(const char *satp_mode_str)
279 if (!strncmp(satp_mode_str, "mbare", 5)) {
280 return VM_1_10_MBARE;
283 if (!strncmp(satp_mode_str, "sv32", 4)) {
284 return VM_1_10_SV32;
287 if (!strncmp(satp_mode_str, "sv39", 4)) {
288 return VM_1_10_SV39;
291 if (!strncmp(satp_mode_str, "sv48", 4)) {
292 return VM_1_10_SV48;
295 if (!strncmp(satp_mode_str, "sv57", 4)) {
296 return VM_1_10_SV57;
299 if (!strncmp(satp_mode_str, "sv64", 4)) {
300 return VM_1_10_SV64;
303 g_assert_not_reached();
306 uint8_t satp_mode_max_from_map(uint32_t map)
308 /* map here has at least one bit set, so no problem with clz */
309 return 31 - __builtin_clz(map);
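/* The result is the VM_1_10_* constant of the highest mode set in 'map'. */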
312 const char *satp_mode_str(uint8_t satp_mode, bool is_32_bit)
314 if (is_32_bit) {
315 switch (satp_mode) {
316 case VM_1_10_SV32:
317 return "sv32";
318 case VM_1_10_MBARE:
319 return "none";
321 } else {
322 switch (satp_mode) {
323 case VM_1_10_SV64:
324 return "sv64";
325 case VM_1_10_SV57:
326 return "sv57";
327 case VM_1_10_SV48:
328 return "sv48";
329 case VM_1_10_SV39:
330 return "sv39";
331 case VM_1_10_MBARE:
332 return "none";
336 g_assert_not_reached();
339 static void set_satp_mode_max_supported(RISCVCPU *cpu,
340 uint8_t satp_mode)
342 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
343 const bool *valid_vm = rv32 ? valid_vm_1_10_32 : valid_vm_1_10_64;
345 for (int i = 0; i <= satp_mode; ++i) {
346 if (valid_vm[i]) {
347 cpu->cfg.satp_mode.supported |= (1 << i);
352 /* Set the satp mode to the max supported */
353 static void set_satp_mode_default_map(RISCVCPU *cpu)
355 cpu->cfg.satp_mode.map = cpu->cfg.satp_mode.supported;
357 #endif
359 static void riscv_any_cpu_init(Object *obj)
361 RISCVCPU *cpu = RISCV_CPU(obj);
362 CPURISCVState *env = &cpu->env;
363 #if defined(TARGET_RISCV32)
364 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
365 #elif defined(TARGET_RISCV64)
366 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVU);
367 #endif
369 #ifndef CONFIG_USER_ONLY
370 set_satp_mode_max_supported(RISCV_CPU(obj),
371 riscv_cpu_mxl(&RISCV_CPU(obj)->env) == MXL_RV32 ?
372 VM_1_10_SV32 : VM_1_10_SV57);
373 #endif
375 env->priv_ver = PRIV_VERSION_LATEST;
377 /* inherited from parent obj via riscv_cpu_init() */
378 cpu->cfg.ext_ifencei = true;
379 cpu->cfg.ext_icsr = true;
380 cpu->cfg.mmu = true;
381 cpu->cfg.pmp = true;
384 #if defined(TARGET_RISCV64)
385 static void rv64_base_cpu_init(Object *obj)
387 CPURISCVState *env = &RISCV_CPU(obj)->env;
388 /* We set this in the realise function */
389 set_misa(env, MXL_RV64, 0);
390 riscv_cpu_add_user_properties(obj);
391 /* Set latest version of privileged specification */
392 env->priv_ver = PRIV_VERSION_LATEST;
393 #ifndef CONFIG_USER_ONLY
394 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
395 #endif
398 static void rv64_sifive_u_cpu_init(Object *obj)
400 RISCVCPU *cpu = RISCV_CPU(obj);
401 CPURISCVState *env = &cpu->env;
402 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
403 env->priv_ver = PRIV_VERSION_1_10_0;
404 #ifndef CONFIG_USER_ONLY
405 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV39);
406 #endif
408 /* inherited from parent obj via riscv_cpu_init() */
409 cpu->cfg.ext_ifencei = true;
410 cpu->cfg.ext_icsr = true;
411 cpu->cfg.mmu = true;
412 cpu->cfg.pmp = true;
415 static void rv64_sifive_e_cpu_init(Object *obj)
417 CPURISCVState *env = &RISCV_CPU(obj)->env;
418 RISCVCPU *cpu = RISCV_CPU(obj);
420 set_misa(env, MXL_RV64, RVI | RVM | RVA | RVC | RVU);
421 env->priv_ver = PRIV_VERSION_1_10_0;
422 #ifndef CONFIG_USER_ONLY
423 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
424 #endif
426 /* inherited from parent obj via riscv_cpu_init() */
427 cpu->cfg.ext_ifencei = true;
428 cpu->cfg.ext_icsr = true;
429 cpu->cfg.pmp = true;
432 static void rv64_thead_c906_cpu_init(Object *obj)
434 CPURISCVState *env = &RISCV_CPU(obj)->env;
435 RISCVCPU *cpu = RISCV_CPU(obj);
437 set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
438 env->priv_ver = PRIV_VERSION_1_11_0;
440 cpu->cfg.ext_zfa = true;
441 cpu->cfg.ext_zfh = true;
442 cpu->cfg.mmu = true;
443 cpu->cfg.ext_xtheadba = true;
444 cpu->cfg.ext_xtheadbb = true;
445 cpu->cfg.ext_xtheadbs = true;
446 cpu->cfg.ext_xtheadcmo = true;
447 cpu->cfg.ext_xtheadcondmov = true;
448 cpu->cfg.ext_xtheadfmemidx = true;
449 cpu->cfg.ext_xtheadmac = true;
450 cpu->cfg.ext_xtheadmemidx = true;
451 cpu->cfg.ext_xtheadmempair = true;
452 cpu->cfg.ext_xtheadsync = true;
454 cpu->cfg.mvendorid = THEAD_VENDOR_ID;
455 #ifndef CONFIG_USER_ONLY
456 set_satp_mode_max_supported(cpu, VM_1_10_SV39);
457 #endif
459 /* inherited from parent obj via riscv_cpu_init() */
460 cpu->cfg.pmp = true;
463 static void rv64_veyron_v1_cpu_init(Object *obj)
465 CPURISCVState *env = &RISCV_CPU(obj)->env;
466 RISCVCPU *cpu = RISCV_CPU(obj);
468 set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU | RVH);
469 env->priv_ver = PRIV_VERSION_1_12_0;
471 /* Enable ISA extensions */
472 cpu->cfg.mmu = true;
473 cpu->cfg.ext_ifencei = true;
474 cpu->cfg.ext_icsr = true;
475 cpu->cfg.pmp = true;
476 cpu->cfg.ext_icbom = true;
477 cpu->cfg.cbom_blocksize = 64;
478 cpu->cfg.cboz_blocksize = 64;
479 cpu->cfg.ext_icboz = true;
480 cpu->cfg.ext_smaia = true;
481 cpu->cfg.ext_ssaia = true;
482 cpu->cfg.ext_sscofpmf = true;
483 cpu->cfg.ext_sstc = true;
484 cpu->cfg.ext_svinval = true;
485 cpu->cfg.ext_svnapot = true;
486 cpu->cfg.ext_svpbmt = true;
487 cpu->cfg.ext_smstateen = true;
488 cpu->cfg.ext_zba = true;
489 cpu->cfg.ext_zbb = true;
490 cpu->cfg.ext_zbc = true;
491 cpu->cfg.ext_zbs = true;
492 cpu->cfg.ext_XVentanaCondOps = true;
494 cpu->cfg.mvendorid = VEYRON_V1_MVENDORID;
495 cpu->cfg.marchid = VEYRON_V1_MARCHID;
496 cpu->cfg.mimpid = VEYRON_V1_MIMPID;
498 #ifndef CONFIG_USER_ONLY
499 set_satp_mode_max_supported(cpu, VM_1_10_SV48);
500 #endif
503 static void rv128_base_cpu_init(Object *obj)
505 if (qemu_tcg_mttcg_enabled()) {
506 /* Missing 128-bit aligned atomics */
507 error_report("128-bit RISC-V currently does not work with Multi "
508 "Threaded TCG. Please use: -accel tcg,thread=single");
509 exit(EXIT_FAILURE);
511 CPURISCVState *env = &RISCV_CPU(obj)->env;
512 /* We set this in the realise function */
513 set_misa(env, MXL_RV128, 0);
514 riscv_cpu_add_user_properties(obj);
515 /* Set latest version of privileged specification */
516 env->priv_ver = PRIV_VERSION_LATEST;
517 #ifndef CONFIG_USER_ONLY
518 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV57);
519 #endif
521 #else
522 static void rv32_base_cpu_init(Object *obj)
524 CPURISCVState *env = &RISCV_CPU(obj)->env;
525 /* We set this in the realise function */
526 set_misa(env, MXL_RV32, 0);
527 riscv_cpu_add_user_properties(obj);
528 /* Set latest version of privileged specification */
529 env->priv_ver = PRIV_VERSION_LATEST;
530 #ifndef CONFIG_USER_ONLY
531 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
532 #endif
535 static void rv32_sifive_u_cpu_init(Object *obj)
537 RISCVCPU *cpu = RISCV_CPU(obj);
538 CPURISCVState *env = &cpu->env;
539 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
540 env->priv_ver = PRIV_VERSION_1_10_0;
541 #ifndef CONFIG_USER_ONLY
542 set_satp_mode_max_supported(RISCV_CPU(obj), VM_1_10_SV32);
543 #endif
545 /* inherited from parent obj via riscv_cpu_init() */
546 cpu->cfg.ext_ifencei = true;
547 cpu->cfg.ext_icsr = true;
548 cpu->cfg.mmu = true;
549 cpu->cfg.pmp = true;
552 static void rv32_sifive_e_cpu_init(Object *obj)
554 CPURISCVState *env = &RISCV_CPU(obj)->env;
555 RISCVCPU *cpu = RISCV_CPU(obj);
557 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVC | RVU);
558 env->priv_ver = PRIV_VERSION_1_10_0;
559 #ifndef CONFIG_USER_ONLY
560 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
561 #endif
563 /* inherited from parent obj via riscv_cpu_init() */
564 cpu->cfg.ext_ifencei = true;
565 cpu->cfg.ext_icsr = true;
566 cpu->cfg.pmp = true;
569 static void rv32_ibex_cpu_init(Object *obj)
571 CPURISCVState *env = &RISCV_CPU(obj)->env;
572 RISCVCPU *cpu = RISCV_CPU(obj);
574 set_misa(env, MXL_RV32, RVI | RVM | RVC | RVU);
575 env->priv_ver = PRIV_VERSION_1_11_0;
576 #ifndef CONFIG_USER_ONLY
577 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
578 #endif
579 cpu->cfg.epmp = true;
581 /* inherited from parent obj via riscv_cpu_init() */
582 cpu->cfg.ext_ifencei = true;
583 cpu->cfg.ext_icsr = true;
584 cpu->cfg.pmp = true;
587 static void rv32_imafcu_nommu_cpu_init(Object *obj)
589 CPURISCVState *env = &RISCV_CPU(obj)->env;
590 RISCVCPU *cpu = RISCV_CPU(obj);
592 set_misa(env, MXL_RV32, RVI | RVM | RVA | RVF | RVC | RVU);
593 env->priv_ver = PRIV_VERSION_1_10_0;
594 #ifndef CONFIG_USER_ONLY
595 set_satp_mode_max_supported(cpu, VM_1_10_MBARE);
596 #endif
598 /* inherited from parent obj via riscv_cpu_init() */
599 cpu->cfg.ext_ifencei = true;
600 cpu->cfg.ext_icsr = true;
601 cpu->cfg.pmp = true;
603 #endif
605 #if defined(CONFIG_KVM)
606 static void riscv_host_cpu_init(Object *obj)
608 CPURISCVState *env = &RISCV_CPU(obj)->env;
609 #if defined(TARGET_RISCV32)
610 set_misa(env, MXL_RV32, 0);
611 #elif defined(TARGET_RISCV64)
612 set_misa(env, MXL_RV64, 0);
613 #endif
614 riscv_cpu_add_user_properties(obj);
616 #endif /* CONFIG_KVM */
618 static ObjectClass *riscv_cpu_class_by_name(const char *cpu_model)
620 ObjectClass *oc;
621 char *typename;
622 char **cpuname;
624 cpuname = g_strsplit(cpu_model, ",", 1);
625 typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
626 oc = object_class_by_name(typename);
627 g_strfreev(cpuname);
628 g_free(typename);
629 if (!oc || !object_class_dynamic_cast(oc, TYPE_RISCV_CPU) ||
630 object_class_is_abstract(oc)) {
631 return NULL;
633 return oc;
636 static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
638 RISCVCPU *cpu = RISCV_CPU(cs);
639 CPURISCVState *env = &cpu->env;
640 int i, j;
641 uint8_t *p;
643 #if !defined(CONFIG_USER_ONLY)
644 if (riscv_has_ext(env, RVH)) {
645 qemu_fprintf(f, " %s %d\n", "V = ", env->virt_enabled);
647 #endif
648 qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
649 #ifndef CONFIG_USER_ONLY
651 static const int dump_csrs[] = {
652 CSR_MHARTID,
653 CSR_MSTATUS,
654 CSR_MSTATUSH,
656 * CSR_SSTATUS is intentionally omitted here as its value
657 * can be figured out by looking at CSR_MSTATUS
659 CSR_HSTATUS,
660 CSR_VSSTATUS,
661 CSR_MIP,
662 CSR_MIE,
663 CSR_MIDELEG,
664 CSR_HIDELEG,
665 CSR_MEDELEG,
666 CSR_HEDELEG,
667 CSR_MTVEC,
668 CSR_STVEC,
669 CSR_VSTVEC,
670 CSR_MEPC,
671 CSR_SEPC,
672 CSR_VSEPC,
673 CSR_MCAUSE,
674 CSR_SCAUSE,
675 CSR_VSCAUSE,
676 CSR_MTVAL,
677 CSR_STVAL,
678 CSR_HTVAL,
679 CSR_MTVAL2,
680 CSR_MSCRATCH,
681 CSR_SSCRATCH,
682 CSR_SATP,
683 CSR_MMTE,
684 CSR_UPMBASE,
685 CSR_UPMMASK,
686 CSR_SPMBASE,
687 CSR_SPMMASK,
688 CSR_MPMBASE,
689 CSR_MPMMASK,
692 for (int i = 0; i < ARRAY_SIZE(dump_csrs); ++i) {
693 int csrno = dump_csrs[i];
694 target_ulong val = 0;
695 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
698 * Rely on the smode, hmode, etc, predicates within csr.c
699 * to do the filtering of the registers that are present.
701 if (res == RISCV_EXCP_NONE) {
702 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
703 csr_ops[csrno].name, val);
707 #endif
709 for (i = 0; i < 32; i++) {
710 qemu_fprintf(f, " %-8s " TARGET_FMT_lx,
711 riscv_int_regnames[i], env->gpr[i]);
712 if ((i & 3) == 3) {
713 qemu_fprintf(f, "\n");
716 if (flags & CPU_DUMP_FPU) {
717 for (i = 0; i < 32; i++) {
718 qemu_fprintf(f, " %-8s %016" PRIx64,
719 riscv_fpr_regnames[i], env->fpr[i]);
720 if ((i & 3) == 3) {
721 qemu_fprintf(f, "\n");
725 if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
726 static const int dump_rvv_csrs[] = {
727 CSR_VSTART,
728 CSR_VXSAT,
729 CSR_VXRM,
730 CSR_VCSR,
731 CSR_VL,
732 CSR_VTYPE,
733 CSR_VLENB,
735 for (int i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
736 int csrno = dump_rvv_csrs[i];
737 target_ulong val = 0;
738 RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
741 * Rely on the smode, hmode, etc, predicates within csr.c
742 * to do the filtering of the registers that are present.
744 if (res == RISCV_EXCP_NONE) {
745 qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
746 csr_ops[csrno].name, val);
749 uint16_t vlenb = cpu->cfg.vlen >> 3;
751 for (i = 0; i < 32; i++) {
752 qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
753 p = (uint8_t *)env->vreg;
754 for (j = vlenb - 1 ; j >= 0; j--) {
755 qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
757 qemu_fprintf(f, "\n");
762 static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
764 RISCVCPU *cpu = RISCV_CPU(cs);
765 CPURISCVState *env = &cpu->env;
767 if (env->xl == MXL_RV32) {
768 env->pc = (int32_t)value;
769 } else {
770 env->pc = value;
774 static vaddr riscv_cpu_get_pc(CPUState *cs)
776 RISCVCPU *cpu = RISCV_CPU(cs);
777 CPURISCVState *env = &cpu->env;
779 /* Match cpu_get_tb_cpu_state. */
780 if (env->xl == MXL_RV32) {
781 return env->pc & UINT32_MAX;
783 return env->pc;
786 static void riscv_cpu_synchronize_from_tb(CPUState *cs,
787 const TranslationBlock *tb)
789 if (!(tb_cflags(tb) & CF_PCREL)) {
790 RISCVCPU *cpu = RISCV_CPU(cs);
791 CPURISCVState *env = &cpu->env;
792 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
794 tcg_debug_assert(!(cs->tcg_cflags & CF_PCREL));
796 if (xl == MXL_RV32) {
797 env->pc = (int32_t) tb->pc;
798 } else {
799 env->pc = tb->pc;
804 static bool riscv_cpu_has_work(CPUState *cs)
806 #ifndef CONFIG_USER_ONLY
807 RISCVCPU *cpu = RISCV_CPU(cs);
808 CPURISCVState *env = &cpu->env;
810 * Definition of the WFI instruction requires it to ignore the privilege
811 * mode and delegation registers, but respect individual enables
813 return riscv_cpu_all_pending(env) != 0;
814 #else
815 return true;
816 #endif
819 static void riscv_restore_state_to_opc(CPUState *cs,
820 const TranslationBlock *tb,
821 const uint64_t *data)
823 RISCVCPU *cpu = RISCV_CPU(cs);
824 CPURISCVState *env = &cpu->env;
825 RISCVMXL xl = FIELD_EX32(tb->flags, TB_FLAGS, XL);
826 target_ulong pc;
828 if (tb_cflags(tb) & CF_PCREL) {
829 pc = (env->pc & TARGET_PAGE_MASK) | data[0];
830 } else {
831 pc = data[0];
834 if (xl == MXL_RV32) {
835 env->pc = (int32_t)pc;
836 } else {
837 env->pc = pc;
839 env->bins = data[1];
842 static void riscv_cpu_reset_hold(Object *obj)
844 #ifndef CONFIG_USER_ONLY
845 uint8_t iprio;
846 int i, irq, rdzero;
847 #endif
848 CPUState *cs = CPU(obj);
849 RISCVCPU *cpu = RISCV_CPU(cs);
850 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
851 CPURISCVState *env = &cpu->env;
853 if (mcc->parent_phases.hold) {
854 mcc->parent_phases.hold(obj);
856 #ifndef CONFIG_USER_ONLY
857 env->misa_mxl = env->misa_mxl_max;
858 env->priv = PRV_M;
859 env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
860 if (env->misa_mxl > MXL_RV32) {
862 * The reset status of SXL/UXL is undefined, but mstatus is WARL
863 * and we must ensure that the value after init is valid for read.
865 env->mstatus = set_field(env->mstatus, MSTATUS64_SXL, env->misa_mxl);
866 env->mstatus = set_field(env->mstatus, MSTATUS64_UXL, env->misa_mxl);
867 if (riscv_has_ext(env, RVH)) {
868 env->vsstatus = set_field(env->vsstatus,
869 MSTATUS64_SXL, env->misa_mxl);
870 env->vsstatus = set_field(env->vsstatus,
871 MSTATUS64_UXL, env->misa_mxl);
872 env->mstatus_hs = set_field(env->mstatus_hs,
873 MSTATUS64_SXL, env->misa_mxl);
874 env->mstatus_hs = set_field(env->mstatus_hs,
875 MSTATUS64_UXL, env->misa_mxl);
878 env->mcause = 0;
879 env->miclaim = MIP_SGEIP;
880 env->pc = env->resetvec;
881 env->bins = 0;
882 env->two_stage_lookup = false;
884 env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) |
885 (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0);
886 env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) |
887 (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0);
889 /* Initialize default priorities of local interrupts. */
890 for (i = 0; i < ARRAY_SIZE(env->miprio); i++) {
891 iprio = riscv_cpu_default_priority(i);
892 env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio;
893 env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio;
894 env->hviprio[i] = 0;
896 i = 0;
897 while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) {
898 if (!rdzero) {
899 env->hviprio[irq] = env->miprio[irq];
901 i++;
903 /* mmte is supposed to have pm.current hardwired to 1 */
904 env->mmte |= (EXT_STATUS_INITIAL | MMTE_M_PM_CURRENT);
905 #endif
906 env->xl = riscv_cpu_mxl(env);
907 riscv_cpu_update_mask(env);
908 cs->exception_index = RISCV_EXCP_NONE;
909 env->load_res = -1;
910 set_default_nan_mode(1, &env->fp_status);
912 #ifndef CONFIG_USER_ONLY
913 if (cpu->cfg.debug) {
914 riscv_trigger_init(env);
917 if (kvm_enabled()) {
918 kvm_riscv_reset_vcpu(cpu);
920 #endif
923 static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
925 RISCVCPU *cpu = RISCV_CPU(s);
926 CPURISCVState *env = &cpu->env;
927 info->target_info = &cpu->cfg;
929 switch (env->xl) {
930 case MXL_RV32:
931 info->print_insn = print_insn_riscv32;
932 break;
933 case MXL_RV64:
934 info->print_insn = print_insn_riscv64;
935 break;
936 case MXL_RV128:
937 info->print_insn = print_insn_riscv128;
938 break;
939 default:
940 g_assert_not_reached();
944 static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg,
945 Error **errp)
947 int vext_version = VEXT_VERSION_1_00_0;
949 if (!is_power_of_2(cfg->vlen)) {
950 error_setg(errp, "Vector extension VLEN must be power of 2");
951 return;
953 if (cfg->vlen > RV_VLEN_MAX || cfg->vlen < 128) {
954 error_setg(errp,
955 "Vector extension implementation only supports VLEN "
956 "in the range [128, %d]", RV_VLEN_MAX);
957 return;
959 if (!is_power_of_2(cfg->elen)) {
960 error_setg(errp, "Vector extension ELEN must be power of 2");
961 return;
963 if (cfg->elen > 64 || cfg->elen < 8) {
964 error_setg(errp,
965 "Vector extension implementation only supports ELEN "
966 "in the range [8, 64]");
967 return;
969 if (cfg->vext_spec) {
970 if (!g_strcmp0(cfg->vext_spec, "v1.0")) {
971 vext_version = VEXT_VERSION_1_00_0;
972 } else {
973 error_setg(errp, "Unsupported vector spec version '%s'",
974 cfg->vext_spec);
975 return;
977 } else {
978 qemu_log("vector version is not specified, "
979 "use the default value v1.0\n");
981 env->vext_ver = vext_version;
984 static void riscv_cpu_validate_priv_spec(RISCVCPU *cpu, Error **errp)
986 CPURISCVState *env = &cpu->env;
987 int priv_version = -1;
989 if (cpu->cfg.priv_spec) {
990 if (!g_strcmp0(cpu->cfg.priv_spec, "v1.12.0")) {
991 priv_version = PRIV_VERSION_1_12_0;
992 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) {
993 priv_version = PRIV_VERSION_1_11_0;
994 } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) {
995 priv_version = PRIV_VERSION_1_10_0;
996 } else {
997 error_setg(errp,
998 "Unsupported privilege spec version '%s'",
999 cpu->cfg.priv_spec);
1000 return;
1003 env->priv_ver = priv_version;
1007 static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu)
1009 CPURISCVState *env = &cpu->env;
1010 int i;
1012 /* Force disable extensions if priv spec version does not match */
1013 for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
1014 if (isa_ext_is_enabled(cpu, &isa_edata_arr[i]) &&
1015 (env->priv_ver < isa_edata_arr[i].min_version)) {
1016 isa_ext_update_enabled(cpu, &isa_edata_arr[i], false);
1017 #ifndef CONFIG_USER_ONLY
1018 warn_report("disabling %s extension for hart 0x" TARGET_FMT_lx
1019 " because privilege spec version does not match",
1020 isa_edata_arr[i].name, env->mhartid);
1021 #else
1022 warn_report("disabling %s extension because "
1023 "privilege spec version does not match",
1024 isa_edata_arr[i].name);
1025 #endif
1030 static void riscv_cpu_validate_misa_mxl(RISCVCPU *cpu, Error **errp)
1032 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu);
1033 CPUClass *cc = CPU_CLASS(mcc);
1034 CPURISCVState *env = &cpu->env;
1036 /* Validate that MISA_MXL is set properly. */
1037 switch (env->misa_mxl_max) {
1038 #ifdef TARGET_RISCV64
1039 case MXL_RV64:
1040 case MXL_RV128:
1041 cc->gdb_core_xml_file = "riscv-64bit-cpu.xml";
1042 break;
1043 #endif
1044 case MXL_RV32:
1045 cc->gdb_core_xml_file = "riscv-32bit-cpu.xml";
1046 break;
1047 default:
1048 g_assert_not_reached();
1051 if (env->misa_mxl_max != env->misa_mxl) {
1052 error_setg(errp, "misa_mxl_max must be equal to misa_mxl");
1053 return;
1058 * Check consistency between chosen extensions while setting
1059 * cpu->cfg accordingly.
1061 void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
1063 CPURISCVState *env = &cpu->env;
1064 Error *local_err = NULL;
1066 /* Do some ISA extension error checking */
1067 if (riscv_has_ext(env, RVG) &&
1068 !(riscv_has_ext(env, RVI) && riscv_has_ext(env, RVM) &&
1069 riscv_has_ext(env, RVA) && riscv_has_ext(env, RVF) &&
1070 riscv_has_ext(env, RVD) &&
1071 cpu->cfg.ext_icsr && cpu->cfg.ext_ifencei)) {
1072 warn_report("Setting G will also set IMAFD_Zicsr_Zifencei");
1073 cpu->cfg.ext_icsr = true;
1074 cpu->cfg.ext_ifencei = true;
1076 env->misa_ext |= RVI | RVM | RVA | RVF | RVD;
1077 env->misa_ext_mask |= RVI | RVM | RVA | RVF | RVD;
1080 if (riscv_has_ext(env, RVI) && riscv_has_ext(env, RVE)) {
1081 error_setg(errp,
1082 "I and E extensions are incompatible");
1083 return;
1086 if (!riscv_has_ext(env, RVI) && !riscv_has_ext(env, RVE)) {
1087 error_setg(errp,
1088 "Either I or E extension must be set");
1089 return;
1092 if (riscv_has_ext(env, RVS) && !riscv_has_ext(env, RVU)) {
1093 error_setg(errp,
1094 "Setting S extension without U extension is illegal");
1095 return;
1098 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVI)) {
1099 error_setg(errp,
1100 "H depends on an I base integer ISA with 32 x registers");
1101 return;
1104 if (riscv_has_ext(env, RVH) && !riscv_has_ext(env, RVS)) {
1105 error_setg(errp, "H extension implicitly requires S-mode");
1106 return;
1109 if (riscv_has_ext(env, RVF) && !cpu->cfg.ext_icsr) {
1110 error_setg(errp, "F extension requires Zicsr");
1111 return;
1114 if ((cpu->cfg.ext_zawrs) && !riscv_has_ext(env, RVA)) {
1115 error_setg(errp, "Zawrs extension requires A extension");
1116 return;
1119 if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
1120 error_setg(errp, "Zfa extension requires F extension");
1121 return;
1124 if (cpu->cfg.ext_zfh) {
1125 cpu->cfg.ext_zfhmin = true;
1128 if (cpu->cfg.ext_zfhmin && !riscv_has_ext(env, RVF)) {
1129 error_setg(errp, "Zfh/Zfhmin extensions require F extension");
1130 return;
1133 if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
1134 error_setg(errp, "Zfbfmin extension depends on F extension");
1135 return;
1138 if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
1139 error_setg(errp, "D extension requires F extension");
1140 return;
1143 if (riscv_has_ext(env, RVV)) {
1144 riscv_cpu_validate_v(env, &cpu->cfg, &local_err);
1145 if (local_err != NULL) {
1146 error_propagate(errp, local_err);
1147 return;
1150 /* The V vector extension depends on the Zve64d extension */
1151 cpu->cfg.ext_zve64d = true;
1154 /* The Zve64d extension depends on the Zve64f extension */
1155 if (cpu->cfg.ext_zve64d) {
1156 cpu->cfg.ext_zve64f = true;
1159 /* The Zve64f extension depends on the Zve32f extension */
1160 if (cpu->cfg.ext_zve64f) {
1161 cpu->cfg.ext_zve32f = true;
1164 if (cpu->cfg.ext_zve64d && !riscv_has_ext(env, RVD)) {
1165 error_setg(errp, "Zve64d/V extensions require D extension");
1166 return;
1169 if (cpu->cfg.ext_zve32f && !riscv_has_ext(env, RVF)) {
1170 error_setg(errp, "Zve32f/Zve64f extensions require F extension");
1171 return;
1174 if (cpu->cfg.ext_zvfh) {
1175 cpu->cfg.ext_zvfhmin = true;
1178 if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) {
1179 error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension");
1180 return;
1183 if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) {
1184 error_setg(errp, "Zvfh extension requires Zfhmin extension");
1185 return;
1188 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) {
1189 error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension");
1190 return;
1193 if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
1194 error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
1195 return;
1198 if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
1199 error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
1200 return;
1203 /* Set the ISA extensions, checks should have happened above */
1204 if (cpu->cfg.ext_zhinx) {
1205 cpu->cfg.ext_zhinxmin = true;
1208 if ((cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) && !cpu->cfg.ext_zfinx) {
1209 error_setg(errp, "Zdinx/Zhinx/Zhinxmin extensions require Zfinx");
1210 return;
1213 if (cpu->cfg.ext_zfinx) {
1214 if (!cpu->cfg.ext_icsr) {
1215 error_setg(errp, "Zfinx extension requires Zicsr");
1216 return;
1218 if (riscv_has_ext(env, RVF)) {
1219 error_setg(errp,
1220 "Zfinx cannot be supported together with F extension");
1221 return;
1225 if (cpu->cfg.ext_zce) {
1226 cpu->cfg.ext_zca = true;
1227 cpu->cfg.ext_zcb = true;
1228 cpu->cfg.ext_zcmp = true;
1229 cpu->cfg.ext_zcmt = true;
1230 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
1231 cpu->cfg.ext_zcf = true;
1235 /* zca, zcd and zcf have a PRIV 1.12.0 restriction */
1236 if (riscv_has_ext(env, RVC) && env->priv_ver >= PRIV_VERSION_1_12_0) {
1237 cpu->cfg.ext_zca = true;
1238 if (riscv_has_ext(env, RVF) && env->misa_mxl_max == MXL_RV32) {
1239 cpu->cfg.ext_zcf = true;
1241 if (riscv_has_ext(env, RVD)) {
1242 cpu->cfg.ext_zcd = true;
1246 if (env->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) {
1247 error_setg(errp, "Zcf extension is only relevant to RV32");
1248 return;
1251 if (!riscv_has_ext(env, RVF) && cpu->cfg.ext_zcf) {
1252 error_setg(errp, "Zcf extension requires F extension");
1253 return;
1256 if (!riscv_has_ext(env, RVD) && cpu->cfg.ext_zcd) {
1257 error_setg(errp, "Zcd extension requires D extension");
1258 return;
1261 if ((cpu->cfg.ext_zcf || cpu->cfg.ext_zcd || cpu->cfg.ext_zcb ||
1262 cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt) && !cpu->cfg.ext_zca) {
1263 error_setg(errp, "Zcf/Zcd/Zcb/Zcmp/Zcmt extensions require Zca "
1264 "extension");
1265 return;
1268 if (cpu->cfg.ext_zcd && (cpu->cfg.ext_zcmp || cpu->cfg.ext_zcmt)) {
1269 error_setg(errp, "Zcmp/Zcmt extensions are incompatible with "
1270 "Zcd extension");
1271 return;
1274 if (cpu->cfg.ext_zcmt && !cpu->cfg.ext_icsr) {
1275 error_setg(errp, "Zcmt extension requires Zicsr extension");
1276 return;
1280 * In principle Zve*x would also suffice here, were they supported
1281 * in qemu
1283 if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha) &&
1284 !cpu->cfg.ext_zve32f) {
1285 error_setg(errp,
1286 "Vector crypto extensions require V or Zve* extensions");
1287 return;
1290 if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64f) {
1291 error_setg(
1292 errp,
1293 "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions");
1294 return;
1297 if (cpu->cfg.ext_zk) {
1298 cpu->cfg.ext_zkn = true;
1299 cpu->cfg.ext_zkr = true;
1300 cpu->cfg.ext_zkt = true;
1303 if (cpu->cfg.ext_zkn) {
1304 cpu->cfg.ext_zbkb = true;
1305 cpu->cfg.ext_zbkc = true;
1306 cpu->cfg.ext_zbkx = true;
1307 cpu->cfg.ext_zkne = true;
1308 cpu->cfg.ext_zknd = true;
1309 cpu->cfg.ext_zknh = true;
1312 if (cpu->cfg.ext_zks) {
1313 cpu->cfg.ext_zbkb = true;
1314 cpu->cfg.ext_zbkc = true;
1315 cpu->cfg.ext_zbkx = true;
1316 cpu->cfg.ext_zksed = true;
1317 cpu->cfg.ext_zksh = true;
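/*
 * Zk, Zkn and Zks above act as umbrella extensions: enabling one of them
 * turns on all of its member extensions before the privileged-spec
 * filtering below runs.
 */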
1321 * Disable isa extensions based on priv spec after we
1322 * validated and set everything we need.
1324 riscv_cpu_disable_priv_spec_isa_exts(cpu);
1327 #ifndef CONFIG_USER_ONLY
1328 static void riscv_cpu_satp_mode_finalize(RISCVCPU *cpu, Error **errp)
1330 bool rv32 = riscv_cpu_mxl(&cpu->env) == MXL_RV32;
1331 uint8_t satp_mode_map_max;
1332 uint8_t satp_mode_supported_max =
1333 satp_mode_max_from_map(cpu->cfg.satp_mode.supported);
1335 if (cpu->cfg.satp_mode.map == 0) {
1336 if (cpu->cfg.satp_mode.init == 0) {
1337 /* If unset by the user, we fall back to the default satp mode. */
1338 set_satp_mode_default_map(cpu);
1339 } else {
1341 * Find the lowest level that was disabled and then enable the
1342 * first valid level below it, as found in
1343 * valid_vm_1_10_32/64.
1345 for (int i = 1; i < 16; ++i) {
1346 if ((cpu->cfg.satp_mode.init & (1 << i)) &&
1347 (cpu->cfg.satp_mode.supported & (1 << i))) {
1348 for (int j = i - 1; j >= 0; --j) {
1349 if (cpu->cfg.satp_mode.supported & (1 << j)) {
1350 cpu->cfg.satp_mode.map |= (1 << j);
1351 break;
1354 break;
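/*
 * For example, disabling sv48 on an rv64 CPU leaves sv39 as the highest
 * mode in the map, which is then expanded downwards at the end of this
 * function.
 */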
1360 satp_mode_map_max = satp_mode_max_from_map(cpu->cfg.satp_mode.map);
1362 /* Make sure the user asked for a supported configuration (HW and qemu) */
1363 if (satp_mode_map_max > satp_mode_supported_max) {
1364 error_setg(errp, "satp_mode %s is higher than hw max capability %s",
1365 satp_mode_str(satp_mode_map_max, rv32),
1366 satp_mode_str(satp_mode_supported_max, rv32));
1367 return;
1371 * Make sure the user did not ask for an invalid configuration as per
1372 * the specification.
1374 if (!rv32) {
1375 for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1376 if (!(cpu->cfg.satp_mode.map & (1 << i)) &&
1377 (cpu->cfg.satp_mode.init & (1 << i)) &&
1378 (cpu->cfg.satp_mode.supported & (1 << i))) {
1379 error_setg(errp, "cannot disable %s satp mode if %s "
1380 "is enabled", satp_mode_str(i, false),
1381 satp_mode_str(satp_mode_map_max, false));
1382 return;
1387 /* Finally expand the map so that all valid modes are set */
1388 for (int i = satp_mode_map_max - 1; i >= 0; --i) {
1389 if (cpu->cfg.satp_mode.supported & (1 << i)) {
1390 cpu->cfg.satp_mode.map |= (1 << i);
1394 #endif
1396 static void riscv_cpu_finalize_features(RISCVCPU *cpu, Error **errp)
1398 #ifndef CONFIG_USER_ONLY
1399 Error *local_err = NULL;
1401 riscv_cpu_satp_mode_finalize(cpu, &local_err);
1402 if (local_err != NULL) {
1403 error_propagate(errp, local_err);
1404 return;
1406 #endif
1409 static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
1411 if (riscv_has_ext(env, RVH) && env->priv_ver < PRIV_VERSION_1_12_0) {
1412 error_setg(errp, "H extension requires priv spec 1.12.0");
1413 return;
1417 static void riscv_cpu_realize_tcg(DeviceState *dev, Error **errp)
1419 RISCVCPU *cpu = RISCV_CPU(dev);
1420 CPURISCVState *env = &cpu->env;
1421 Error *local_err = NULL;
1423 if (object_dynamic_cast(OBJECT(dev), TYPE_RISCV_CPU_HOST)) {
1424 error_setg(errp, "'host' CPU is not compatible with TCG acceleration");
1425 return;
1428 riscv_cpu_validate_misa_mxl(cpu, &local_err);
1429 if (local_err != NULL) {
1430 error_propagate(errp, local_err);
1431 return;
1434 riscv_cpu_validate_priv_spec(cpu, &local_err);
1435 if (local_err != NULL) {
1436 error_propagate(errp, local_err);
1437 return;
1440 riscv_cpu_validate_misa_priv(env, &local_err);
1441 if (local_err != NULL) {
1442 error_propagate(errp, local_err);
1443 return;
1446 if (cpu->cfg.epmp && !cpu->cfg.pmp) {
1448 * Enhanced PMP should only be available
1449 * on harts with PMP support
1451 error_setg(errp, "Invalid configuration: EPMP requires PMP support");
1452 return;
1455 riscv_cpu_validate_set_extensions(cpu, &local_err);
1456 if (local_err != NULL) {
1457 error_propagate(errp, local_err);
1458 return;
1461 #ifndef CONFIG_USER_ONLY
1462 CPU(dev)->tcg_cflags |= CF_PCREL;
1464 if (cpu->cfg.ext_sstc) {
1465 riscv_timer_init(cpu);
1468 if (cpu->cfg.pmu_num) {
1469 if (!riscv_pmu_init(cpu, cpu->cfg.pmu_num) && cpu->cfg.ext_sscofpmf) {
1470 cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1471 riscv_pmu_timer_cb, cpu);
1474 #endif
1477 static void riscv_cpu_realize(DeviceState *dev, Error **errp)
1479 CPUState *cs = CPU(dev);
1480 RISCVCPU *cpu = RISCV_CPU(dev);
1481 RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
1482 Error *local_err = NULL;
1484 cpu_exec_realizefn(cs, &local_err);
1485 if (local_err != NULL) {
1486 error_propagate(errp, local_err);
1487 return;
1490 if (tcg_enabled()) {
1491 riscv_cpu_realize_tcg(dev, &local_err);
1492 if (local_err != NULL) {
1493 error_propagate(errp, local_err);
1494 return;
1498 riscv_cpu_finalize_features(cpu, &local_err);
1499 if (local_err != NULL) {
1500 error_propagate(errp, local_err);
1501 return;
1504 riscv_cpu_register_gdb_regs_for_features(cs);
1506 qemu_init_vcpu(cs);
1507 cpu_reset(cs);
1509 mcc->parent_realize(dev, errp);
1512 #ifndef CONFIG_USER_ONLY
1513 static void cpu_riscv_get_satp(Object *obj, Visitor *v, const char *name,
1514 void *opaque, Error **errp)
1516 RISCVSATPMap *satp_map = opaque;
1517 uint8_t satp = satp_mode_from_str(name);
1518 bool value;
1520 value = satp_map->map & (1 << satp);
1522 visit_type_bool(v, name, &value, errp);
1525 static void cpu_riscv_set_satp(Object *obj, Visitor *v, const char *name,
1526 void *opaque, Error **errp)
1528 RISCVSATPMap *satp_map = opaque;
1529 uint8_t satp = satp_mode_from_str(name);
1530 bool value;
1532 if (!visit_type_bool(v, name, &value, errp)) {
1533 return;
1536 satp_map->map = deposit32(satp_map->map, satp, 1, value);
1537 satp_map->init |= 1 << satp;
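/*
 * Note: 'map' holds the requested value for each mode while 'init' records
 * which bits the user explicitly set; riscv_cpu_satp_mode_finalize() uses
 * this to distinguish "left at default" from "explicitly disabled".
 */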
1540 static void riscv_add_satp_mode_properties(Object *obj)
1542 RISCVCPU *cpu = RISCV_CPU(obj);
1544 if (cpu->env.misa_mxl == MXL_RV32) {
1545 object_property_add(obj, "sv32", "bool", cpu_riscv_get_satp,
1546 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1547 } else {
1548 object_property_add(obj, "sv39", "bool", cpu_riscv_get_satp,
1549 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1550 object_property_add(obj, "sv48", "bool", cpu_riscv_get_satp,
1551 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1552 object_property_add(obj, "sv57", "bool", cpu_riscv_get_satp,
1553 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1554 object_property_add(obj, "sv64", "bool", cpu_riscv_get_satp,
1555 cpu_riscv_set_satp, NULL, &cpu->cfg.satp_mode);
1559 static void riscv_cpu_set_irq(void *opaque, int irq, int level)
1561 RISCVCPU *cpu = RISCV_CPU(opaque);
1562 CPURISCVState *env = &cpu->env;
1564 if (irq < IRQ_LOCAL_MAX) {
1565 switch (irq) {
1566 case IRQ_U_SOFT:
1567 case IRQ_S_SOFT:
1568 case IRQ_VS_SOFT:
1569 case IRQ_M_SOFT:
1570 case IRQ_U_TIMER:
1571 case IRQ_S_TIMER:
1572 case IRQ_VS_TIMER:
1573 case IRQ_M_TIMER:
1574 case IRQ_U_EXT:
1575 case IRQ_VS_EXT:
1576 case IRQ_M_EXT:
1577 if (kvm_enabled()) {
1578 kvm_riscv_set_irq(cpu, irq, level);
1579 } else {
1580 riscv_cpu_update_mip(env, 1 << irq, BOOL_TO_MASK(level));
1582 break;
1583 case IRQ_S_EXT:
1584 if (kvm_enabled()) {
1585 kvm_riscv_set_irq(cpu, irq, level);
1586 } else {
1587 env->external_seip = level;
1588 riscv_cpu_update_mip(env, 1 << irq,
1589 BOOL_TO_MASK(level | env->software_seip));
1591 break;
1592 default:
1593 g_assert_not_reached();
1595 } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) {
1596 /* Require H-extension for handling guest local interrupts */
1597 if (!riscv_has_ext(env, RVH)) {
1598 g_assert_not_reached();
1601 /* Compute bit position in HGEIP CSR */
1602 irq = irq - IRQ_LOCAL_MAX + 1;
1603 if (env->geilen < irq) {
1604 g_assert_not_reached();
1607 /* Update HGEIP CSR */
1608 env->hgeip &= ~((target_ulong)1 << irq);
1609 if (level) {
1610 env->hgeip |= (target_ulong)1 << irq;
1613 /* Update mip.SGEIP bit */
1614 riscv_cpu_update_mip(env, MIP_SGEIP,
1615 BOOL_TO_MASK(!!(env->hgeie & env->hgeip)));
1616 } else {
1617 g_assert_not_reached();
1620 #endif /* CONFIG_USER_ONLY */
1622 static void riscv_cpu_init(Object *obj)
1624 RISCVCPU *cpu = RISCV_CPU(obj);
1626 cpu_set_cpustate_pointers(cpu);
1628 #ifndef CONFIG_USER_ONLY
1629 qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq,
1630 IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX);
1631 #endif /* CONFIG_USER_ONLY */
1634 typedef struct RISCVCPUMisaExtConfig {
1635 const char *name;
1636 const char *description;
1637 target_ulong misa_bit;
1638 bool enabled;
1639 } RISCVCPUMisaExtConfig;
1641 static void cpu_set_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
1642 void *opaque, Error **errp)
1644 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
1645 target_ulong misa_bit = misa_ext_cfg->misa_bit;
1646 RISCVCPU *cpu = RISCV_CPU(obj);
1647 CPURISCVState *env = &cpu->env;
1648 bool value;
1650 if (!visit_type_bool(v, name, &value, errp)) {
1651 return;
1654 if (value) {
1655 env->misa_ext |= misa_bit;
1656 env->misa_ext_mask |= misa_bit;
1657 } else {
1658 env->misa_ext &= ~misa_bit;
1659 env->misa_ext_mask &= ~misa_bit;
1663 static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
1664 void *opaque, Error **errp)
1666 const RISCVCPUMisaExtConfig *misa_ext_cfg = opaque;
1667 target_ulong misa_bit = misa_ext_cfg->misa_bit;
1668 RISCVCPU *cpu = RISCV_CPU(obj);
1669 CPURISCVState *env = &cpu->env;
1670 bool value;
1672 value = env->misa_ext & misa_bit;
1674 visit_type_bool(v, name, &value, errp);
1677 typedef struct misa_ext_info {
1678 const char *name;
1679 const char *description;
1680 } MISAExtInfo;
1682 #define MISA_INFO_IDX(_bit) \
1683 __builtin_ctz(_bit)
1685 #define MISA_EXT_INFO(_bit, _propname, _descr) \
1686 [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
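/*
 * The array below is indexed by misa bit position, e.g. RVA (bit 0)
 * lands at index 0 and RVV (bit 21) at index 21, courtesy of
 * __builtin_ctz() in MISA_INFO_IDX().
 */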
1688 static const MISAExtInfo misa_ext_info_arr[] = {
1689 MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
1690 MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
1691 MISA_EXT_INFO(RVD, "d", "Double-precision floating point"),
1692 MISA_EXT_INFO(RVF, "f", "Single-precision floating point"),
1693 MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
1694 MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
1695 MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
1696 MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
1697 MISA_EXT_INFO(RVU, "u", "User-level instructions"),
1698 MISA_EXT_INFO(RVH, "h", "Hypervisor"),
1699 MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
1700 MISA_EXT_INFO(RVV, "v", "Vector operations"),
1701 MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
1704 static int riscv_validate_misa_info_idx(uint32_t bit)
1706 int idx;
1709 * Our lowest valid input (RVA) is 1 and
1710 * __builtin_ctz() is UB with zero.
1712 g_assert(bit != 0);
1713 idx = MISA_INFO_IDX(bit);
1715 g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
1716 return idx;
1719 const char *riscv_get_misa_ext_name(uint32_t bit)
1721 int idx = riscv_validate_misa_info_idx(bit);
1722 const char *val = misa_ext_info_arr[idx].name;
1724 g_assert(val != NULL);
1725 return val;
1728 const char *riscv_get_misa_ext_description(uint32_t bit)
1730 int idx = riscv_validate_misa_info_idx(bit);
1731 const char *val = misa_ext_info_arr[idx].description;
1733 g_assert(val != NULL);
1734 return val;
1737 #define MISA_CFG(_bit, _enabled) \
1738 {.misa_bit = _bit, .enabled = _enabled}
1740 static RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
1741 MISA_CFG(RVA, true),
1742 MISA_CFG(RVC, true),
1743 MISA_CFG(RVD, true),
1744 MISA_CFG(RVF, true),
1745 MISA_CFG(RVI, true),
1746 MISA_CFG(RVE, false),
1747 MISA_CFG(RVM, true),
1748 MISA_CFG(RVS, true),
1749 MISA_CFG(RVU, true),
1750 MISA_CFG(RVH, true),
1751 MISA_CFG(RVJ, false),
1752 MISA_CFG(RVV, false),
1753 MISA_CFG(RVG, false),
1756 static void riscv_cpu_add_misa_properties(Object *cpu_obj)
1758 int i;
1760 for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
1761 RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
1762 int bit = misa_cfg->misa_bit;
1764 misa_cfg->name = riscv_get_misa_ext_name(bit);
1765 misa_cfg->description = riscv_get_misa_ext_description(bit);
1767 /* Check if KVM already created the property */
1768 if (object_property_find(cpu_obj, misa_cfg->name)) {
1769 continue;
1772 object_property_add(cpu_obj, misa_cfg->name, "bool",
1773 cpu_get_misa_ext_cfg,
1774 cpu_set_misa_ext_cfg,
1775 NULL, (void *)misa_cfg);
1776 object_property_set_description(cpu_obj, misa_cfg->name,
1777 misa_cfg->description);
1778 object_property_set_bool(cpu_obj, misa_cfg->name,
1779 misa_cfg->enabled, NULL);
1783 static Property riscv_cpu_extensions[] = {
1784 /* Defaults for standard extensions */
1785 DEFINE_PROP_UINT8("pmu-num", RISCVCPU, cfg.pmu_num, 16),
1786 DEFINE_PROP_BOOL("sscofpmf", RISCVCPU, cfg.ext_sscofpmf, false),
1787 DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
1788 DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
1789 DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
1790 DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true),
1791 DEFINE_PROP_BOOL("Zfa", RISCVCPU, cfg.ext_zfa, true),
1792 DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
1793 DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
1794 DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
1795 DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false),
1796 DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false),
1797 DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true),
1798 DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true),
1799 DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true),
1801 DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec),
1802 DEFINE_PROP_STRING("vext_spec", RISCVCPU, cfg.vext_spec),
1803 DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128),
1804 DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64),
1806 DEFINE_PROP_BOOL("smstateen", RISCVCPU, cfg.ext_smstateen, false),
1807 DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true),
1808 DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false),
1809 DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false),
1810 DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false),
1812 DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true),
1813 DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true),
1814 DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true),
1815 DEFINE_PROP_BOOL("zbkb", RISCVCPU, cfg.ext_zbkb, false),
1816 DEFINE_PROP_BOOL("zbkc", RISCVCPU, cfg.ext_zbkc, false),
1817 DEFINE_PROP_BOOL("zbkx", RISCVCPU, cfg.ext_zbkx, false),
1818 DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true),
1819 DEFINE_PROP_BOOL("zk", RISCVCPU, cfg.ext_zk, false),
1820 DEFINE_PROP_BOOL("zkn", RISCVCPU, cfg.ext_zkn, false),
1821 DEFINE_PROP_BOOL("zknd", RISCVCPU, cfg.ext_zknd, false),
1822 DEFINE_PROP_BOOL("zkne", RISCVCPU, cfg.ext_zkne, false),
1823 DEFINE_PROP_BOOL("zknh", RISCVCPU, cfg.ext_zknh, false),
1824 DEFINE_PROP_BOOL("zkr", RISCVCPU, cfg.ext_zkr, false),
1825 DEFINE_PROP_BOOL("zks", RISCVCPU, cfg.ext_zks, false),
1826 DEFINE_PROP_BOOL("zksed", RISCVCPU, cfg.ext_zksed, false),
1827 DEFINE_PROP_BOOL("zksh", RISCVCPU, cfg.ext_zksh, false),
1828 DEFINE_PROP_BOOL("zkt", RISCVCPU, cfg.ext_zkt, false),
1830 DEFINE_PROP_BOOL("zdinx", RISCVCPU, cfg.ext_zdinx, false),
1831 DEFINE_PROP_BOOL("zfinx", RISCVCPU, cfg.ext_zfinx, false),
1832 DEFINE_PROP_BOOL("zhinx", RISCVCPU, cfg.ext_zhinx, false),
1833 DEFINE_PROP_BOOL("zhinxmin", RISCVCPU, cfg.ext_zhinxmin, false),
1835 DEFINE_PROP_BOOL("zicbom", RISCVCPU, cfg.ext_icbom, true),
1836 DEFINE_PROP_UINT16("cbom_blocksize", RISCVCPU, cfg.cbom_blocksize, 64),
1837 DEFINE_PROP_BOOL("zicboz", RISCVCPU, cfg.ext_icboz, true),
1838 DEFINE_PROP_UINT16("cboz_blocksize", RISCVCPU, cfg.cboz_blocksize, 64),
1840 DEFINE_PROP_BOOL("zmmul", RISCVCPU, cfg.ext_zmmul, false),
1842 DEFINE_PROP_BOOL("zca", RISCVCPU, cfg.ext_zca, false),
1843 DEFINE_PROP_BOOL("zcb", RISCVCPU, cfg.ext_zcb, false),
1844 DEFINE_PROP_BOOL("zcd", RISCVCPU, cfg.ext_zcd, false),
1845 DEFINE_PROP_BOOL("zce", RISCVCPU, cfg.ext_zce, false),
1846 DEFINE_PROP_BOOL("zcf", RISCVCPU, cfg.ext_zcf, false),
1847 DEFINE_PROP_BOOL("zcmp", RISCVCPU, cfg.ext_zcmp, false),
1848 DEFINE_PROP_BOOL("zcmt", RISCVCPU, cfg.ext_zcmt, false),
1850 /* Vendor-specific custom extensions */
1851 DEFINE_PROP_BOOL("xtheadba", RISCVCPU, cfg.ext_xtheadba, false),
1852 DEFINE_PROP_BOOL("xtheadbb", RISCVCPU, cfg.ext_xtheadbb, false),
1853 DEFINE_PROP_BOOL("xtheadbs", RISCVCPU, cfg.ext_xtheadbs, false),
1854 DEFINE_PROP_BOOL("xtheadcmo", RISCVCPU, cfg.ext_xtheadcmo, false),
1855 DEFINE_PROP_BOOL("xtheadcondmov", RISCVCPU, cfg.ext_xtheadcondmov, false),
1856 DEFINE_PROP_BOOL("xtheadfmemidx", RISCVCPU, cfg.ext_xtheadfmemidx, false),
1857 DEFINE_PROP_BOOL("xtheadfmv", RISCVCPU, cfg.ext_xtheadfmv, false),
1858 DEFINE_PROP_BOOL("xtheadmac", RISCVCPU, cfg.ext_xtheadmac, false),
1859 DEFINE_PROP_BOOL("xtheadmemidx", RISCVCPU, cfg.ext_xtheadmemidx, false),
1860 DEFINE_PROP_BOOL("xtheadmempair", RISCVCPU, cfg.ext_xtheadmempair, false),
1861 DEFINE_PROP_BOOL("xtheadsync", RISCVCPU, cfg.ext_xtheadsync, false),
1862 DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false),
1864 /* These are experimental so mark with 'x-' */
1865 DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false),
1867 /* ePMP 0.9.3 */
1868 DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false),
1869 DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false),
1870 DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false),
1872 DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false),
1873 DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false),
1875 DEFINE_PROP_BOOL("x-zfbfmin", RISCVCPU, cfg.ext_zfbfmin, false),
1876 DEFINE_PROP_BOOL("x-zvfbfmin", RISCVCPU, cfg.ext_zvfbfmin, false),
1877 DEFINE_PROP_BOOL("x-zvfbfwma", RISCVCPU, cfg.ext_zvfbfwma, false),
1879 /* Vector cryptography extensions */
1880 DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false),
1881 DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
1882 DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
1883 DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false),
1884 DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false),
1886 DEFINE_PROP_END_OF_LIST(),
1890 #ifndef CONFIG_USER_ONLY
1891 static void cpu_set_cfg_unavailable(Object *obj, Visitor *v,
1892 const char *name,
1893 void *opaque, Error **errp)
1895 const char *propname = opaque;
1896 bool value;
1898 if (!visit_type_bool(v, name, &value, errp)) {
1899 return;
1902 if (value) {
1903 error_setg(errp, "extension %s is not available with KVM",
1904 propname);
1907 #endif
1909 /*
1910 * Add CPU properties with user-facing flags.
1911 *
1912 * This will overwrite existing env->misa_ext values with the
1913 * defaults set via riscv_cpu_add_misa_properties().
1914 */
1915 static void riscv_cpu_add_user_properties(Object *obj)
1916 {
1917 Property *prop;
1918 DeviceState *dev = DEVICE(obj);
1920 #ifndef CONFIG_USER_ONLY
1921 riscv_add_satp_mode_properties(obj);
1923 if (kvm_enabled()) {
1924 kvm_riscv_init_user_properties(obj);
1925 }
1926 #endif
1928 riscv_cpu_add_misa_properties(obj);
1930 for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
1931 #ifndef CONFIG_USER_ONLY
1932 if (kvm_enabled()) {
1933 /* Check if KVM created the property already */
1934 if (object_property_find(obj, prop->name)) {
1935 continue;
1936 }
1938 /*
1939 * Set the default to disabled for every extension
1940 * unknown to KVM and error out if the user attempts
1941 * to enable any of them.
1942 *
1943 * We're giving a pass for non-bool properties since they're
1944 * not related to the availability of extensions and can be
1945 * safely ignored as is.
1946 */
1947 if (prop->info == &qdev_prop_bool) {
1948 object_property_add(obj, prop->name, "bool",
1949 NULL, cpu_set_cfg_unavailable,
1950 NULL, (void *)prop->name);
1951 continue;
1952 }
1953 }
1954 #endif
1955 qdev_property_add_static(dev, prop);
1956 }
1957 }
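/*
 * Every entry in riscv_cpu_extensions therefore becomes a user-settable CPU
 * property; an illustrative (non-exhaustive) invocation would be
 *
 *   qemu-system-riscv64 -cpu rv64,zicbom=on,cbom_blocksize=64,zcb=on
 *
 * Under KVM, a property for an extension the kernel does not report can only
 * stay at its 'false' default; enabling it fails with the "not available
 * with KVM" error from cpu_set_cfg_unavailable() above.
 */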
1959 static Property riscv_cpu_properties[] = {
1960 DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
1962 #ifndef CONFIG_USER_ONLY
1963 DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
1964 #endif
1966 DEFINE_PROP_BOOL("short-isa-string", RISCVCPU, cfg.short_isa_string, false),
1968 DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false),
1969 DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false),
1971 /*
1972 * write_misa() is marked as experimental for now so mark
1973 * it with 'x-' and default to 'false'.
1974 */
1975 DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false),
1976 DEFINE_PROP_END_OF_LIST(),
1977 };
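/*
 * These base properties are set the same way as the extension properties,
 * e.g. (illustrative): -cpu rv64,short-isa-string=on,x-misa-w=on
 * where x-misa-w gates the experimental write_misa() support noted above.
 */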
1979 static gchar *riscv_gdb_arch_name(CPUState *cs)
1980 {
1981 RISCVCPU *cpu = RISCV_CPU(cs);
1982 CPURISCVState *env = &cpu->env;
1984 switch (riscv_cpu_mxl(env)) {
1985 case MXL_RV32:
1986 return g_strdup("riscv:rv32");
1987 case MXL_RV64:
1988 case MXL_RV128:
1989 return g_strdup("riscv:rv64");
1990 default:
1991 g_assert_not_reached();
1992 }
1993 }
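/*
 * riscv_gdb_get_dynamic_xml() below resolves the dynamically generated GDB
 * XML feature descriptions (CSRs and vector registers) built elsewhere in
 * the target code (gdbstub.c); unknown feature names return NULL.
 */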
1995 static const char *riscv_gdb_get_dynamic_xml(CPUState *cs, const char *xmlname)
1996 {
1997 RISCVCPU *cpu = RISCV_CPU(cs);
1999 if (strcmp(xmlname, "riscv-csr.xml") == 0) {
2000 return cpu->dyn_csr_xml;
2001 } else if (strcmp(xmlname, "riscv-vector.xml") == 0) {
2002 return cpu->dyn_vreg_xml;
2003 }
2005 return NULL;
2006 }
2008 #ifndef CONFIG_USER_ONLY
2009 static int64_t riscv_get_arch_id(CPUState *cs)
2010 {
2011 RISCVCPU *cpu = RISCV_CPU(cs);
2013 return cpu->env.mhartid;
2014 }
2016 #include "hw/core/sysemu-cpu-ops.h"
2018 static const struct SysemuCPUOps riscv_sysemu_ops = {
2019 .get_phys_page_debug = riscv_cpu_get_phys_page_debug,
2020 .write_elf64_note = riscv_cpu_write_elf64_note,
2021 .write_elf32_note = riscv_cpu_write_elf32_note,
2022 .legacy_vmsd = &vmstate_riscv_cpu,
2023 };
2024 #endif
2026 #include "hw/core/tcg-cpu-ops.h"
2028 static const struct TCGCPUOps riscv_tcg_ops = {
2029 .initialize = riscv_translate_init,
2030 .synchronize_from_tb = riscv_cpu_synchronize_from_tb,
2031 .restore_state_to_opc = riscv_restore_state_to_opc,
2033 #ifndef CONFIG_USER_ONLY
2034 .tlb_fill = riscv_cpu_tlb_fill,
2035 .cpu_exec_interrupt = riscv_cpu_exec_interrupt,
2036 .do_interrupt = riscv_cpu_do_interrupt,
2037 .do_transaction_failed = riscv_cpu_do_transaction_failed,
2038 .do_unaligned_access = riscv_cpu_do_unaligned_access,
2039 .debug_excp_handler = riscv_cpu_debug_excp_handler,
2040 .debug_check_breakpoint = riscv_cpu_debug_check_breakpoint,
2041 .debug_check_watchpoint = riscv_cpu_debug_check_watchpoint,
2042 #endif /* !CONFIG_USER_ONLY */
2043 };
2045 static bool riscv_cpu_is_dynamic(Object *cpu_obj)
2046 {
2047 return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
2048 }
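/*
 * The mvendorid/marchid/mimpid setters below use riscv_cpu_is_dynamic() to
 * decide whether an override is permitted: only CPU types derived from
 * TYPE_RISCV_DYNAMIC_CPU accept a new value, while named vendor CPUs keep
 * their fixed IDs.  Illustrative usage (values are examples only):
 *
 *   -cpu rv64,mvendorid=0x489,mimpid=0x1
 */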
2050 static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
2051 void *opaque, Error **errp)
2052 {
2053 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2054 RISCVCPU *cpu = RISCV_CPU(obj);
2055 uint32_t prev_val = cpu->cfg.mvendorid;
2056 uint32_t value;
2058 if (!visit_type_uint32(v, name, &value, errp)) {
2059 return;
2060 }
2062 if (!dynamic_cpu && prev_val != value) {
2063 error_setg(errp, "Unable to change %s mvendorid (0x%x)",
2064 object_get_typename(obj), prev_val);
2065 return;
2066 }
2068 cpu->cfg.mvendorid = value;
2069 }
2071 static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
2072 void *opaque, Error **errp)
2073 {
2074 uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
2076 visit_type_uint32(v, name, &value, errp);
2077 }
2079 static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
2080 void *opaque, Error **errp)
2081 {
2082 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2083 RISCVCPU *cpu = RISCV_CPU(obj);
2084 uint64_t prev_val = cpu->cfg.mimpid;
2085 uint64_t value;
2087 if (!visit_type_uint64(v, name, &value, errp)) {
2088 return;
2089 }
2091 if (!dynamic_cpu && prev_val != value) {
2092 error_setg(errp, "Unable to change %s mimpid (0x%" PRIx64 ")",
2093 object_get_typename(obj), prev_val);
2094 return;
2095 }
2097 cpu->cfg.mimpid = value;
2098 }
2100 static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
2101 void *opaque, Error **errp)
2102 {
2103 uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
2105 visit_type_uint64(v, name, &value, errp);
2106 }
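/*
 * The setter below rejects marchid values that have only the
 * most-significant bit set (e.g. 0x8000000000000000 on an RV64 CPU);
 * values with additional low bits set are accepted.
 */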
2108 static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
2109 void *opaque, Error **errp)
2110 {
2111 bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
2112 RISCVCPU *cpu = RISCV_CPU(obj);
2113 uint64_t prev_val = cpu->cfg.marchid;
2114 uint64_t value, invalid_val;
2115 uint32_t mxlen = 0;
2117 if (!visit_type_uint64(v, name, &value, errp)) {
2118 return;
2119 }
2121 if (!dynamic_cpu && prev_val != value) {
2122 error_setg(errp, "Unable to change %s marchid (0x%" PRIx64 ")",
2123 object_get_typename(obj), prev_val);
2124 return;
2125 }
2127 switch (riscv_cpu_mxl(&cpu->env)) {
2128 case MXL_RV32:
2129 mxlen = 32;
2130 break;
2131 case MXL_RV64:
2132 case MXL_RV128:
2133 mxlen = 64;
2134 break;
2135 default:
2136 g_assert_not_reached();
2137 }
2139 invalid_val = 1ULL << (mxlen - 1);
2141 if (value == invalid_val) {
2142 error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
2143 "and the remaining bits zero", mxlen);
2144 return;
2145 }
2147 cpu->cfg.marchid = value;
2148 }
2150 static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
2151 void *opaque, Error **errp)
2152 {
2153 uint64_t value = RISCV_CPU(obj)->cfg.marchid;
2155 visit_type_uint64(v, name, &value, errp);
2156 }
2158 static void riscv_cpu_class_init(ObjectClass *c, void *data)
2159 {
2160 RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
2161 CPUClass *cc = CPU_CLASS(c);
2162 DeviceClass *dc = DEVICE_CLASS(c);
2163 ResettableClass *rc = RESETTABLE_CLASS(c);
2165 device_class_set_parent_realize(dc, riscv_cpu_realize,
2166 &mcc->parent_realize);
2168 resettable_class_set_parent_phases(rc, NULL, riscv_cpu_reset_hold, NULL,
2169 &mcc->parent_phases);
2171 cc->class_by_name = riscv_cpu_class_by_name;
2172 cc->has_work = riscv_cpu_has_work;
2173 cc->dump_state = riscv_cpu_dump_state;
2174 cc->set_pc = riscv_cpu_set_pc;
2175 cc->get_pc = riscv_cpu_get_pc;
2176 cc->gdb_read_register = riscv_cpu_gdb_read_register;
2177 cc->gdb_write_register = riscv_cpu_gdb_write_register;
2178 cc->gdb_num_core_regs = 33;
2179 cc->gdb_stop_before_watchpoint = true;
2180 cc->disas_set_info = riscv_cpu_disas_set_info;
2181 #ifndef CONFIG_USER_ONLY
2182 cc->sysemu_ops = &riscv_sysemu_ops;
2183 cc->get_arch_id = riscv_get_arch_id;
2184 #endif
2185 cc->gdb_arch_name = riscv_gdb_arch_name;
2186 cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
2187 cc->tcg_ops = &riscv_tcg_ops;
2189 object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
2190 cpu_set_mvendorid, NULL, NULL);
2192 object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
2193 cpu_set_mimpid, NULL, NULL);
2195 object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
2196 cpu_set_marchid, NULL, NULL);
2198 device_class_set_props(dc, riscv_cpu_properties);
2199 }
2201 static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
2202 int max_str_len)
2203 {
2204 char *old = *isa_str;
2205 char *new = *isa_str;
2206 int i;
2208 for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
2209 if (isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
2210 new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
2211 g_free(old);
2212 old = new;
2213 }
2214 }
2216 *isa_str = new;
2217 }
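/*
 * riscv_isa_string() below builds the machine ISA string: the "rvXX" base
 * plus the lower-cased single-letter extensions from misa, and, unless
 * short-isa-string is set, the multi-letter extensions appended by
 * riscv_isa_string_ext().  An illustrative result (depending on the enabled
 * extensions) would be "rv64imafdc_zicsr_zifencei".
 */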
2219 char *riscv_isa_string(RISCVCPU *cpu)
2220 {
2221 int i;
2222 const size_t maxlen = sizeof("rv128") + sizeof(riscv_single_letter_exts);
2223 char *isa_str = g_new(char, maxlen);
2224 char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
2225 for (i = 0; i < sizeof(riscv_single_letter_exts) - 1; i++) {
2226 if (cpu->env.misa_ext & RV(riscv_single_letter_exts[i])) {
2227 *p++ = qemu_tolower(riscv_single_letter_exts[i]);
2228 }
2229 }
2230 *p = '\0';
2231 if (!cpu->cfg.short_isa_string) {
2232 riscv_isa_string_ext(cpu, &isa_str, maxlen);
2233 }
2234 return isa_str;
2235 }
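/*
 * The helpers below implement CPU model listing (e.g. for "-cpu help"): all
 * classes derived from TYPE_RISCV_CPU are collected, sorted by name and
 * printed with the "-riscv-cpu" type suffix stripped.
 */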
2237 static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
2239 ObjectClass *class_a = (ObjectClass *)a;
2240 ObjectClass *class_b = (ObjectClass *)b;
2241 const char *name_a, *name_b;
2243 name_a = object_class_get_name(class_a);
2244 name_b = object_class_get_name(class_b);
2245 return strcmp(name_a, name_b);
2248 static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
2250 const char *typename = object_class_get_name(OBJECT_CLASS(data));
2251 int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);
2253 qemu_printf("%.*s\n", len, typename);
2256 void riscv_cpu_list(void)
2258 GSList *list;
2260 list = object_class_get_list(TYPE_RISCV_CPU, false);
2261 list = g_slist_sort(list, riscv_cpu_list_compare);
2262 g_slist_foreach(list, riscv_cpu_list_entry, NULL);
2263 g_slist_free(list);
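/*
 * DEFINE_CPU registers a fixed CPU model directly under TYPE_RISCV_CPU,
 * while DEFINE_DYNAMIC_CPU parents it under TYPE_RISCV_DYNAMIC_CPU; per
 * riscv_cpu_is_dynamic() above, only the latter group may have
 * mvendorid/marchid/mimpid overridden by the user.
 */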
2266 #define DEFINE_CPU(type_name, initfn) \
2268 .name = type_name, \
2269 .parent = TYPE_RISCV_CPU, \
2270 .instance_init = initfn \
2273 #define DEFINE_DYNAMIC_CPU(type_name, initfn) \
2275 .name = type_name, \
2276 .parent = TYPE_RISCV_DYNAMIC_CPU, \
2277 .instance_init = initfn \
2280 static const TypeInfo riscv_cpu_type_infos[] = {
2281 {
2282 .name = TYPE_RISCV_CPU,
2283 .parent = TYPE_CPU,
2284 .instance_size = sizeof(RISCVCPU),
2285 .instance_align = __alignof__(RISCVCPU),
2286 .instance_init = riscv_cpu_init,
2287 .abstract = true,
2288 .class_size = sizeof(RISCVCPUClass),
2289 .class_init = riscv_cpu_class_init,
2290 },
2291 {
2292 .name = TYPE_RISCV_DYNAMIC_CPU,
2293 .parent = TYPE_RISCV_CPU,
2294 .abstract = true,
2295 },
2296 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init),
2297 #if defined(CONFIG_KVM)
2298 DEFINE_CPU(TYPE_RISCV_CPU_HOST, riscv_host_cpu_init),
2299 #endif
2300 #if defined(TARGET_RISCV32)
2301 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE32, rv32_base_cpu_init),
2302 DEFINE_CPU(TYPE_RISCV_CPU_IBEX, rv32_ibex_cpu_init),
2303 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32_sifive_e_cpu_init),
2304 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E34, rv32_imafcu_nommu_cpu_init),
2305 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32_sifive_u_cpu_init),
2306 #elif defined(TARGET_RISCV64)
2307 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE64, rv64_base_cpu_init),
2308 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64_sifive_e_cpu_init),
2309 DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64_sifive_u_cpu_init),
2310 DEFINE_CPU(TYPE_RISCV_CPU_SHAKTI_C, rv64_sifive_u_cpu_init),
2311 DEFINE_CPU(TYPE_RISCV_CPU_THEAD_C906, rv64_thead_c906_cpu_init),
2312 DEFINE_CPU(TYPE_RISCV_CPU_VEYRON_V1, rv64_veyron_v1_cpu_init),
2313 DEFINE_DYNAMIC_CPU(TYPE_RISCV_CPU_BASE128, rv128_base_cpu_init),
2314 #endif
2315 };
2317 DEFINE_TYPES(riscv_cpu_type_infos)