2 * RISC-V implementation of KVM hooks
4 * Copyright (c) 2020 Huawei Technologies Co., Ltd
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include <sys/ioctl.h>
22 #include <linux/kvm.h>
24 #include "qemu/timer.h"
25 #include "qapi/error.h"
26 #include "qemu/error-report.h"
27 #include "qemu/main-loop.h"
28 #include "qapi/visitor.h"
29 #include "sysemu/sysemu.h"
30 #include "sysemu/kvm.h"
31 #include "sysemu/kvm_int.h"
34 #include "hw/pci/pci.h"
35 #include "exec/memattrs.h"
36 #include "exec/address-spaces.h"
37 #include "hw/boards.h"
39 #include "hw/intc/riscv_imsic.h"
41 #include "hw/loader.h"
42 #include "kvm_riscv.h"
43 #include "sbi_ecall_interface.h"
44 #include "chardev/char-fe.h"
45 #include "migration/migration.h"
46 #include "sysemu/runstate.h"
47 #include "hw/riscv/numa.h"
49 void riscv_kvm_aplic_request(void *opaque
, int irq
, int level
)
51 kvm_set_irq(kvm_state
, irq
, !!level
);
54 static uint64_t kvm_riscv_reg_id(CPURISCVState
*env
, uint64_t type
,
57 uint64_t id
= KVM_REG_RISCV
| type
| idx
;
59 switch (riscv_cpu_mxl(env
)) {
61 id
|= KVM_REG_SIZE_U32
;
64 id
|= KVM_REG_SIZE_U64
;
67 g_assert_not_reached();
72 #define RISCV_CORE_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, \
73 KVM_REG_RISCV_CORE_REG(name))
75 #define RISCV_CSR_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_CSR, \
76 KVM_REG_RISCV_CSR_REG(name))
78 #define RISCV_TIMER_REG(env, name) kvm_riscv_reg_id(env, KVM_REG_RISCV_TIMER, \
79 KVM_REG_RISCV_TIMER_REG(name))
81 #define RISCV_FP_F_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_F, idx)
83 #define RISCV_FP_D_REG(env, idx) kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_D, idx)
85 #define KVM_RISCV_GET_CSR(cs, env, csr, reg) \
87 int ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), ®); \
93 #define KVM_RISCV_SET_CSR(cs, env, csr, reg) \
95 int ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), ®); \
101 #define KVM_RISCV_GET_TIMER(cs, env, name, reg) \
103 int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(env, name), ®); \
109 #define KVM_RISCV_SET_TIMER(cs, env, name, reg) \
111 int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(env, name), ®); \
117 typedef struct KVMCPUConfig
{
119 const char *description
;
126 #define KVM_MISA_CFG(_bit, _reg_id) \
127 {.offset = _bit, .kvm_reg_id = _reg_id}
129 /* KVM ISA extensions */
130 static KVMCPUConfig kvm_misa_ext_cfgs
[] = {
131 KVM_MISA_CFG(RVA
, KVM_RISCV_ISA_EXT_A
),
132 KVM_MISA_CFG(RVC
, KVM_RISCV_ISA_EXT_C
),
133 KVM_MISA_CFG(RVD
, KVM_RISCV_ISA_EXT_D
),
134 KVM_MISA_CFG(RVF
, KVM_RISCV_ISA_EXT_F
),
135 KVM_MISA_CFG(RVH
, KVM_RISCV_ISA_EXT_H
),
136 KVM_MISA_CFG(RVI
, KVM_RISCV_ISA_EXT_I
),
137 KVM_MISA_CFG(RVM
, KVM_RISCV_ISA_EXT_M
),
140 static void kvm_cpu_set_misa_ext_cfg(Object
*obj
, Visitor
*v
,
142 void *opaque
, Error
**errp
)
144 KVMCPUConfig
*misa_ext_cfg
= opaque
;
145 target_ulong misa_bit
= misa_ext_cfg
->offset
;
146 RISCVCPU
*cpu
= RISCV_CPU(obj
);
147 CPURISCVState
*env
= &cpu
->env
;
148 bool value
, host_bit
;
150 if (!visit_type_bool(v
, name
, &value
, errp
)) {
154 host_bit
= env
->misa_ext_mask
& misa_bit
;
156 if (value
== host_bit
) {
161 misa_ext_cfg
->user_set
= true;
166 * Forbid users to enable extensions that aren't
167 * available in the hart.
169 error_setg(errp
, "Enabling MISA bit '%s' is not allowed: it's not "
170 "enabled in the host", misa_ext_cfg
->name
);
173 static void kvm_riscv_update_cpu_misa_ext(RISCVCPU
*cpu
, CPUState
*cs
)
175 CPURISCVState
*env
= &cpu
->env
;
179 for (i
= 0; i
< ARRAY_SIZE(kvm_misa_ext_cfgs
); i
++) {
180 KVMCPUConfig
*misa_cfg
= &kvm_misa_ext_cfgs
[i
];
181 target_ulong misa_bit
= misa_cfg
->offset
;
183 if (!misa_cfg
->user_set
) {
187 /* If we're here we're going to disable the MISA bit */
189 id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_ISA_EXT
,
190 misa_cfg
->kvm_reg_id
);
191 ret
= kvm_set_one_reg(cs
, id
, ®
);
194 * We're not checking for -EINVAL because if the bit is about
195 * to be disabled, it means that it was already enabled by
196 * KVM. We determined that by fetching the 'isa' register
197 * during init() time. Any error at this point is worth
200 error_report("Unable to set KVM reg %s, error %d",
201 misa_cfg
->name
, ret
);
204 env
->misa_ext
&= ~misa_bit
;
208 #define CPUCFG(_prop) offsetof(struct RISCVCPUConfig, _prop)
210 #define KVM_EXT_CFG(_name, _prop, _reg_id) \
211 {.name = _name, .offset = CPUCFG(_prop), \
212 .kvm_reg_id = _reg_id}
214 static KVMCPUConfig kvm_multi_ext_cfgs
[] = {
215 KVM_EXT_CFG("zicbom", ext_icbom
, KVM_RISCV_ISA_EXT_ZICBOM
),
216 KVM_EXT_CFG("zicboz", ext_icboz
, KVM_RISCV_ISA_EXT_ZICBOZ
),
217 KVM_EXT_CFG("zihintpause", ext_zihintpause
, KVM_RISCV_ISA_EXT_ZIHINTPAUSE
),
218 KVM_EXT_CFG("zbb", ext_zbb
, KVM_RISCV_ISA_EXT_ZBB
),
219 KVM_EXT_CFG("ssaia", ext_ssaia
, KVM_RISCV_ISA_EXT_SSAIA
),
220 KVM_EXT_CFG("sstc", ext_sstc
, KVM_RISCV_ISA_EXT_SSTC
),
221 KVM_EXT_CFG("svinval", ext_svinval
, KVM_RISCV_ISA_EXT_SVINVAL
),
222 KVM_EXT_CFG("svpbmt", ext_svpbmt
, KVM_RISCV_ISA_EXT_SVPBMT
),
225 static void *kvmconfig_get_cfg_addr(RISCVCPU
*cpu
, KVMCPUConfig
*kvmcfg
)
227 return (void *)&cpu
->cfg
+ kvmcfg
->offset
;
230 static void kvm_cpu_cfg_set(RISCVCPU
*cpu
, KVMCPUConfig
*multi_ext
,
233 bool *ext_enabled
= kvmconfig_get_cfg_addr(cpu
, multi_ext
);
238 static uint32_t kvm_cpu_cfg_get(RISCVCPU
*cpu
,
239 KVMCPUConfig
*multi_ext
)
241 bool *ext_enabled
= kvmconfig_get_cfg_addr(cpu
, multi_ext
);
246 static void kvm_cpu_set_multi_ext_cfg(Object
*obj
, Visitor
*v
,
248 void *opaque
, Error
**errp
)
250 KVMCPUConfig
*multi_ext_cfg
= opaque
;
251 RISCVCPU
*cpu
= RISCV_CPU(obj
);
252 bool value
, host_val
;
254 if (!visit_type_bool(v
, name
, &value
, errp
)) {
258 host_val
= kvm_cpu_cfg_get(cpu
, multi_ext_cfg
);
261 * Ignore if the user is setting the same value
264 if (value
== host_val
) {
268 if (!multi_ext_cfg
->supported
) {
270 * Error out if the user is trying to enable an
271 * extension that KVM doesn't support. Ignore
275 error_setg(errp
, "KVM does not support disabling extension %s",
276 multi_ext_cfg
->name
);
282 multi_ext_cfg
->user_set
= true;
283 kvm_cpu_cfg_set(cpu
, multi_ext_cfg
, value
);
286 static KVMCPUConfig kvm_cbom_blocksize
= {
287 .name
= "cbom_blocksize",
288 .offset
= CPUCFG(cbom_blocksize
),
289 .kvm_reg_id
= KVM_REG_RISCV_CONFIG_REG(zicbom_block_size
)
292 static KVMCPUConfig kvm_cboz_blocksize
= {
293 .name
= "cboz_blocksize",
294 .offset
= CPUCFG(cboz_blocksize
),
295 .kvm_reg_id
= KVM_REG_RISCV_CONFIG_REG(zicboz_block_size
)
298 static void kvm_cpu_set_cbomz_blksize(Object
*obj
, Visitor
*v
,
300 void *opaque
, Error
**errp
)
302 KVMCPUConfig
*cbomz_cfg
= opaque
;
303 RISCVCPU
*cpu
= RISCV_CPU(obj
);
304 uint16_t value
, *host_val
;
306 if (!visit_type_uint16(v
, name
, &value
, errp
)) {
310 host_val
= kvmconfig_get_cfg_addr(cpu
, cbomz_cfg
);
312 if (value
!= *host_val
) {
313 error_report("Unable to set %s to a different value than "
315 cbomz_cfg
->name
, *host_val
);
319 cbomz_cfg
->user_set
= true;
322 static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU
*cpu
, CPUState
*cs
)
324 CPURISCVState
*env
= &cpu
->env
;
328 for (i
= 0; i
< ARRAY_SIZE(kvm_multi_ext_cfgs
); i
++) {
329 KVMCPUConfig
*multi_ext_cfg
= &kvm_multi_ext_cfgs
[i
];
331 if (!multi_ext_cfg
->user_set
) {
335 id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_ISA_EXT
,
336 multi_ext_cfg
->kvm_reg_id
);
337 reg
= kvm_cpu_cfg_get(cpu
, multi_ext_cfg
);
338 ret
= kvm_set_one_reg(cs
, id
, ®
);
340 error_report("Unable to %s extension %s in KVM, error %d",
341 reg
? "enable" : "disable",
342 multi_ext_cfg
->name
, ret
);
348 static void kvm_riscv_add_cpu_user_properties(Object
*cpu_obj
)
352 for (i
= 0; i
< ARRAY_SIZE(kvm_misa_ext_cfgs
); i
++) {
353 KVMCPUConfig
*misa_cfg
= &kvm_misa_ext_cfgs
[i
];
354 int bit
= misa_cfg
->offset
;
356 misa_cfg
->name
= riscv_get_misa_ext_name(bit
);
357 misa_cfg
->description
= riscv_get_misa_ext_description(bit
);
359 object_property_add(cpu_obj
, misa_cfg
->name
, "bool",
361 kvm_cpu_set_misa_ext_cfg
,
363 object_property_set_description(cpu_obj
, misa_cfg
->name
,
364 misa_cfg
->description
);
367 for (i
= 0; i
< ARRAY_SIZE(kvm_multi_ext_cfgs
); i
++) {
368 KVMCPUConfig
*multi_cfg
= &kvm_multi_ext_cfgs
[i
];
370 object_property_add(cpu_obj
, multi_cfg
->name
, "bool",
372 kvm_cpu_set_multi_ext_cfg
,
376 object_property_add(cpu_obj
, "cbom_blocksize", "uint16",
377 NULL
, kvm_cpu_set_cbomz_blksize
,
378 NULL
, &kvm_cbom_blocksize
);
380 object_property_add(cpu_obj
, "cboz_blocksize", "uint16",
381 NULL
, kvm_cpu_set_cbomz_blksize
,
382 NULL
, &kvm_cboz_blocksize
);
385 static int kvm_riscv_get_regs_core(CPUState
*cs
)
390 CPURISCVState
*env
= &RISCV_CPU(cs
)->env
;
392 ret
= kvm_get_one_reg(cs
, RISCV_CORE_REG(env
, regs
.pc
), ®
);
398 for (i
= 1; i
< 32; i
++) {
399 uint64_t id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CORE
, i
);
400 ret
= kvm_get_one_reg(cs
, id
, ®
);
410 static int kvm_riscv_put_regs_core(CPUState
*cs
)
415 CPURISCVState
*env
= &RISCV_CPU(cs
)->env
;
418 ret
= kvm_set_one_reg(cs
, RISCV_CORE_REG(env
, regs
.pc
), ®
);
423 for (i
= 1; i
< 32; i
++) {
424 uint64_t id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CORE
, i
);
426 ret
= kvm_set_one_reg(cs
, id
, ®
);
435 static int kvm_riscv_get_regs_csr(CPUState
*cs
)
438 CPURISCVState
*env
= &RISCV_CPU(cs
)->env
;
440 KVM_RISCV_GET_CSR(cs
, env
, sstatus
, env
->mstatus
);
441 KVM_RISCV_GET_CSR(cs
, env
, sie
, env
->mie
);
442 KVM_RISCV_GET_CSR(cs
, env
, stvec
, env
->stvec
);
443 KVM_RISCV_GET_CSR(cs
, env
, sscratch
, env
->sscratch
);
444 KVM_RISCV_GET_CSR(cs
, env
, sepc
, env
->sepc
);
445 KVM_RISCV_GET_CSR(cs
, env
, scause
, env
->scause
);
446 KVM_RISCV_GET_CSR(cs
, env
, stval
, env
->stval
);
447 KVM_RISCV_GET_CSR(cs
, env
, sip
, env
->mip
);
448 KVM_RISCV_GET_CSR(cs
, env
, satp
, env
->satp
);
452 static int kvm_riscv_put_regs_csr(CPUState
*cs
)
455 CPURISCVState
*env
= &RISCV_CPU(cs
)->env
;
457 KVM_RISCV_SET_CSR(cs
, env
, sstatus
, env
->mstatus
);
458 KVM_RISCV_SET_CSR(cs
, env
, sie
, env
->mie
);
459 KVM_RISCV_SET_CSR(cs
, env
, stvec
, env
->stvec
);
460 KVM_RISCV_SET_CSR(cs
, env
, sscratch
, env
->sscratch
);
461 KVM_RISCV_SET_CSR(cs
, env
, sepc
, env
->sepc
);
462 KVM_RISCV_SET_CSR(cs
, env
, scause
, env
->scause
);
463 KVM_RISCV_SET_CSR(cs
, env
, stval
, env
->stval
);
464 KVM_RISCV_SET_CSR(cs
, env
, sip
, env
->mip
);
465 KVM_RISCV_SET_CSR(cs
, env
, satp
, env
->satp
);
470 static int kvm_riscv_get_regs_fp(CPUState
*cs
)
474 CPURISCVState
*env
= &RISCV_CPU(cs
)->env
;
476 if (riscv_has_ext(env
, RVD
)) {
478 for (i
= 0; i
< 32; i
++) {
479 ret
= kvm_get_one_reg(cs
, RISCV_FP_D_REG(env
, i
), ®
);
488 if (riscv_has_ext(env
, RVF
)) {
490 for (i
= 0; i
< 32; i
++) {
491 ret
= kvm_get_one_reg(cs
, RISCV_FP_F_REG(env
, i
), ®
);
503 static int kvm_riscv_put_regs_fp(CPUState
*cs
)
507 CPURISCVState
*env
= &RISCV_CPU(cs
)->env
;
509 if (riscv_has_ext(env
, RVD
)) {
511 for (i
= 0; i
< 32; i
++) {
513 ret
= kvm_set_one_reg(cs
, RISCV_FP_D_REG(env
, i
), ®
);
521 if (riscv_has_ext(env
, RVF
)) {
523 for (i
= 0; i
< 32; i
++) {
525 ret
= kvm_set_one_reg(cs
, RISCV_FP_F_REG(env
, i
), ®
);
536 static void kvm_riscv_get_regs_timer(CPUState
*cs
)
538 CPURISCVState
*env
= &RISCV_CPU(cs
)->env
;
540 if (env
->kvm_timer_dirty
) {
544 KVM_RISCV_GET_TIMER(cs
, env
, time
, env
->kvm_timer_time
);
545 KVM_RISCV_GET_TIMER(cs
, env
, compare
, env
->kvm_timer_compare
);
546 KVM_RISCV_GET_TIMER(cs
, env
, state
, env
->kvm_timer_state
);
547 KVM_RISCV_GET_TIMER(cs
, env
, frequency
, env
->kvm_timer_frequency
);
549 env
->kvm_timer_dirty
= true;
552 static void kvm_riscv_put_regs_timer(CPUState
*cs
)
555 CPURISCVState
*env
= &RISCV_CPU(cs
)->env
;
557 if (!env
->kvm_timer_dirty
) {
561 KVM_RISCV_SET_TIMER(cs
, env
, time
, env
->kvm_timer_time
);
562 KVM_RISCV_SET_TIMER(cs
, env
, compare
, env
->kvm_timer_compare
);
565 * To set register of RISCV_TIMER_REG(state) will occur a error from KVM
566 * on env->kvm_timer_state == 0, It's better to adapt in KVM, but it
567 * doesn't matter that adaping in QEMU now.
568 * TODO If KVM changes, adapt here.
570 if (env
->kvm_timer_state
) {
571 KVM_RISCV_SET_TIMER(cs
, env
, state
, env
->kvm_timer_state
);
575 * For now, migration will not work between Hosts with different timer
576 * frequency. Therefore, we should check whether they are the same here
577 * during the migration.
579 if (migration_is_running(migrate_get_current()->state
)) {
580 KVM_RISCV_GET_TIMER(cs
, env
, frequency
, reg
);
581 if (reg
!= env
->kvm_timer_frequency
) {
582 error_report("Dst Hosts timer frequency != Src Hosts");
586 env
->kvm_timer_dirty
= false;
589 typedef struct KVMScratchCPU
{
596 * Heavily inspired by kvm_arm_create_scratch_host_vcpu()
597 * from target/arm/kvm.c.
599 static bool kvm_riscv_create_scratch_vcpu(KVMScratchCPU
*scratch
)
601 int kvmfd
= -1, vmfd
= -1, cpufd
= -1;
603 kvmfd
= qemu_open_old("/dev/kvm", O_RDWR
);
608 vmfd
= ioctl(kvmfd
, KVM_CREATE_VM
, 0);
609 } while (vmfd
== -1 && errno
== EINTR
);
613 cpufd
= ioctl(vmfd
, KVM_CREATE_VCPU
, 0);
618 scratch
->kvmfd
= kvmfd
;
619 scratch
->vmfd
= vmfd
;
620 scratch
->cpufd
= cpufd
;
638 static void kvm_riscv_destroy_scratch_vcpu(KVMScratchCPU
*scratch
)
640 close(scratch
->cpufd
);
641 close(scratch
->vmfd
);
642 close(scratch
->kvmfd
);
645 static void kvm_riscv_init_machine_ids(RISCVCPU
*cpu
, KVMScratchCPU
*kvmcpu
)
647 CPURISCVState
*env
= &cpu
->env
;
648 struct kvm_one_reg reg
;
651 reg
.id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CONFIG
,
652 KVM_REG_RISCV_CONFIG_REG(mvendorid
));
653 reg
.addr
= (uint64_t)&cpu
->cfg
.mvendorid
;
654 ret
= ioctl(kvmcpu
->cpufd
, KVM_GET_ONE_REG
, ®
);
656 error_report("Unable to retrieve mvendorid from host, error %d", ret
);
659 reg
.id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CONFIG
,
660 KVM_REG_RISCV_CONFIG_REG(marchid
));
661 reg
.addr
= (uint64_t)&cpu
->cfg
.marchid
;
662 ret
= ioctl(kvmcpu
->cpufd
, KVM_GET_ONE_REG
, ®
);
664 error_report("Unable to retrieve marchid from host, error %d", ret
);
667 reg
.id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CONFIG
,
668 KVM_REG_RISCV_CONFIG_REG(mimpid
));
669 reg
.addr
= (uint64_t)&cpu
->cfg
.mimpid
;
670 ret
= ioctl(kvmcpu
->cpufd
, KVM_GET_ONE_REG
, ®
);
672 error_report("Unable to retrieve mimpid from host, error %d", ret
);
676 static void kvm_riscv_init_misa_ext_mask(RISCVCPU
*cpu
,
677 KVMScratchCPU
*kvmcpu
)
679 CPURISCVState
*env
= &cpu
->env
;
680 struct kvm_one_reg reg
;
683 reg
.id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CONFIG
,
684 KVM_REG_RISCV_CONFIG_REG(isa
));
685 reg
.addr
= (uint64_t)&env
->misa_ext_mask
;
686 ret
= ioctl(kvmcpu
->cpufd
, KVM_GET_ONE_REG
, ®
);
689 error_report("Unable to fetch ISA register from KVM, "
691 kvm_riscv_destroy_scratch_vcpu(kvmcpu
);
695 env
->misa_ext
= env
->misa_ext_mask
;
698 static void kvm_riscv_read_cbomz_blksize(RISCVCPU
*cpu
, KVMScratchCPU
*kvmcpu
,
699 KVMCPUConfig
*cbomz_cfg
)
701 CPURISCVState
*env
= &cpu
->env
;
702 struct kvm_one_reg reg
;
705 reg
.id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CONFIG
,
706 cbomz_cfg
->kvm_reg_id
);
707 reg
.addr
= (uint64_t)kvmconfig_get_cfg_addr(cpu
, cbomz_cfg
);
708 ret
= ioctl(kvmcpu
->cpufd
, KVM_GET_ONE_REG
, ®
);
710 error_report("Unable to read KVM reg %s, error %d",
711 cbomz_cfg
->name
, ret
);
716 static void kvm_riscv_init_multiext_cfg(RISCVCPU
*cpu
, KVMScratchCPU
*kvmcpu
)
718 CPURISCVState
*env
= &cpu
->env
;
722 for (i
= 0; i
< ARRAY_SIZE(kvm_multi_ext_cfgs
); i
++) {
723 KVMCPUConfig
*multi_ext_cfg
= &kvm_multi_ext_cfgs
[i
];
724 struct kvm_one_reg reg
;
726 reg
.id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_ISA_EXT
,
727 multi_ext_cfg
->kvm_reg_id
);
728 reg
.addr
= (uint64_t)&val
;
729 ret
= ioctl(kvmcpu
->cpufd
, KVM_GET_ONE_REG
, ®
);
731 if (errno
== EINVAL
) {
732 /* Silently default to 'false' if KVM does not support it. */
733 multi_ext_cfg
->supported
= false;
736 error_report("Unable to read ISA_EXT KVM register %s, "
737 "error %d", multi_ext_cfg
->name
, ret
);
738 kvm_riscv_destroy_scratch_vcpu(kvmcpu
);
742 multi_ext_cfg
->supported
= true;
745 kvm_cpu_cfg_set(cpu
, multi_ext_cfg
, val
);
748 if (cpu
->cfg
.ext_icbom
) {
749 kvm_riscv_read_cbomz_blksize(cpu
, kvmcpu
, &kvm_cbom_blocksize
);
752 if (cpu
->cfg
.ext_icboz
) {
753 kvm_riscv_read_cbomz_blksize(cpu
, kvmcpu
, &kvm_cboz_blocksize
);
757 void kvm_riscv_init_user_properties(Object
*cpu_obj
)
759 RISCVCPU
*cpu
= RISCV_CPU(cpu_obj
);
760 KVMScratchCPU kvmcpu
;
762 if (!kvm_riscv_create_scratch_vcpu(&kvmcpu
)) {
766 kvm_riscv_add_cpu_user_properties(cpu_obj
);
767 kvm_riscv_init_machine_ids(cpu
, &kvmcpu
);
768 kvm_riscv_init_misa_ext_mask(cpu
, &kvmcpu
);
769 kvm_riscv_init_multiext_cfg(cpu
, &kvmcpu
);
771 kvm_riscv_destroy_scratch_vcpu(&kvmcpu
);
774 const KVMCapabilityInfo kvm_arch_required_capabilities
[] = {
778 int kvm_arch_get_registers(CPUState
*cs
)
782 ret
= kvm_riscv_get_regs_core(cs
);
787 ret
= kvm_riscv_get_regs_csr(cs
);
792 ret
= kvm_riscv_get_regs_fp(cs
);
800 int kvm_arch_put_registers(CPUState
*cs
, int level
)
804 ret
= kvm_riscv_put_regs_core(cs
);
809 ret
= kvm_riscv_put_regs_csr(cs
);
814 ret
= kvm_riscv_put_regs_fp(cs
);
822 int kvm_arch_release_virq_post(int virq
)
827 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry
*route
,
828 uint64_t address
, uint32_t data
, PCIDevice
*dev
)
833 int kvm_arch_destroy_vcpu(CPUState
*cs
)
838 unsigned long kvm_arch_vcpu_id(CPUState
*cpu
)
840 return cpu
->cpu_index
;
843 static void kvm_riscv_vm_state_change(void *opaque
, bool running
,
846 CPUState
*cs
= opaque
;
849 kvm_riscv_put_regs_timer(cs
);
851 kvm_riscv_get_regs_timer(cs
);
855 void kvm_arch_init_irq_routing(KVMState
*s
)
859 static int kvm_vcpu_set_machine_ids(RISCVCPU
*cpu
, CPUState
*cs
)
861 CPURISCVState
*env
= &cpu
->env
;
866 id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CONFIG
,
867 KVM_REG_RISCV_CONFIG_REG(mvendorid
));
869 * cfg.mvendorid is an uint32 but a target_ulong will
870 * be written. Assign it to a target_ulong var to avoid
871 * writing pieces of other cpu->cfg fields in the reg.
873 reg
= cpu
->cfg
.mvendorid
;
874 ret
= kvm_set_one_reg(cs
, id
, ®
);
879 id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CONFIG
,
880 KVM_REG_RISCV_CONFIG_REG(marchid
));
881 ret
= kvm_set_one_reg(cs
, id
, &cpu
->cfg
.marchid
);
886 id
= kvm_riscv_reg_id(env
, KVM_REG_RISCV_CONFIG
,
887 KVM_REG_RISCV_CONFIG_REG(mimpid
));
888 ret
= kvm_set_one_reg(cs
, id
, &cpu
->cfg
.mimpid
);
893 int kvm_arch_init_vcpu(CPUState
*cs
)
896 RISCVCPU
*cpu
= RISCV_CPU(cs
);
898 qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change
, cs
);
900 if (!object_dynamic_cast(OBJECT(cpu
), TYPE_RISCV_CPU_HOST
)) {
901 ret
= kvm_vcpu_set_machine_ids(cpu
, cs
);
907 kvm_riscv_update_cpu_misa_ext(cpu
, cs
);
908 kvm_riscv_update_cpu_cfg_isa_ext(cpu
, cs
);
913 int kvm_arch_msi_data_to_gsi(uint32_t data
)
918 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry
*route
,
919 int vector
, PCIDevice
*dev
)
924 int kvm_arch_get_default_type(MachineState
*ms
)
929 int kvm_arch_init(MachineState
*ms
, KVMState
*s
)
934 int kvm_arch_irqchip_create(KVMState
*s
)
936 if (kvm_kernel_irqchip_split()) {
937 error_report("-machine kernel_irqchip=split is not supported on RISC-V.");
942 * We can create the VAIA using the newer device control API.
944 return kvm_check_extension(s
, KVM_CAP_DEVICE_CTRL
);
947 int kvm_arch_process_async_events(CPUState
*cs
)
952 void kvm_arch_pre_run(CPUState
*cs
, struct kvm_run
*run
)
956 MemTxAttrs
kvm_arch_post_run(CPUState
*cs
, struct kvm_run
*run
)
958 return MEMTXATTRS_UNSPECIFIED
;
961 bool kvm_arch_stop_on_emulation_error(CPUState
*cs
)
966 static int kvm_riscv_handle_sbi(CPUState
*cs
, struct kvm_run
*run
)
970 switch (run
->riscv_sbi
.extension_id
) {
971 case SBI_EXT_0_1_CONSOLE_PUTCHAR
:
972 ch
= run
->riscv_sbi
.args
[0];
973 qemu_chr_fe_write(serial_hd(0)->be
, &ch
, sizeof(ch
));
975 case SBI_EXT_0_1_CONSOLE_GETCHAR
:
976 ret
= qemu_chr_fe_read_all(serial_hd(0)->be
, &ch
, sizeof(ch
));
977 if (ret
== sizeof(ch
)) {
978 run
->riscv_sbi
.ret
[0] = ch
;
980 run
->riscv_sbi
.ret
[0] = -1;
985 qemu_log_mask(LOG_UNIMP
,
986 "%s: un-handled SBI EXIT, specific reasons is %lu\n",
987 __func__
, run
->riscv_sbi
.extension_id
);
994 int kvm_arch_handle_exit(CPUState
*cs
, struct kvm_run
*run
)
997 switch (run
->exit_reason
) {
998 case KVM_EXIT_RISCV_SBI
:
999 ret
= kvm_riscv_handle_sbi(cs
, run
);
1002 qemu_log_mask(LOG_UNIMP
, "%s: un-handled exit reason %d\n",
1003 __func__
, run
->exit_reason
);
1010 void kvm_riscv_reset_vcpu(RISCVCPU
*cpu
)
1012 CPURISCVState
*env
= &cpu
->env
;
1014 if (!kvm_enabled()) {
1017 env
->pc
= cpu
->env
.kernel_addr
;
1018 env
->gpr
[10] = kvm_arch_vcpu_id(CPU(cpu
)); /* a0 */
1019 env
->gpr
[11] = cpu
->env
.fdt_addr
; /* a1 */
1023 void kvm_riscv_set_irq(RISCVCPU
*cpu
, int irq
, int level
)
1026 unsigned virq
= level
? KVM_INTERRUPT_SET
: KVM_INTERRUPT_UNSET
;
1028 if (irq
!= IRQ_S_EXT
) {
1029 perror("kvm riscv set irq != IRQ_S_EXT\n");
1033 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_INTERRUPT
, &virq
);
1035 perror("Set irq failed");
1040 bool kvm_arch_cpu_check_are_resettable(void)
1045 static int aia_mode
;
1047 static const char *kvm_aia_mode_str(uint64_t mode
)
1050 case KVM_DEV_RISCV_AIA_MODE_EMUL
:
1052 case KVM_DEV_RISCV_AIA_MODE_HWACCEL
:
1054 case KVM_DEV_RISCV_AIA_MODE_AUTO
:
1060 static char *riscv_get_kvm_aia(Object
*obj
, Error
**errp
)
1062 return g_strdup(kvm_aia_mode_str(aia_mode
));
/*
 * Setter for the "riscv-aia" accelerator property: accepts "emul",
 * "hwaccel" or "auto"; any other value sets an error and leaves the
 * current aia_mode unchanged.
 */
static void riscv_set_kvm_aia(Object *obj, const char *val, Error **errp)
{
    if (!strcmp(val, "emul")) {
        aia_mode = KVM_DEV_RISCV_AIA_MODE_EMUL;
    } else if (!strcmp(val, "hwaccel")) {
        aia_mode = KVM_DEV_RISCV_AIA_MODE_HWACCEL;
    } else if (!strcmp(val, "auto")) {
        aia_mode = KVM_DEV_RISCV_AIA_MODE_AUTO;
    } else {
        error_setg(errp, "Invalid KVM AIA mode");
        error_append_hint(errp, "Valid values are emul, hwaccel, and auto.\n");
    }
}
1079 void kvm_arch_accel_class_init(ObjectClass
*oc
)
1081 object_class_property_add_str(oc
, "riscv-aia", riscv_get_kvm_aia
,
1083 object_class_property_set_description(oc
, "riscv-aia",
1084 "Set KVM AIA mode. Valid values are "
1085 "emul, hwaccel, and auto. Default "
1087 object_property_set_default_str(object_class_property_find(oc
, "riscv-aia"),
1091 void kvm_riscv_aia_create(MachineState
*machine
, uint64_t group_shift
,
1092 uint64_t aia_irq_num
, uint64_t aia_msi_num
,
1093 uint64_t aplic_base
, uint64_t imsic_base
,
1098 uint64_t default_aia_mode
;
1099 uint64_t socket_count
= riscv_socket_count(machine
);
1100 uint64_t max_hart_per_socket
= 0;
1101 uint64_t socket
, base_hart
, hart_count
, socket_imsic_base
, imsic_addr
;
1102 uint64_t socket_bits
, hart_bits
, guest_bits
;
1104 aia_fd
= kvm_create_device(kvm_state
, KVM_DEV_TYPE_RISCV_AIA
, false);
1107 error_report("Unable to create in-kernel irqchip");
1111 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_CONFIG
,
1112 KVM_DEV_RISCV_AIA_CONFIG_MODE
,
1113 &default_aia_mode
, false, NULL
);
1115 error_report("KVM AIA: failed to get current KVM AIA mode");
1118 qemu_log("KVM AIA: default mode is %s\n",
1119 kvm_aia_mode_str(default_aia_mode
));
1121 if (default_aia_mode
!= aia_mode
) {
1122 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_CONFIG
,
1123 KVM_DEV_RISCV_AIA_CONFIG_MODE
,
1124 &aia_mode
, true, NULL
);
1126 warn_report("KVM AIA: failed to set KVM AIA mode");
1128 qemu_log("KVM AIA: set current mode to %s\n",
1129 kvm_aia_mode_str(aia_mode
));
1132 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_CONFIG
,
1133 KVM_DEV_RISCV_AIA_CONFIG_SRCS
,
1134 &aia_irq_num
, true, NULL
);
1136 error_report("KVM AIA: failed to set number of input irq lines");
1140 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_CONFIG
,
1141 KVM_DEV_RISCV_AIA_CONFIG_IDS
,
1142 &aia_msi_num
, true, NULL
);
1144 error_report("KVM AIA: failed to set number of msi");
1148 socket_bits
= find_last_bit(&socket_count
, BITS_PER_LONG
) + 1;
1149 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_CONFIG
,
1150 KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS
,
1151 &socket_bits
, true, NULL
);
1153 error_report("KVM AIA: failed to set group_bits");
1157 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_CONFIG
,
1158 KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT
,
1159 &group_shift
, true, NULL
);
1161 error_report("KVM AIA: failed to set group_shift");
1165 guest_bits
= guest_num
== 0 ? 0 :
1166 find_last_bit(&guest_num
, BITS_PER_LONG
) + 1;
1167 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_CONFIG
,
1168 KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS
,
1169 &guest_bits
, true, NULL
);
1171 error_report("KVM AIA: failed to set guest_bits");
1175 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_ADDR
,
1176 KVM_DEV_RISCV_AIA_ADDR_APLIC
,
1177 &aplic_base
, true, NULL
);
1179 error_report("KVM AIA: failed to set the base address of APLIC");
1183 for (socket
= 0; socket
< socket_count
; socket
++) {
1184 socket_imsic_base
= imsic_base
+ socket
* (1U << group_shift
);
1185 hart_count
= riscv_socket_hart_count(machine
, socket
);
1186 base_hart
= riscv_socket_first_hartid(machine
, socket
);
1188 if (max_hart_per_socket
< hart_count
) {
1189 max_hart_per_socket
= hart_count
;
1192 for (i
= 0; i
< hart_count
; i
++) {
1193 imsic_addr
= socket_imsic_base
+ i
* IMSIC_HART_SIZE(guest_bits
);
1194 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_ADDR
,
1195 KVM_DEV_RISCV_AIA_ADDR_IMSIC(i
+ base_hart
),
1196 &imsic_addr
, true, NULL
);
1198 error_report("KVM AIA: failed to set the IMSIC address for hart %d", i
);
1204 hart_bits
= find_last_bit(&max_hart_per_socket
, BITS_PER_LONG
) + 1;
1205 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_CONFIG
,
1206 KVM_DEV_RISCV_AIA_CONFIG_HART_BITS
,
1207 &hart_bits
, true, NULL
);
1209 error_report("KVM AIA: failed to set hart_bits");
1213 if (kvm_has_gsi_routing()) {
1214 for (uint64_t idx
= 0; idx
< aia_irq_num
+ 1; ++idx
) {
1215 /* KVM AIA only has one APLIC instance */
1216 kvm_irqchip_add_irq_route(kvm_state
, idx
, 0, idx
);
1218 kvm_gsi_routing_allowed
= true;
1219 kvm_irqchip_commit_routes(kvm_state
);
1222 ret
= kvm_device_access(aia_fd
, KVM_DEV_RISCV_AIA_GRP_CTRL
,
1223 KVM_DEV_RISCV_AIA_CTRL_INIT
,
1226 error_report("KVM AIA: initialized fail");
1230 kvm_msi_via_irqfd_allowed
= kvm_irqfds_enabled();