qemu.git: target/riscv/kvm.c
/*
 * RISC-V implementation of KVM hooks
 *
 * Copyright (c) 2020 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "cpu.h"
#include "trace.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/log.h"
#include "hw/loader.h"
#include "kvm_riscv.h"
#include "sbi_ecall_interface.h"
#include "chardev/char-fe.h"
#include "migration/migration.h"
#include "sysemu/runstate.h"
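
/*
 * Build a KVM "one reg" ID for a RISC-V register: the architecture bits
 * (KVM_REG_RISCV), the register class given by @type (core, CSR, timer,
 * FP, config), the index within that class, and a size field derived from
 * the vcpu's XLEN.
 */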
static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
                                 uint64_t idx)
{
    uint64_t id = KVM_REG_RISCV | type | idx;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        id |= KVM_REG_SIZE_U32;
        break;
    case MXL_RV64:
        id |= KVM_REG_SIZE_U64;
        break;
    default:
        g_assert_not_reached();
    }
    return id;
}

#define RISCV_CORE_REG(env, name)  kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, \
                 KVM_REG_RISCV_CORE_REG(name))

#define RISCV_CSR_REG(env, name)  kvm_riscv_reg_id(env, KVM_REG_RISCV_CSR, \
                 KVM_REG_RISCV_CSR_REG(name))

#define RISCV_TIMER_REG(env, name)  kvm_riscv_reg_id(env, KVM_REG_RISCV_TIMER, \
                 KVM_REG_RISCV_TIMER_REG(name))

#define RISCV_FP_F_REG(env, idx)  kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_F, idx)

#define RISCV_FP_D_REG(env, idx)  kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_D, idx)
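
/*
 * Accessors for individual guest registers via KVM_GET_ONE_REG /
 * KVM_SET_ONE_REG.  The CSR helpers propagate the error to the caller (and
 * therefore may only be used from functions returning int), while the
 * timer helpers abort() on failure.
 */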
#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \
    do { \
        int ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
        if (ret) { \
            return ret; \
        } \
    } while (0)

#define KVM_RISCV_SET_CSR(cs, env, csr, reg) \
    do { \
        int ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
        if (ret) { \
            return ret; \
        } \
    } while (0)

#define KVM_RISCV_GET_TIMER(cs, env, name, reg) \
    do { \
        int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(env, name), &reg); \
        if (ret) { \
            abort(); \
        } \
    } while (0)

#define KVM_RISCV_SET_TIMER(cs, env, name, reg) \
    do { \
        int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(env, name), &reg); \
        if (ret) { \
            abort(); \
        } \
    } while (0)
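
/*
 * Core register synchronisation: pc and the general purpose registers
 * x1..x31.  x0 is hardwired to zero, so it is never transferred.
 */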
static int kvm_riscv_get_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    target_ulong reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    ret = kvm_get_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
    if (ret) {
        return ret;
    }
    env->pc = reg;

    for (i = 1; i < 32; i++) {
        uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
        ret = kvm_get_one_reg(cs, id, &reg);
        if (ret) {
            return ret;
        }
        env->gpr[i] = reg;
    }

    return ret;
}

static int kvm_riscv_put_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    target_ulong reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    reg = env->pc;
    ret = kvm_set_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
    if (ret) {
        return ret;
    }

    for (i = 1; i < 32; i++) {
        uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
        reg = env->gpr[i];
        ret = kvm_set_one_reg(cs, id, &reg);
        if (ret) {
            return ret;
        }
    }

    return ret;
}

static int kvm_riscv_get_regs_csr(CPUState *cs)
{
    int ret = 0;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    KVM_RISCV_GET_CSR(cs, env, sstatus, env->mstatus);
    KVM_RISCV_GET_CSR(cs, env, sie, env->mie);
    KVM_RISCV_GET_CSR(cs, env, stvec, env->stvec);
    KVM_RISCV_GET_CSR(cs, env, sscratch, env->sscratch);
    KVM_RISCV_GET_CSR(cs, env, sepc, env->sepc);
    KVM_RISCV_GET_CSR(cs, env, scause, env->scause);
    KVM_RISCV_GET_CSR(cs, env, stval, env->stval);
    KVM_RISCV_GET_CSR(cs, env, sip, env->mip);
    KVM_RISCV_GET_CSR(cs, env, satp, env->satp);

    return ret;
}

static int kvm_riscv_put_regs_csr(CPUState *cs)
{
    int ret = 0;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    KVM_RISCV_SET_CSR(cs, env, sstatus, env->mstatus);
    KVM_RISCV_SET_CSR(cs, env, sie, env->mie);
    KVM_RISCV_SET_CSR(cs, env, stvec, env->stvec);
    KVM_RISCV_SET_CSR(cs, env, sscratch, env->sscratch);
    KVM_RISCV_SET_CSR(cs, env, sepc, env->sepc);
    KVM_RISCV_SET_CSR(cs, env, scause, env->scause);
    KVM_RISCV_SET_CSR(cs, env, stval, env->stval);
    KVM_RISCV_SET_CSR(cs, env, sip, env->mip);
    KVM_RISCV_SET_CSR(cs, env, satp, env->satp);

    return ret;
}
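
/*
 * Floating point state is transferred as the 64-bit D registers when the
 * guest has RVD, otherwise as the 32-bit F registers when it has RVF.
 */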
static int kvm_riscv_get_regs_fp(CPUState *cs)
{
    int ret = 0;
    int i;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (riscv_has_ext(env, RVD)) {
        uint64_t reg;
        for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
            env->fpr[i] = reg;
        }
        return ret;
    }

    if (riscv_has_ext(env, RVF)) {
        uint32_t reg;
        for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
            env->fpr[i] = reg;
        }
        return ret;
    }

    return ret;
}

static int kvm_riscv_put_regs_fp(CPUState *cs)
{
    int ret = 0;
    int i;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (riscv_has_ext(env, RVD)) {
        uint64_t reg;
        for (i = 0; i < 32; i++) {
            reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
        }
        return ret;
    }

    if (riscv_has_ext(env, RVF)) {
        uint32_t reg;
        for (i = 0; i < 32; i++) {
            reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
        }
        return ret;
    }

    return ret;
}
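
/*
 * The timer registers (time, compare, state, frequency) are cached in env
 * and guarded by kvm_timer_dirty: they are read from KVM when the VM stops
 * and written back when it resumes (see kvm_riscv_vm_state_change()).
 */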
static void kvm_riscv_get_regs_timer(CPUState *cs)
{
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (env->kvm_timer_dirty) {
        return;
    }

    KVM_RISCV_GET_TIMER(cs, env, time, env->kvm_timer_time);
    KVM_RISCV_GET_TIMER(cs, env, compare, env->kvm_timer_compare);
    KVM_RISCV_GET_TIMER(cs, env, state, env->kvm_timer_state);
    KVM_RISCV_GET_TIMER(cs, env, frequency, env->kvm_timer_frequency);

    env->kvm_timer_dirty = true;
}

static void kvm_riscv_put_regs_timer(CPUState *cs)
{
    uint64_t reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (!env->kvm_timer_dirty) {
        return;
    }

    KVM_RISCV_SET_TIMER(cs, env, time, env->kvm_timer_time);
    KVM_RISCV_SET_TIMER(cs, env, compare, env->kvm_timer_compare);

    /*
     * Setting RISCV_TIMER_REG(state) causes an error from KVM when
     * env->kvm_timer_state == 0.  That would be better fixed in KVM, but
     * working around it in QEMU is fine for now.
     * TODO: adapt here if KVM changes.
     */
    if (env->kvm_timer_state) {
        KVM_RISCV_SET_TIMER(cs, env, state, env->kvm_timer_state);
    }

    /*
     * For now, migration will not work between hosts with different timer
     * frequencies.  Therefore, check during migration that the destination
     * frequency matches the one recorded on the source.
     */
    if (migration_is_running(migrate_get_current()->state)) {
        KVM_RISCV_GET_TIMER(cs, env, frequency, reg);
        if (reg != env->kvm_timer_frequency) {
            error_report("Dst Hosts timer frequency != Src Hosts");
        }
    }

    env->kvm_timer_dirty = false;
}

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};
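
/*
 * Full register synchronisation used by the generic KVM code: core
 * registers first, then CSRs, then floating point state.
 */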
int kvm_arch_get_registers(CPUState *cs)
{
    int ret = 0;

    ret = kvm_riscv_get_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_fp(cs);
    if (ret) {
        return ret;
    }

    return ret;
}

int kvm_arch_put_registers(CPUState *cs, int level)
{
    int ret = 0;

    ret = kvm_riscv_put_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_fp(cs);
    if (ret) {
        return ret;
    }

    return ret;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

static void kvm_riscv_vm_state_change(void *opaque, bool running,
                                      RunState state)
{
    CPUState *cs = opaque;

    if (running) {
        kvm_riscv_put_regs_timer(cs);
    } else {
        kvm_riscv_get_regs_timer(cs);
    }
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}
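
/*
 * Per-vcpu setup: install the VM state change handler used for timer
 * save/restore and read the ISA supported by the KVM host into misa_ext.
 */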
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret = 0;
    target_ulong isa;
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    uint64_t id;

    qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change, cs);

    id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
                          KVM_REG_RISCV_CONFIG_REG(isa));
    ret = kvm_get_one_reg(cs, id, &isa);
    if (ret) {
        return ret;
    }
    env->misa_ext = isa;

    return ret;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return 0;
}

void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}
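
/*
 * SBI calls that KVM does not complete in the kernel are forwarded here.
 * Only the legacy SBI v0.1 console putchar/getchar calls are implemented,
 * backed by the first serial chardev; everything else is reported as
 * unimplemented.
 */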
static int kvm_riscv_handle_sbi(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;
    unsigned char ch;
    switch (run->riscv_sbi.extension_id) {
    case SBI_EXT_0_1_CONSOLE_PUTCHAR:
        ch = run->riscv_sbi.args[0];
        qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch));
        break;
    case SBI_EXT_0_1_CONSOLE_GETCHAR:
        ret = qemu_chr_fe_read_all(serial_hd(0)->be, &ch, sizeof(ch));
        if (ret == sizeof(ch)) {
            run->riscv_sbi.args[0] = ch;
        } else {
            run->riscv_sbi.args[0] = -1;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s: un-handled SBI EXIT, extension id %lu\n",
                      __func__, run->riscv_sbi.extension_id);
        ret = -1;
        break;
    }
    return ret;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;
    switch (run->exit_reason) {
    case KVM_EXIT_RISCV_SBI:
        ret = kvm_riscv_handle_sbi(cs, run);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
                      __func__, run->exit_reason);
        ret = -1;
        break;
    }
    return ret;
}
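
/*
 * Reset places the vcpu at the loaded kernel entry point with the hartid
 * in a0 and the FDT address in a1, matching the RISC-V Linux boot
 * convention, and clears satp so translation starts disabled.
 */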
void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;

    if (!kvm_enabled()) {
        return;
    }
    env->pc = cpu->env.kernel_addr;
    env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */
    env->gpr[11] = cpu->env.fdt_addr;          /* a1 */
    env->satp = 0;
}
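
/*
 * Only the supervisor external interrupt (IRQ_S_EXT) can be injected via
 * the KVM_INTERRUPT ioctl; any other IRQ here is a programming error.
 */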
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)
{
    int ret;
    unsigned virq = level ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET;

    if (irq != IRQ_S_EXT) {
        perror("kvm riscv set irq != IRQ_S_EXT\n");
        abort();
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
    if (ret < 0) {
        perror("Set irq failed");
        abort();
    }
}

bool kvm_arch_cpu_check_are_resettable(void)
{
    return true;
}