[qemu/ar7.git] / target/riscv/kvm.c
blob 30f21453d69ca0aa71737dcaa9b27d84aa4d6713
/*
 * RISC-V implementation of KVM hooks
 *
 * Copyright (c) 2020 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "cpu.h"
#include "trace.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qemu/log.h"
#include "hw/loader.h"
#include "kvm_riscv.h"
#include "sbi_ecall_interface.h"
#include "chardev/char-fe.h"
#include "migration/migration.h"
#include "sysemu/runstate.h"
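/*
 * Build a KVM "one reg" ID for a register of the given type and index,
 * with the register size chosen from the vCPU's XLEN (32- or 64-bit).
 */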
static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
                                 uint64_t idx)
{
    uint64_t id = KVM_REG_RISCV | type | idx;

    switch (riscv_cpu_mxl(env)) {
    case MXL_RV32:
        id |= KVM_REG_SIZE_U32;
        break;
    case MXL_RV64:
        id |= KVM_REG_SIZE_U64;
        break;
    default:
        g_assert_not_reached();
    }
    return id;
}
#define RISCV_CORE_REG(env, name)  kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, \
                 KVM_REG_RISCV_CORE_REG(name))

#define RISCV_CSR_REG(env, name)  kvm_riscv_reg_id(env, KVM_REG_RISCV_CSR, \
                 KVM_REG_RISCV_CSR_REG(name))

#define RISCV_TIMER_REG(env, name)  kvm_riscv_reg_id(env, KVM_REG_RISCV_TIMER, \
                 KVM_REG_RISCV_TIMER_REG(name))

#define RISCV_FP_F_REG(env, idx)  kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_F, idx)

#define RISCV_FP_D_REG(env, idx)  kvm_riscv_reg_id(env, KVM_REG_RISCV_FP_D, idx)
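/*
 * The CSR accessor macros return from the enclosing function on error, so
 * they may only be used where an int return value propagates the failure;
 * the timer accessors have no error path and abort() instead.
 */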
#define KVM_RISCV_GET_CSR(cs, env, csr, reg) \
    do { \
        int ret = kvm_get_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
        if (ret) { \
            return ret; \
        } \
    } while (0)

#define KVM_RISCV_SET_CSR(cs, env, csr, reg) \
    do { \
        int ret = kvm_set_one_reg(cs, RISCV_CSR_REG(env, csr), &reg); \
        if (ret) { \
            return ret; \
        } \
    } while (0)

#define KVM_RISCV_GET_TIMER(cs, env, name, reg) \
    do { \
        int ret = kvm_get_one_reg(cs, RISCV_TIMER_REG(env, name), &reg); \
        if (ret) { \
            abort(); \
        } \
    } while (0)
#define KVM_RISCV_SET_TIMER(cs, env, name, reg) \
    do { \
        int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(env, name), &reg); \
        if (ret) { \
            abort(); \
        } \
    } while (0)
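/*
 * Sync pc and the general-purpose registers x1..x31 between KVM and
 * CPURISCVState (x0 is hardwired to zero and is skipped).
 */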
static int kvm_riscv_get_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    target_ulong reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    ret = kvm_get_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
    if (ret) {
        return ret;
    }
    env->pc = reg;

    for (i = 1; i < 32; i++) {
        uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
        ret = kvm_get_one_reg(cs, id, &reg);
        if (ret) {
            return ret;
        }
        env->gpr[i] = reg;
    }

    return ret;
}
static int kvm_riscv_put_regs_core(CPUState *cs)
{
    int ret = 0;
    int i;
    target_ulong reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    reg = env->pc;
    ret = kvm_set_one_reg(cs, RISCV_CORE_REG(env, regs.pc), &reg);
    if (ret) {
        return ret;
    }

    for (i = 1; i < 32; i++) {
        uint64_t id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CORE, i);
        reg = env->gpr[i];
        ret = kvm_set_one_reg(cs, id, &reg);
        if (ret) {
            return ret;
        }
    }

    return ret;
}
static int kvm_riscv_get_regs_csr(CPUState *cs)
{
    int ret = 0;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    KVM_RISCV_GET_CSR(cs, env, sstatus, env->mstatus);
    KVM_RISCV_GET_CSR(cs, env, sie, env->mie);
    KVM_RISCV_GET_CSR(cs, env, stvec, env->stvec);
    KVM_RISCV_GET_CSR(cs, env, sscratch, env->sscratch);
    KVM_RISCV_GET_CSR(cs, env, sepc, env->sepc);
    KVM_RISCV_GET_CSR(cs, env, scause, env->scause);
    KVM_RISCV_GET_CSR(cs, env, stval, env->stval);
    KVM_RISCV_GET_CSR(cs, env, sip, env->mip);
    KVM_RISCV_GET_CSR(cs, env, satp, env->satp);

    return ret;
}
static int kvm_riscv_put_regs_csr(CPUState *cs)
{
    int ret = 0;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    KVM_RISCV_SET_CSR(cs, env, sstatus, env->mstatus);
    KVM_RISCV_SET_CSR(cs, env, sie, env->mie);
    KVM_RISCV_SET_CSR(cs, env, stvec, env->stvec);
    KVM_RISCV_SET_CSR(cs, env, sscratch, env->sscratch);
    KVM_RISCV_SET_CSR(cs, env, sepc, env->sepc);
    KVM_RISCV_SET_CSR(cs, env, scause, env->scause);
    KVM_RISCV_SET_CSR(cs, env, stval, env->stval);
    KVM_RISCV_SET_CSR(cs, env, sip, env->mip);
    KVM_RISCV_SET_CSR(cs, env, satp, env->satp);

    return ret;
}
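/*
 * Sync the floating-point registers: use the 64-bit D-extension view when
 * RVD is present, otherwise fall back to the 32-bit F-extension view.
 */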
static int kvm_riscv_get_regs_fp(CPUState *cs)
{
    int ret = 0;
    int i;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (riscv_has_ext(env, RVD)) {
        uint64_t reg;
        for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_D_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
            env->fpr[i] = reg;
        }
        return ret;
    }

    if (riscv_has_ext(env, RVF)) {
        uint32_t reg;
        for (i = 0; i < 32; i++) {
            ret = kvm_get_one_reg(cs, RISCV_FP_F_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
            env->fpr[i] = reg;
        }
        return ret;
    }

    return ret;
}
static int kvm_riscv_put_regs_fp(CPUState *cs)
{
    int ret = 0;
    int i;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (riscv_has_ext(env, RVD)) {
        uint64_t reg;
        for (i = 0; i < 32; i++) {
            reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_D_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
        }
        return ret;
    }

    if (riscv_has_ext(env, RVF)) {
        uint32_t reg;
        for (i = 0; i < 32; i++) {
            reg = env->fpr[i];
            ret = kvm_set_one_reg(cs, RISCV_FP_F_REG(env, i), &reg);
            if (ret) {
                return ret;
            }
        }
        return ret;
    }

    return ret;
}
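/*
 * The KVM timer state is cached in env->kvm_timer_* and guarded by
 * kvm_timer_dirty: the getter reads it out of KVM once, the putter writes
 * it back and clears the flag.
 */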
static void kvm_riscv_get_regs_timer(CPUState *cs)
{
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (env->kvm_timer_dirty) {
        return;
    }

    KVM_RISCV_GET_TIMER(cs, env, time, env->kvm_timer_time);
    KVM_RISCV_GET_TIMER(cs, env, compare, env->kvm_timer_compare);
    KVM_RISCV_GET_TIMER(cs, env, state, env->kvm_timer_state);
    KVM_RISCV_GET_TIMER(cs, env, frequency, env->kvm_timer_frequency);

    env->kvm_timer_dirty = true;
}
static void kvm_riscv_put_regs_timer(CPUState *cs)
{
    uint64_t reg;
    CPURISCVState *env = &RISCV_CPU(cs)->env;

    if (!env->kvm_timer_dirty) {
        return;
    }

    KVM_RISCV_SET_TIMER(cs, env, time, env->kvm_timer_time);
    KVM_RISCV_SET_TIMER(cs, env, compare, env->kvm_timer_compare);

    /*
     * Setting the RISCV_TIMER_REG(state) register while
     * env->kvm_timer_state == 0 triggers an error from KVM. This should
     * ideally be fixed in KVM, but for now work around it in QEMU.
     * TODO: adapt here if KVM changes.
     */
    if (env->kvm_timer_state) {
        KVM_RISCV_SET_TIMER(cs, env, state, env->kvm_timer_state);
    }

    /*
     * For now, migration does not work between hosts with different timer
     * frequencies, so check during migration that they match.
     */
    if (migration_is_running(migrate_get_current()->state)) {
        KVM_RISCV_GET_TIMER(cs, env, frequency, reg);
        if (reg != env->kvm_timer_frequency) {
            error_report("Destination host timer frequency != source host");
        }
    }

    env->kvm_timer_dirty = false;
}
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};
int kvm_arch_get_registers(CPUState *cs)
{
    int ret = 0;

    ret = kvm_riscv_get_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_get_regs_fp(cs);
    if (ret) {
        return ret;
    }

    return ret;
}
int kvm_arch_put_registers(CPUState *cs, int level)
{
    int ret = 0;

    ret = kvm_riscv_put_regs_core(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_csr(cs);
    if (ret) {
        return ret;
    }

    ret = kvm_riscv_put_regs_fp(cs);
    if (ret) {
        return ret;
    }

    return ret;
}
int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}
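/*
 * VM state change hook: write the cached timer state back into KVM when the
 * VM resumes, and read it out of KVM when the VM stops.
 */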
static void kvm_riscv_vm_state_change(void *opaque, bool running,
                                      RunState state)
{
    CPUState *cs = opaque;

    if (running) {
        kvm_riscv_put_regs_timer(cs);
    } else {
        kvm_riscv_get_regs_timer(cs);
    }
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}
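/*
 * Per-vCPU init: register the VM state-change handler and read the ISA
 * exposed by KVM into env->misa_ext.
 */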
int kvm_arch_init_vcpu(CPUState *cs)
{
    int ret = 0;
    target_ulong isa;
    RISCVCPU *cpu = RISCV_CPU(cs);
    CPURISCVState *env = &cpu->env;
    uint64_t id;

    qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change, cs);

    id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
                          KVM_REG_RISCV_CONFIG_REG(isa));
    ret = kvm_get_one_reg(cs, id, &isa);
    if (ret) {
        return ret;
    }
    env->misa_ext = isa;

    return ret;
}
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    return 0;
}

int kvm_arch_irqchip_create(KVMState *s)
{
    return 0;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return 0;
}

void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    return true;
}
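/*
 * Handle SBI v0.1 console calls from the guest by forwarding putchar/getchar
 * to the first serial chardev; other SBI calls are reported as unimplemented.
 */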
static int kvm_riscv_handle_sbi(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;
    unsigned char ch;
    switch (run->riscv_sbi.extension_id) {
    case SBI_EXT_0_1_CONSOLE_PUTCHAR:
        ch = run->riscv_sbi.args[0];
        qemu_chr_fe_write(serial_hd(0)->be, &ch, sizeof(ch));
        break;
    case SBI_EXT_0_1_CONSOLE_GETCHAR:
        ret = qemu_chr_fe_read_all(serial_hd(0)->be, &ch, sizeof(ch));
        if (ret == sizeof(ch)) {
            run->riscv_sbi.args[0] = ch;
        } else {
            run->riscv_sbi.args[0] = -1;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s: unhandled SBI exit, extension_id %lu\n",
                      __func__, run->riscv_sbi.extension_id);
        ret = -1;
        break;
    }
    return ret;
}
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;
    switch (run->exit_reason) {
    case KVM_EXIT_RISCV_SBI:
        ret = kvm_riscv_handle_sbi(cs, run);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "%s: unhandled exit reason %d\n",
                      __func__, run->exit_reason);
        ret = -1;
        break;
    }
    return ret;
}
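/*
 * Under KVM, reset the vCPU to the boot protocol entry state: pc at the
 * kernel entry point, a0 = hart id, a1 = FDT address, and satp cleared.
 */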
void kvm_riscv_reset_vcpu(RISCVCPU *cpu)
{
    CPURISCVState *env = &cpu->env;

    if (!kvm_enabled()) {
        return;
    }
    env->pc = cpu->env.kernel_addr;
    env->gpr[10] = kvm_arch_vcpu_id(CPU(cpu)); /* a0 */
    env->gpr[11] = cpu->env.fdt_addr;          /* a1 */
    env->satp = 0;
}
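/*
 * Assert or deassert the supervisor external interrupt line via the
 * KVM_INTERRUPT ioctl; only IRQ_S_EXT is supported.
 */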
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level)
{
    int ret;
    unsigned virq = level ? KVM_INTERRUPT_SET : KVM_INTERRUPT_UNSET;

    if (irq != IRQ_S_EXT) {
        perror("kvm riscv set irq != IRQ_S_EXT\n");
        abort();
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
    if (ret < 0) {
        perror("Set irq failed");
        abort();
    }
}
bool kvm_arch_cpu_check_are_resettable(void)
{
    return true;
}

void kvm_arch_accel_class_init(ObjectClass *oc)
{
}