/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "kvm.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif
typedef struct kvm_userspace_memory_region KVMSlot;

/* Per-VM state: the memory slot table and the /dev/kvm and VM file descriptors */
struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
};

static KVMState *kvm_state;
static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    /* A slot with a zero memory_size is unused */
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        if (s->slots[i].memory_size == 0)
            return &s->slots[i];
    }

    return NULL;
}
static KVMSlot *kvm_lookup_slot(KVMState *s, target_phys_addr_t start_addr)
{
    int i;

    /* Return the slot whose range covers start_addr, if any */
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr >= mem->guest_phys_addr &&
            start_addr < (mem->guest_phys_addr + mem->memory_size))
            return mem;
    }

    return NULL;
}
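/*
 * Create a vcpu for 'env' and mmap the shared kvm_run structure that the
 * kernel uses to report exit information back to userspace.
 */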
int kvm_init_vcpu(CPUState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    dprintf("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU,
                       (void *)(unsigned long)env->cpu_index);
    if (ret < 0) {
        dprintf("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        dprintf("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        dprintf("mmap'ing vcpu state failed\n");
        goto err;
    }

    ret = kvm_arch_init_vcpu(env);

err:
    return ret;
}
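/*
 * One-time initialization: open /dev/kvm, validate the API version,
 * create the VM and check for KVM_CAP_USER_MEMORY before handing the
 * state off to the architecture-specific setup.
 */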
int kvm_init(int smp_cpus)
{
    KVMState *s;
    int ret;
    int i;

    s = qemu_mallocz(sizeof(KVMState));
    if (s == NULL)
        return -ENOMEM;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++)
        s->slots[i].slot = i;

    s->vmfd = -1;

    s->fd = open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0)
            ret = -EINVAL;
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
        ret = s->vmfd;
        goto err;
    }

    /* Initially, KVM allocated its own memory and we had to jump through
     * hoops to make phys_ram_base point to this.  Modern versions of KVM
     * just use a user allocated buffer, so we can use phys_ram_base
     * unmodified.  Make sure we have a sufficiently modern version of KVM.
     */
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY);
    if (ret <= 0) {
        if (ret == 0)
            ret = -EINVAL;
        fprintf(stderr, "kvm does not support KVM_CAP_USER_MEMORY\n");
        goto err;
    }

    ret = kvm_arch_init(s, smp_cpus);
    if (ret < 0)
        goto err;

    kvm_state = s;

    return 0;

err:
    if (s->vmfd != -1)
        close(s->vmfd);
    if (s->fd != -1)
        close(s->fd);
    qemu_free(s);

    return ret;
}
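/*
 * Satisfy a KVM_EXIT_IO exit: perform 'count' port I/O accesses of 'size'
 * bytes each, copying data to or from the buffer embedded in the kvm_run page.
 */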
static int kvm_handle_io(CPUState *env, uint16_t port, void *data,
                         int direction, int size, uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(env, port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(env, port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(env, port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(env, port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(env, port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(env, port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }

    return 1;
}
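/*
 * Main vcpu execution loop: enter the guest with KVM_RUN and dispatch each
 * exit reason until control needs to return to the QEMU main loop.
 */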
int kvm_cpu_exec(CPUState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret;

    dprintf("kvm_cpu_exec()\n");

    do {
        kvm_arch_pre_run(env, run);

        if ((env->interrupt_request & CPU_INTERRUPT_EXIT)) {
            dprintf("interrupt exit requested\n");
            ret = 0;
            break;
        }

        ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
        kvm_arch_post_run(env, run);

        if (ret == -EINTR || ret == -EAGAIN) {
            dprintf("io window exit\n");
            ret = 0;
            break;
        }

        if (ret < 0) {
            dprintf("kvm run failed %s\n", strerror(-ret));
            abort();
        }

        ret = 0; /* exit loop */
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            dprintf("handle_io\n");
            ret = kvm_handle_io(env, run->io.port,
                                (uint8_t *)run + run->io.data_offset,
                                run->io.direction,
                                run->io.size,
                                run->io.count);
            break;
        case KVM_EXIT_MMIO:
            dprintf("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 1;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            dprintf("irq_window_open\n");
            break;
        case KVM_EXIT_SHUTDOWN:
            dprintf("shutdown\n");
            qemu_system_reset_request();
            ret = 1;
            break;
        case KVM_EXIT_UNKNOWN:
            dprintf("kvm_exit_unknown\n");
            break;
        case KVM_EXIT_FAIL_ENTRY:
            dprintf("kvm_exit_fail_entry\n");
            break;
        case KVM_EXIT_EXCEPTION:
            dprintf("kvm_exit_exception\n");
            break;
        case KVM_EXIT_DEBUG:
            dprintf("kvm_exit_debug\n");
            break;
        default:
            dprintf("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret > 0);

    return ret;
}
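/*
 * Keep KVM's memory slot table in sync with QEMU's view of guest physical
 * memory: unregister, reuse or allocate a slot for the given range.
 */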
void kvm_set_phys_mem(target_phys_addr_t start_addr,
                      ram_addr_t size,
                      ram_addr_t phys_offset)
{
    KVMState *s = kvm_state;
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    KVMSlot *mem;

    /* KVM does not support read-only slots */
    phys_offset &= ~IO_MEM_ROM;

    mem = kvm_lookup_slot(s, start_addr);
    if (mem) {
        if (flags == IO_MEM_UNASSIGNED) {
            /* Unregister the slot that used to cover this range */
            mem->memory_size = 0;
            mem->guest_phys_addr = start_addr;
            mem->userspace_addr = 0;

            kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, mem);
            return;
        } else if (start_addr >= mem->guest_phys_addr &&
                   (start_addr + size) <= (mem->guest_phys_addr +
                                           mem->memory_size)) {
            /* The range is already covered by an existing slot */
            return;
        }
    }

    /* KVM does not need to know about this memory */
    if (flags >= IO_MEM_UNASSIGNED)
        return;

    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->guest_phys_addr = start_addr;
    mem->userspace_addr = (unsigned long)(phys_ram_base + phys_offset);

    kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, mem);
    /* FIXME deal with errors */
}
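/*
 * Thin ioctl wrappers for the /dev/kvm, VM and vcpu file descriptors.
 * Failures are reported as negative errno values.
 */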
int kvm_ioctl(KVMState *s, int type, void *data)
{
    int ret;

    ret = ioctl(s->fd, type, data);
    if (ret == -1)
        ret = -errno;

    return ret;
}
int kvm_vm_ioctl(KVMState *s, int type, void *data)
{
    int ret;

    ret = ioctl(s->vmfd, type, data);
    if (ret == -1)
        ret = -errno;

    return ret;
}
int kvm_vcpu_ioctl(CPUState *env, int type, void *data)
{
    int ret;

    ret = ioctl(env->kvm_fd, type, data);
    if (ret == -1)
        ret = -errno;

    return ret;
}