/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "kvm.h"

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

typedef struct kvm_userspace_memory_region KVMSlot;

int kvm_allowed = 0;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
};

static KVMState *kvm_state;

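/*
 * Slot bookkeeping: each KVMSlot mirrors one struct
 * kvm_userspace_memory_region registered with the kernel.  A slot with
 * memory_size == 0 is treated as free, kvm_alloc_slot() hands out the
 * first such slot, and kvm_lookup_slot() finds the slot covering a
 * given guest physical address.
 */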
static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        if (s->slots[i].memory_size == 0)
            return &s->slots[i];
    }

    return NULL;
}

static KVMSlot *kvm_lookup_slot(KVMState *s, target_phys_addr_t start_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr >= mem->guest_phys_addr &&
            start_addr < (mem->guest_phys_addr + mem->memory_size))
            return mem;
    }

    return NULL;
}

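/*
 * Create a KVM vcpu for 'env' (KVM_CREATE_VCPU) and mmap its shared
 * kvm_run structure.  On success env->kvm_fd and env->kvm_run are
 * valid; on failure a negative errno-style value is returned.
 */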
int kvm_init_vcpu(CPUState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    dprintf("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU,
                       (void *)(unsigned long)env->cpu_index);
    if (ret < 0) {
        dprintf("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size; /* propagate the -errno from the failed ioctl */
        dprintf("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        dprintf("mmap'ing vcpu state failed\n");
        goto err;
    }

    ret = kvm_arch_init_vcpu(env);

err:
    return ret;
}

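/*
 * Open /dev/kvm, sanity-check the API version, create the VM and make
 * sure the kernel supports user-allocated guest memory
 * (KVM_CAP_USER_MEMORY) before handing over to kvm_arch_init().
 * Returns 0 on success or a negative error code.
 */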
int kvm_init(int smp_cpus)
{
    KVMState *s;
    int ret;
    int i;

    if (smp_cpus > 1)
        return -EINVAL;

    s = qemu_mallocz(sizeof(KVMState));
    if (s == NULL)
        return -ENOMEM;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++)
        s->slots[i].slot = i;

    s->vmfd = -1;
    s->fd = open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0)
            ret = -EINVAL;
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
        ret = s->vmfd; /* kvm_ioctl() already converted errno */
        goto err;
    }

    /* initially, KVM allocated its own memory and we had to jump through
     * hoops to make phys_ram_base point to this.  Modern versions of KVM
     * just use a user allocated buffer so we can use phys_ram_base
     * unmodified.  Make sure we have a sufficiently modern version of KVM.
     */
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY);
    if (ret <= 0) {
        if (ret == 0)
            ret = -EINVAL;
        fprintf(stderr, "kvm does not support KVM_CAP_USER_MEMORY\n");
        goto err;
    }

    ret = kvm_arch_init(s, smp_cpus);
    if (ret < 0)
        goto err;

    kvm_state = s;

    return 0;

err:
    if (s) {
        if (s->vmfd != -1)
            close(s->vmfd);
        if (s->fd != -1)
            close(s->fd);
    }
    qemu_free(s);

    return ret;
}

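/*
 * Replay a port I/O access described by a KVM_EXIT_IO exit: the data for
 * the whole 'count' burst lives inside the shared kvm_run mapping, and
 * each element is forwarded to the cpu_inb/w/l() or cpu_outb/w/l() helpers.
 */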
static int kvm_handle_io(CPUState *env, uint16_t port, void *data,
                         int direction, int size, uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(env, port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(env, port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(env, port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(env, port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(env, port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(env, port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }

    return 1;
}

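/*
 * Main execution loop for a KVM vcpu: enter the guest with KVM_RUN and
 * dispatch on run->exit_reason.  A handler returning > 0 re-enters the
 * guest immediately; 0 drops back to the caller (e.g. to service the
 * main loop); a negative KVM_RUN result other than EINTR/EAGAIN aborts.
 */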
int kvm_cpu_exec(CPUState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret;

    dprintf("kvm_cpu_exec()\n");

    do {
        kvm_arch_pre_run(env, run);

        if ((env->interrupt_request & CPU_INTERRUPT_EXIT)) {
            dprintf("interrupt exit requested\n");
            ret = 0;
            break;
        }

        ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
        kvm_arch_post_run(env, run);

        if (ret == -EINTR || ret == -EAGAIN) {
            dprintf("io window exit\n");
            ret = 0;
            break;
        }

        if (ret < 0) {
            dprintf("kvm run failed %s\n", strerror(-ret));
            abort();
        }

        ret = 0; /* exit loop */
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            dprintf("handle_io\n");
            ret = kvm_handle_io(env, run->io.port,
                                (uint8_t *)run + run->io.data_offset,
                                run->io.direction,
                                run->io.size,
                                run->io.count);
            break;
        case KVM_EXIT_MMIO:
            dprintf("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 1;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            dprintf("irq_window_open\n");
            break;
        case KVM_EXIT_SHUTDOWN:
            dprintf("shutdown\n");
            qemu_system_reset_request();
            ret = 1;
            break;
        case KVM_EXIT_UNKNOWN:
            dprintf("kvm_exit_unknown\n");
            break;
        case KVM_EXIT_FAIL_ENTRY:
            dprintf("kvm_exit_fail_entry\n");
            break;
        case KVM_EXIT_EXCEPTION:
            dprintf("kvm_exit_exception\n");
            break;
        case KVM_EXIT_DEBUG:
            dprintf("kvm_exit_debug\n");
            break;
        default:
            dprintf("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret > 0);

    return ret;
}

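/*
 * Tell the kernel about a change in the guest physical memory map.
 * RAM-backed ranges are (re)registered through KVM_SET_USER_MEMORY_REGION;
 * ranges that become unassigned have their slot's memory_size set to 0,
 * which deletes the region on the KVM side.
 */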
void kvm_set_phys_mem(target_phys_addr_t start_addr,
                      ram_addr_t size,
                      ram_addr_t phys_offset)
{
    KVMState *s = kvm_state;
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    KVMSlot *mem;

    /* KVM does not support read-only slots */
    phys_offset &= ~IO_MEM_ROM;

    mem = kvm_lookup_slot(s, start_addr);
    if (mem) {
        if (flags == IO_MEM_UNASSIGNED) {
            mem->memory_size = 0;
            mem->guest_phys_addr = start_addr;
            mem->userspace_addr = 0;
            mem->flags = 0;

            kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, mem);
        } else if (start_addr >= mem->guest_phys_addr &&
                   (start_addr + size) <= (mem->guest_phys_addr + mem->memory_size))
            return;
    }

    /* KVM does not need to know about this memory */
    if (flags >= IO_MEM_UNASSIGNED)
        return;

    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->guest_phys_addr = start_addr;
    mem->userspace_addr = (unsigned long)(phys_ram_base + phys_offset);
    mem->flags = 0;

    kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, mem);
    /* FIXME deal with errors */
}

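/*
 * Thin ioctl wrappers for the three KVM file descriptors (the /dev/kvm
 * system fd, the VM fd and a vcpu fd).  They convert the usual -1/errno
 * failure convention into a negative errno return value.
 */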
int kvm_ioctl(KVMState *s, int type, void *data)
{
    int ret;

    ret = ioctl(s->fd, type, data);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, void *data)
{
    int ret;

    ret = ioctl(s->vmfd, type, data);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_vcpu_ioctl(CPUState *env, int type, void *data)
{
    int ret;

    ret = ioctl(env->kvm_fd, type, data);
    if (ret == -1)
        ret = -errno;

    return ret;
}
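
/*
 * Rough usage sketch (illustrative only; the 'env' variable, the
 * surrounding loop and the omitted error handling are assumptions,
 * not part of this file):
 *
 *     if (kvm_init(smp_cpus) == 0) {
 *         kvm_allowed = 1;
 *         kvm_init_vcpu(env);           // once per CPU
 *         for (;;)
 *             kvm_cpu_exec(env);        // run guest, handle exits
 *     }
 *
 * In the real tree the machine init and CPU execution code drive these
 * entry points; the snippet above only shows the intended ordering.
 */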