kvm: external module: Hack DEFINE_SIMPLE_ATTRIBUTE for lost_records_get() also
[qemu-kvm/fedora.git] / qemu-kvm.c
blobcfdf90fc7ec42dd49cc04a73d54238acaa157fcc
1 /*
2 * qemu/kvm integration
4 * Copyright (C) 2006-2008 Qumranet Technologies
6 * Licensed under the terms of the GNU GPL version 2 or higher.
7 */
8 #include "config.h"
9 #include "config-host.h"
11 int kvm_allowed = 1;
12 int kvm_irqchip = 1;
13 int kvm_pit = 1;
15 #include <assert.h>
16 #include <string.h>
17 #include "hw/hw.h"
18 #include "sysemu.h"
19 #include "qemu-common.h"
20 #include "console.h"
21 #include "block.h"
22 #include "compatfd.h"
24 #include "qemu-kvm.h"
25 #include <libkvm.h>
26 #include <pthread.h>
27 #include <sys/utsname.h>
28 #include <sys/syscall.h>
29 #include <sys/mman.h>
/* C99 boolean support: use the standard header instead of hand-rolled
 * macros (it defines the identical bool/_Bool, true=1, false=0). */
#include <stdbool.h>
35 extern void perror(const char *s);
37 kvm_context_t kvm_context;
39 extern int smp_cpus;
41 pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER;
42 pthread_cond_t qemu_vcpu_cond = PTHREAD_COND_INITIALIZER;
43 pthread_cond_t qemu_system_cond = PTHREAD_COND_INITIALIZER;
44 pthread_cond_t qemu_pause_cond = PTHREAD_COND_INITIALIZER;
45 pthread_cond_t qemu_work_cond = PTHREAD_COND_INITIALIZER;
46 __thread struct vcpu_info *vcpu;
48 static int qemu_system_ready;
50 #define SIG_IPI (SIGRTMIN+4)
/* One unit of work to execute on a specific vcpu thread (see on_vcpu()). */
struct qemu_kvm_work_item {
    struct qemu_kvm_work_item *next; /* singly-linked FIFO per vcpu */
    void (*func)(void *data);        /* callback run on the target vcpu */
    void *data;
    bool done;                       /* set by the vcpu, watched by the caller */
};
59 struct vcpu_info {
60 CPUState *env;
61 int sipi_needed;
62 int init;
63 pthread_t thread;
64 int signalled;
65 int stop;
66 int stopped;
67 int created;
68 struct qemu_kvm_work_item *queued_work_first, *queued_work_last;
69 } vcpu_info[256];
pthread_t io_thread;
static int io_thread_fd = -1;    /* write side of the io-thread wakeup eventfd */
static int io_thread_sigfd = -1; /* signalfd the io thread polls */

/* Set by the libkvm debug callback; consumed by kvm_main_loop(). */
static int kvm_debug_stop_requested;
/* Kernel tid of the calling thread (glibc has no gettid() wrapper). */
static inline unsigned long kvm_get_thread_id(void)
{
    return syscall(SYS_gettid);
}
82 static void qemu_cond_wait(pthread_cond_t *cond)
84 CPUState *env = cpu_single_env;
85 static const struct timespec ts = {
86 .tv_sec = 0,
87 .tv_nsec = 100000,
90 pthread_cond_timedwait(cond, &qemu_mutex, &ts);
91 cpu_single_env = env;
94 CPUState *qemu_kvm_cpu_env(int index)
96 return vcpu_info[index].env;
99 static void sig_ipi_handler(int n)
103 static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
105 struct vcpu_info *vi = &vcpu_info[env->cpu_index];
106 struct qemu_kvm_work_item wi;
108 if (vi == vcpu) {
109 func(data);
110 return;
113 wi.func = func;
114 wi.data = data;
115 if (!vi->queued_work_first)
116 vi->queued_work_first = &wi;
117 else
118 vi->queued_work_last->next = &wi;
119 vi->queued_work_last = &wi;
120 wi.next = NULL;
121 wi.done = false;
123 pthread_kill(vi->thread, SIG_IPI);
124 while (!wi.done)
125 qemu_cond_wait(&qemu_work_cond);
128 void kvm_update_interrupt_request(CPUState *env)
130 int signal = 0;
132 if (env) {
133 if (!vcpu)
134 signal = 1;
135 if (vcpu && env != vcpu->env && !vcpu_info[env->cpu_index].signalled)
136 signal = 1;
138 if (signal) {
139 vcpu_info[env->cpu_index].signalled = 1;
140 if (vcpu_info[env->cpu_index].thread)
141 pthread_kill(vcpu_info[env->cpu_index].thread, SIG_IPI);
146 void kvm_update_after_sipi(CPUState *env)
148 vcpu_info[env->cpu_index].sipi_needed = 1;
149 kvm_update_interrupt_request(env);
152 void kvm_apic_init(CPUState *env)
154 if (env->cpu_index != 0)
155 vcpu_info[env->cpu_index].init = 1;
156 kvm_update_interrupt_request(env);
159 #include <signal.h>
/* libkvm callback: let arch code inject any pending guest interrupts. */
static int try_push_interrupts(void *opaque)
{
    return kvm_arch_try_push_interrupts(opaque);
}
166 static void post_kvm_run(void *opaque, int vcpu)
169 pthread_mutex_lock(&qemu_mutex);
170 kvm_arch_post_kvm_run(opaque, vcpu);
173 static int pre_kvm_run(void *opaque, int vcpu)
175 CPUState *env = qemu_kvm_cpu_env(vcpu);
177 kvm_arch_pre_kvm_run(opaque, vcpu);
179 if (env->interrupt_request & CPU_INTERRUPT_EXIT)
180 return 1;
181 pthread_mutex_unlock(&qemu_mutex);
182 return 0;
185 static void kvm_do_load_registers(void *_env)
187 CPUState *env = _env;
189 kvm_arch_load_regs(env);
192 void kvm_load_registers(CPUState *env)
194 if (kvm_enabled() && qemu_system_ready)
195 on_vcpu(env, kvm_do_load_registers, env);
198 static void kvm_do_save_registers(void *_env)
200 CPUState *env = _env;
202 kvm_arch_save_regs(env);
205 void kvm_save_registers(CPUState *env)
207 if (kvm_enabled())
208 on_vcpu(env, kvm_do_save_registers, env);
211 int kvm_cpu_exec(CPUState *env)
213 int r;
215 r = kvm_run(kvm_context, env->cpu_index);
216 if (r < 0) {
217 printf("kvm_run returned %d\n", r);
218 exit(1);
221 return 0;
224 extern int vm_running;
226 static int has_work(CPUState *env)
228 if (!vm_running || (env && vcpu_info[env->cpu_index].stopped))
229 return 0;
230 if (!env->halted)
231 return 1;
232 return kvm_arch_has_work(env);
235 static void flush_queued_work(CPUState *env)
237 struct vcpu_info *vi = &vcpu_info[env->cpu_index];
238 struct qemu_kvm_work_item *wi;
240 if (!vi->queued_work_first)
241 return;
243 while ((wi = vi->queued_work_first)) {
244 vi->queued_work_first = wi->next;
245 wi->func(wi->data);
246 wi->done = true;
248 vi->queued_work_last = NULL;
249 pthread_cond_broadcast(&qemu_work_cond);
252 static void kvm_main_loop_wait(CPUState *env, int timeout)
254 struct timespec ts;
255 int r, e;
256 siginfo_t siginfo;
257 sigset_t waitset;
259 pthread_mutex_unlock(&qemu_mutex);
261 ts.tv_sec = timeout / 1000;
262 ts.tv_nsec = (timeout % 1000) * 1000000;
263 sigemptyset(&waitset);
264 sigaddset(&waitset, SIG_IPI);
266 r = sigtimedwait(&waitset, &siginfo, &ts);
267 e = errno;
269 pthread_mutex_lock(&qemu_mutex);
271 if (r == -1 && !(e == EAGAIN || e == EINTR)) {
272 printf("sigtimedwait: %s\n", strerror(e));
273 exit(1);
276 cpu_single_env = env;
277 flush_queued_work(env);
279 if (vcpu_info[env->cpu_index].stop) {
280 vcpu_info[env->cpu_index].stop = 0;
281 vcpu_info[env->cpu_index].stopped = 1;
282 pthread_cond_signal(&qemu_pause_cond);
285 vcpu_info[env->cpu_index].signalled = 0;
288 static int all_threads_paused(void)
290 int i;
292 for (i = 0; i < smp_cpus; ++i)
293 if (vcpu_info[i].stop)
294 return 0;
295 return 1;
298 static void pause_all_threads(void)
300 int i;
302 assert(!cpu_single_env);
304 for (i = 0; i < smp_cpus; ++i) {
305 vcpu_info[i].stop = 1;
306 pthread_kill(vcpu_info[i].thread, SIG_IPI);
308 while (!all_threads_paused())
309 qemu_cond_wait(&qemu_pause_cond);
312 static void resume_all_threads(void)
314 int i;
316 assert(!cpu_single_env);
318 for (i = 0; i < smp_cpus; ++i) {
319 vcpu_info[i].stop = 0;
320 vcpu_info[i].stopped = 0;
321 pthread_kill(vcpu_info[i].thread, SIG_IPI);
/* vm state change hook: keep the vcpu threads in step with the machine. */
static void kvm_vm_state_change_handler(void *context, int running)
{
    if (running)
        resume_all_threads();
    else
        pause_all_threads();
}
333 static void update_regs_for_sipi(CPUState *env)
335 kvm_arch_update_regs_for_sipi(env);
336 vcpu_info[env->cpu_index].sipi_needed = 0;
337 vcpu_info[env->cpu_index].init = 0;
340 static void update_regs_for_init(CPUState *env)
342 cpu_reset(env);
343 kvm_arch_load_regs(env);
346 static void setup_kernel_sigmask(CPUState *env)
348 sigset_t set;
350 sigemptyset(&set);
351 sigaddset(&set, SIGUSR2);
352 sigaddset(&set, SIGIO);
353 sigaddset(&set, SIGALRM);
354 sigprocmask(SIG_BLOCK, &set, NULL);
356 sigprocmask(SIG_BLOCK, NULL, &set);
357 sigdelset(&set, SIG_IPI);
359 kvm_set_signal_mask(kvm_context, env->cpu_index, &set);
362 void qemu_kvm_system_reset(void)
364 int i;
366 pause_all_threads();
368 qemu_system_reset();
370 for (i = 0; i < smp_cpus; ++i)
371 kvm_arch_cpu_reset(vcpu_info[i].env);
373 resume_all_threads();
376 static int kvm_main_loop_cpu(CPUState *env)
378 struct vcpu_info *info = &vcpu_info[env->cpu_index];
380 setup_kernel_sigmask(env);
382 pthread_mutex_lock(&qemu_mutex);
383 if (kvm_irqchip_in_kernel(kvm_context))
384 env->halted = 0;
386 kvm_qemu_init_env(env);
387 #ifdef TARGET_I386
388 kvm_tpr_vcpu_start(env);
389 #endif
391 cpu_single_env = env;
392 kvm_load_registers(env);
394 while (1) {
395 while (!has_work(env))
396 kvm_main_loop_wait(env, 1000);
397 if (env->interrupt_request & CPU_INTERRUPT_HARD)
398 env->halted = 0;
399 if (!kvm_irqchip_in_kernel(kvm_context) && info->sipi_needed)
400 update_regs_for_sipi(env);
401 if (!kvm_irqchip_in_kernel(kvm_context) && info->init)
402 update_regs_for_init(env);
403 if (!env->halted && !info->init)
404 kvm_cpu_exec(env);
405 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
406 kvm_main_loop_wait(env, 0);
408 pthread_mutex_unlock(&qemu_mutex);
409 return 0;
412 static void *ap_main_loop(void *_env)
414 CPUState *env = _env;
415 sigset_t signals;
417 vcpu = &vcpu_info[env->cpu_index];
418 vcpu->env = env;
419 vcpu->env->thread_id = kvm_get_thread_id();
420 sigfillset(&signals);
421 sigprocmask(SIG_BLOCK, &signals, NULL);
422 kvm_create_vcpu(kvm_context, env->cpu_index);
423 kvm_qemu_init_env(env);
425 /* signal VCPU creation */
426 pthread_mutex_lock(&qemu_mutex);
427 vcpu->created = 1;
428 pthread_cond_signal(&qemu_vcpu_cond);
430 /* and wait for machine initialization */
431 while (!qemu_system_ready)
432 qemu_cond_wait(&qemu_system_cond);
433 pthread_mutex_unlock(&qemu_mutex);
435 kvm_main_loop_cpu(env);
436 return NULL;
439 void kvm_init_new_ap(int cpu, CPUState *env)
441 pthread_create(&vcpu_info[cpu].thread, NULL, ap_main_loop, env);
443 while (vcpu_info[cpu].created == 0)
444 qemu_cond_wait(&qemu_vcpu_cond);
447 int kvm_init_ap(void)
449 #ifdef TARGET_I386
450 kvm_tpr_opt_setup();
451 #endif
452 qemu_add_vm_change_state_handler(kvm_vm_state_change_handler, NULL);
454 signal(SIG_IPI, sig_ipi_handler);
455 return 0;
458 void qemu_kvm_notify_work(void)
460 uint64_t value = 1;
461 char buffer[8];
462 size_t offset = 0;
464 if (io_thread_fd == -1)
465 return;
467 memcpy(buffer, &value, sizeof(value));
469 while (offset < 8) {
470 ssize_t len;
472 len = write(io_thread_fd, buffer + offset, 8 - offset);
473 if (len == -1 && errno == EINTR)
474 continue;
476 if (len <= 0)
477 break;
479 offset += len;
482 if (offset != 8)
483 fprintf(stderr, "failed to notify io thread\n");
/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
491 static void sigfd_handler(void *opaque)
493 int fd = (unsigned long)opaque;
494 struct qemu_signalfd_siginfo info;
495 struct sigaction action;
496 ssize_t len;
498 while (1) {
499 do {
500 len = read(fd, &info, sizeof(info));
501 } while (len == -1 && errno == EINTR);
503 if (len == -1 && errno == EAGAIN)
504 break;
506 if (len != sizeof(info)) {
507 printf("read from sigfd returned %ld: %m\n", len);
508 return;
511 sigaction(info.ssi_signo, NULL, &action);
512 if (action.sa_handler)
513 action.sa_handler(info.ssi_signo);
518 /* Used to break IO thread out of select */
static void io_thread_wakeup(void *opaque)
{
    /* Consume the 8-byte eventfd counter so select() stops reporting
     * the wakeup fd as readable; the data itself is discarded. */
    int fd = (unsigned long)opaque;
    char buffer[8];
    size_t offset = 0;

    while (offset < 8) {
        ssize_t len;

        len = read(fd, buffer + offset, 8 - offset);
        if (len == -1 && errno == EINTR)
            continue;

        if (len <= 0)
            break;

        offset += len;
    }
}
539 int kvm_main_loop(void)
541 int fds[2];
542 sigset_t mask;
543 int sigfd;
545 io_thread = pthread_self();
546 qemu_system_ready = 1;
548 if (kvm_eventfd(fds) == -1) {
549 fprintf(stderr, "failed to create eventfd\n");
550 return -errno;
553 qemu_set_fd_handler2(fds[0], NULL, io_thread_wakeup, NULL,
554 (void *)(unsigned long)fds[0]);
556 io_thread_fd = fds[1];
558 sigemptyset(&mask);
559 sigaddset(&mask, SIGIO);
560 sigaddset(&mask, SIGALRM);
561 sigprocmask(SIG_BLOCK, &mask, NULL);
563 sigfd = qemu_signalfd(&mask);
564 if (sigfd == -1) {
565 fprintf(stderr, "failed to create signalfd\n");
566 return -errno;
569 fcntl(sigfd, F_SETFL, O_NONBLOCK);
571 qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
572 (void *)(unsigned long)sigfd);
574 pthread_cond_broadcast(&qemu_system_cond);
576 io_thread_sigfd = sigfd;
577 cpu_single_env = NULL;
579 while (1) {
580 main_loop_wait(1000);
581 if (qemu_shutdown_requested())
582 break;
583 else if (qemu_powerdown_requested())
584 qemu_system_powerdown();
585 else if (qemu_reset_requested())
586 qemu_kvm_system_reset();
587 else if (kvm_debug_stop_requested) {
588 vm_stop(EXCP_DEBUG);
589 kvm_debug_stop_requested = 0;
593 pause_all_threads();
594 pthread_mutex_unlock(&qemu_mutex);
596 return 0;
599 static int kvm_debug(void *opaque, int vcpu)
601 kvm_debug_stop_requested = 1;
602 vcpu_info[vcpu].stopped = 1;
603 return 1;
/* Guest pio byte read: forward to qemu's io port handlers. */
static int kvm_inb(void *opaque, uint16_t addr, uint8_t *data)
{
    *data = cpu_inb(0, addr);
    return 0;
}
/* Guest pio word read: forward to qemu's io port handlers. */
static int kvm_inw(void *opaque, uint16_t addr, uint16_t *data)
{
    *data = cpu_inw(0, addr);
    return 0;
}
/* Guest pio long read: forward to qemu's io port handlers. */
static int kvm_inl(void *opaque, uint16_t addr, uint32_t *data)
{
    *data = cpu_inl(0, addr);
    return 0;
}
#define PM_IO_BASE 0xb000

/* Guest pio byte write.  Port 0xb2 (APM control) is special-cased to
 * emulate the BIOS ACPI enable/disable SMI protocol by toggling bit 0
 * of PM_IO_BASE+4 (PM1a control, SCI_EN); other ports are forwarded to
 * qemu's io handlers. */
static int kvm_outb(void *opaque, uint16_t addr, uint8_t data)
{
    if (addr == 0xb2) {
        switch (data) {
        case 0: {
            cpu_outb(0, 0xb3, 0);
            break;
        }
        case 0xf0: {
            unsigned x;

            /* disable acpi (clear SCI_EN) — the old comment here said
             * "enable acpi", contradicting the &= ~1 below */
            x = cpu_inw(0, PM_IO_BASE + 4);
            x &= ~1;
            cpu_outw(0, PM_IO_BASE + 4, x);
            break;
        }
        case 0xf1: {
            unsigned x;

            /* enable acpi (set SCI_EN) */
            x = cpu_inw(0, PM_IO_BASE + 4);
            x |= 1;
            cpu_outw(0, PM_IO_BASE + 4, x);
            break;
        }
        default:
            break;
        }
        return 0;
    }
    cpu_outb(0, addr, data);
    return 0;
}
/* Guest pio word write: forward to qemu's io port handlers. */
static int kvm_outw(void *opaque, uint16_t addr, uint16_t data)
{
    cpu_outw(0, addr, data);
    return 0;
}
/* Guest pio long write: forward to qemu's io port handlers. */
static int kvm_outl(void *opaque, uint16_t addr, uint32_t data)
{
    cpu_outl(0, addr, data);
    return 0;
}
/* Guest mmio read: satisfy it through qemu's physical memory layer. */
static int kvm_mmio_read(void *opaque, uint64_t addr, uint8_t *data, int len)
{
    cpu_physical_memory_rw(addr, data, len, 0);
    return 0;
}
/* Guest mmio write: route through qemu's physical memory layer. */
static int kvm_mmio_write(void *opaque, uint64_t addr, uint8_t *data, int len)
{
    cpu_physical_memory_rw(addr, data, len, 1);
    return 0;
}
/* libkvm callback: always return to userspace after an io window exit. */
static int kvm_io_window(void *opaque)
{
    return 1;
}
/* Guest executed HLT: delegate to the arch-specific halt handling. */
static int kvm_halt(void *opaque, int vcpu)
{
    return kvm_arch_halt(opaque, vcpu);
}
696 static int kvm_shutdown(void *opaque, int vcpu)
698 /* stop the current vcpu from going back to guest mode */
699 vcpu_info[cpu_single_env->cpu_index].stopped = 1;
701 qemu_system_reset_request();
702 return 1;
705 static struct kvm_callbacks qemu_kvm_ops = {
706 .debug = kvm_debug,
707 .inb = kvm_inb,
708 .inw = kvm_inw,
709 .inl = kvm_inl,
710 .outb = kvm_outb,
711 .outw = kvm_outw,
712 .outl = kvm_outl,
713 .mmio_read = kvm_mmio_read,
714 .mmio_write = kvm_mmio_write,
715 .halt = kvm_halt,
716 .shutdown = kvm_shutdown,
717 .io_window = kvm_io_window,
718 .try_push_interrupts = try_push_interrupts,
719 .post_kvm_run = post_kvm_run,
720 .pre_kvm_run = pre_kvm_run,
721 #ifdef TARGET_I386
722 .tpr_access = handle_tpr_access,
723 #endif
724 #ifdef TARGET_PPC
725 .powerpc_dcr_read = handle_powerpc_dcr_read,
726 .powerpc_dcr_write = handle_powerpc_dcr_write,
727 #endif
730 int kvm_qemu_init()
732 /* Try to initialize kvm */
733 kvm_context = kvm_init(&qemu_kvm_ops, cpu_single_env);
734 if (!kvm_context) {
735 return -1;
737 pthread_mutex_lock(&qemu_mutex);
739 return 0;
742 int kvm_qemu_create_context(void)
744 int r;
745 if (!kvm_irqchip) {
746 kvm_disable_irqchip_creation(kvm_context);
748 if (!kvm_pit) {
749 kvm_disable_pit_creation(kvm_context);
751 if (kvm_create(kvm_context, phys_ram_size, (void**)&phys_ram_base) < 0) {
752 kvm_qemu_destroy();
753 return -1;
755 r = kvm_arch_qemu_create_context();
756 if(r <0)
757 kvm_qemu_destroy();
758 return 0;
761 void kvm_qemu_destroy(void)
763 kvm_finalize(kvm_context);
766 void kvm_cpu_register_physical_memory(target_phys_addr_t start_addr,
767 unsigned long size,
768 unsigned long phys_offset)
770 int r = 0;
771 unsigned long area_flags = phys_offset & ~TARGET_PAGE_MASK;
773 phys_offset &= ~IO_MEM_ROM;
775 if (area_flags == IO_MEM_UNASSIGNED) {
776 kvm_unregister_memory_area(kvm_context, start_addr, size);
777 return;
780 r = kvm_is_containing_region(kvm_context, start_addr, size);
781 if (r)
782 return;
784 if (area_flags >= TLB_MMIO)
785 return;
787 r = kvm_register_phys_mem(kvm_context, start_addr,
788 phys_ram_base + phys_offset,
789 size, 0);
790 if (r < 0) {
791 printf("kvm_cpu_register_physical_memory: failed\n");
792 exit(1);
794 return;
/* When kvm lacks a synchronized mmu (no mmu notifiers), mark guest ram
 * MADV_DONTFORK so a fork()ed child cannot COW-break guest pages.
 * Returns the madvise result (0 when not applicable). */
int kvm_setup_guest_memory(void *area, unsigned long size)
{
    int ret = 0;

#ifdef MADV_DONTFORK
    if (kvm_enabled() && !kvm_has_sync_mmu(kvm_context))
        ret = madvise(area, size, MADV_DONTFORK);
#endif

    if (ret)
        perror("madvise");

    return ret;
}
812 int kvm_qemu_check_extension(int ext)
814 return kvm_check_extension(kvm_context, ext);
817 int kvm_qemu_init_env(CPUState *cenv)
819 return kvm_arch_qemu_init_env(cenv);
822 struct kvm_guest_debug_data {
823 struct kvm_debug_guest dbg;
824 int err;
827 void kvm_invoke_guest_debug(void *data)
829 struct kvm_guest_debug_data *dbg_data = data;
831 dbg_data->err = kvm_guest_debug(kvm_context, cpu_single_env->cpu_index,
832 &dbg_data->dbg);
835 int kvm_update_debugger(CPUState *env)
837 struct kvm_guest_debug_data data;
838 int i;
840 memset(data.dbg.breakpoints, 0, sizeof(data.dbg.breakpoints));
842 data.dbg.enabled = 0;
843 if (env->nb_breakpoints || env->singlestep_enabled) {
844 data.dbg.enabled = 1;
845 for (i = 0; i < 4 && i < env->nb_breakpoints; ++i) {
846 data.dbg.breakpoints[i].enabled = 1;
847 data.dbg.breakpoints[i].address = env->breakpoints[i];
849 data.dbg.singlestep = env->singlestep_enabled;
851 on_vcpu(env, kvm_invoke_guest_debug, &data);
852 return data.err;
/*
 * dirty pages logging
 */
/* FIXME: use unsigned long pointer instead of unsigned char */
860 unsigned char *kvm_dirty_bitmap = NULL;
861 int kvm_physical_memory_set_dirty_tracking(int enable)
863 int r = 0;
865 if (!kvm_enabled())
866 return 0;
868 if (enable) {
869 if (!kvm_dirty_bitmap) {
870 unsigned bitmap_size = BITMAP_SIZE(phys_ram_size);
871 kvm_dirty_bitmap = qemu_malloc(bitmap_size);
872 if (kvm_dirty_bitmap == NULL) {
873 perror("Failed to allocate dirty pages bitmap");
874 r=-1;
876 else {
877 r = kvm_dirty_pages_log_enable_all(kvm_context);
881 else {
882 if (kvm_dirty_bitmap) {
883 r = kvm_dirty_pages_log_reset(kvm_context);
884 qemu_free(kvm_dirty_bitmap);
885 kvm_dirty_bitmap = NULL;
888 return r;
891 /* get kvm's dirty pages bitmap and update qemu's */
892 int kvm_get_dirty_pages_log_range(unsigned long start_addr,
893 unsigned char *bitmap,
894 unsigned int offset,
895 unsigned long mem_size)
897 unsigned int i, j, n=0;
898 unsigned char c;
899 unsigned page_number, addr, addr1;
900 unsigned int len = ((mem_size/TARGET_PAGE_SIZE) + 7) / 8;
903 * bitmap-traveling is faster than memory-traveling (for addr...)
904 * especially when most of the memory is not dirty.
906 for (i=0; i<len; i++) {
907 c = bitmap[i];
908 while (c>0) {
909 j = ffsl(c) - 1;
910 c &= ~(1u<<j);
911 page_number = i * 8 + j;
912 addr1 = page_number * TARGET_PAGE_SIZE;
913 addr = offset + addr1;
914 cpu_physical_memory_set_dirty(addr);
915 n++;
918 return 0;
/* libkvm per-slot dirty-log callback: the slot's guest start address
 * doubles as the offset into guest-physical space. */
int kvm_get_dirty_bitmap_cb(unsigned long start, unsigned long len,
                            void *bitmap, void *opaque)
{
    return kvm_get_dirty_pages_log_range(start, bitmap, start, len);
}
927 * get kvm's dirty pages bitmap and update qemu's
928 * we only care about physical ram, which resides in slots 0 and 3
930 int kvm_update_dirty_pages_log(void)
932 int r = 0;
935 r = kvm_get_dirty_pages_range(kvm_context, 0, phys_ram_size,
936 kvm_dirty_bitmap, NULL,
937 kvm_get_dirty_bitmap_cb);
938 return r;
941 int kvm_get_phys_ram_page_bitmap(unsigned char *bitmap)
943 unsigned int bsize = BITMAP_SIZE(phys_ram_size);
944 unsigned int brsize = BITMAP_SIZE(ram_size);
945 unsigned int extra_pages = (phys_ram_size - ram_size) / TARGET_PAGE_SIZE;
946 unsigned int extra_bytes = (extra_pages +7)/8;
947 unsigned int hole_start = BITMAP_SIZE(0xa0000);
948 unsigned int hole_end = BITMAP_SIZE(0xc0000);
950 memset(bitmap, 0xFF, brsize + extra_bytes);
951 memset(bitmap + hole_start, 0, hole_end - hole_start);
952 memset(bitmap + brsize + extra_bytes, 0, bsize - brsize - extra_bytes);
954 return 0;
#ifdef KVM_CAP_IRQCHIP

/* Assert/deassert an in-kernel irqchip input line. */
int kvm_set_irq(int irq, int level)
{
    return kvm_set_irq_level(kvm_context, irq, level);
}

#endif
966 int qemu_kvm_get_dirty_pages(unsigned long phys_addr, void *buf)
968 return kvm_get_dirty_pages(kvm_context, phys_addr, buf);
971 void *kvm_cpu_create_phys_mem(target_phys_addr_t start_addr,
972 unsigned long size, int log, int writable)
974 return kvm_create_phys_mem(kvm_context, start_addr, size, log, writable);
977 void kvm_cpu_destroy_phys_mem(target_phys_addr_t start_addr,
978 unsigned long size)
980 kvm_destroy_phys_mem(kvm_context, start_addr, size);
983 void kvm_mutex_unlock(void)
985 assert(!cpu_single_env);
986 pthread_mutex_unlock(&qemu_mutex);
989 void kvm_mutex_lock(void)
991 pthread_mutex_lock(&qemu_mutex);
992 cpu_single_env = NULL;
995 int qemu_kvm_register_coalesced_mmio(target_phys_addr_t addr, unsigned int size)
997 return kvm_register_coalesced_mmio(kvm_context, addr, size);
1000 int qemu_kvm_unregister_coalesced_mmio(target_phys_addr_t addr,
1001 unsigned int size)
1003 return kvm_unregister_coalesced_mmio(kvm_context, addr, size);