/*
 * Copyright (C) 2006-2008 Qumranet Technologies
 *
 * Licensed under the terms of the GNU GPL version 2 or higher.
 */
#include "config-host.h"
#include "qemu-common.h"

#include <sys/utsname.h>
#include <sys/syscall.h>
int kvm_pit_reinject = 1;

kvm_context_t kvm_context;

pthread_mutex_t qemu_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t qemu_vcpu_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_system_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_pause_cond = PTHREAD_COND_INITIALIZER;
pthread_cond_t qemu_work_cond = PTHREAD_COND_INITIALIZER;
__thread CPUState *current_env;

static int qemu_system_ready;

#define SIG_IPI (SIGRTMIN+4)

static int io_thread_fd = -1;
static int io_thread_sigfd = -1;

static CPUState *kvm_debug_cpu_requested;

/* The list of ioperm_data */
static LIST_HEAD(, ioperm_data) ioperm_head;
static inline unsigned long kvm_get_thread_id(void)
{
    return syscall(SYS_gettid);
}
static void qemu_cond_wait(pthread_cond_t *cond)
{
    CPUState *env = cpu_single_env;
    static const struct timespec ts = {
        .tv_sec = 0,
        .tv_nsec = 100000,
    };

    pthread_cond_timedwait(cond, &qemu_mutex, &ts);
    cpu_single_env = env;
}
static void sig_ipi_handler(int n)
{
}
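
/*
 * Run func(data) on the thread that owns env.  When the caller already is
 * that vcpu thread the function is invoked directly; otherwise the work
 * item is queued on the target vcpu, the vcpu is kicked with SIG_IPI, and
 * the caller sleeps on qemu_work_cond until the item has been flushed.
 */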
static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
{
    struct qemu_work_item wi;

    if (env == current_env) {
        func(data);
        return;
    }

    wi.func = func;
    wi.data = data;
    if (!env->kvm_cpu_state.queued_work_first)
        env->kvm_cpu_state.queued_work_first = &wi;
    else
        env->kvm_cpu_state.queued_work_last->next = &wi;
    env->kvm_cpu_state.queued_work_last = &wi;
    wi.next = NULL;
    wi.done = 0;

    pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
    while (!wi.done)
        qemu_cond_wait(&qemu_work_cond);
}
static void inject_interrupt(void *data)
{
    cpu_interrupt(current_env, (long)data);
}

void kvm_inject_interrupt(CPUState *env, int mask)
{
    on_vcpu(env, inject_interrupt, (void *)(long)mask);
}
void kvm_update_interrupt_request(CPUState *env)
{
    int signal = 0;

    if (env) {
        if (!current_env || !current_env->kvm_cpu_state.created)
            signal = 1;
        /*
         * Testing for created here is really redundant
         */
        if (current_env && current_env->kvm_cpu_state.created &&
            env != current_env && !env->kvm_cpu_state.signalled)
            signal = 1;

        if (signal) {
            env->kvm_cpu_state.signalled = 1;
            if (env->kvm_cpu_state.thread)
                pthread_kill(env->kvm_cpu_state.thread, SIG_IPI);
        }
    }
}
void kvm_update_after_sipi(CPUState *env)
{
    env->kvm_cpu_state.sipi_needed = 1;
    kvm_update_interrupt_request(env);
}
void kvm_apic_init(CPUState *env)
{
    if (env->cpu_index != 0)
        env->kvm_cpu_state.init = 1;
    kvm_update_interrupt_request(env);
}
static int try_push_interrupts(void *opaque)
{
    return kvm_arch_try_push_interrupts(opaque);
}
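
/*
 * qemu_mutex is held while QEMU device code runs.  pre_kvm_run drops it
 * just before a vcpu enters the guest and post_kvm_run takes it back on
 * the way out, so the io thread can run while guest code executes.
 */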
static void post_kvm_run(void *opaque, void *data)
{
    CPUState *env = (CPUState *)data;

    pthread_mutex_lock(&qemu_mutex);
    kvm_arch_post_kvm_run(opaque, env);
}
static int pre_kvm_run(void *opaque, void *data)
{
    CPUState *env = (CPUState *)data;

    kvm_arch_pre_kvm_run(opaque, env);

    if (env->exit_request)
        return 1;
    pthread_mutex_unlock(&qemu_mutex);
    return 0;
}
static void kvm_do_load_registers(void *_env)
{
    CPUState *env = _env;

    kvm_arch_load_regs(env);
}

void kvm_load_registers(CPUState *env)
{
    if (kvm_enabled() && qemu_system_ready)
        on_vcpu(env, kvm_do_load_registers, env);
}
static void kvm_do_save_registers(void *_env)
{
    CPUState *env = _env;

    kvm_arch_save_regs(env);
}

void kvm_save_registers(CPUState *env)
{
    if (kvm_enabled())
        on_vcpu(env, kvm_do_save_registers, env);
}
int kvm_cpu_exec(CPUState *env)
{
    int r;

    r = kvm_run(kvm_context, env->cpu_index, env);
    if (r < 0) {
        printf("kvm_run returned %d\n", r);
        exit(1);
    }

    return 0;
}
static int has_work(CPUState *env)
{
    if (!vm_running || (env && env->kvm_cpu_state.stopped))
        return 0;
    if (!env->halted)
        return 1;
    return kvm_arch_has_work(env);
}
static void flush_queued_work(CPUState *env)
{
    struct qemu_work_item *wi;

    if (!env->kvm_cpu_state.queued_work_first)
        return;

    while ((wi = env->kvm_cpu_state.queued_work_first)) {
        env->kvm_cpu_state.queued_work_first = wi->next;
        wi->func(wi->data);
        wi->done = 1;
    }
    env->kvm_cpu_state.queued_work_last = NULL;
    pthread_cond_broadcast(&qemu_work_cond);
}
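
/*
 * Per-vcpu idle path: drop qemu_mutex, wait up to `timeout` ms for a
 * SIG_IPI kick, then retake the lock, run any queued work items and
 * acknowledge a pending pause request.
 */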
static void kvm_main_loop_wait(CPUState *env, int timeout)
{
    struct timespec ts;
    int r, e;
    siginfo_t siginfo;
    sigset_t waitset;

    pthread_mutex_unlock(&qemu_mutex);

    ts.tv_sec = timeout / 1000;
    ts.tv_nsec = (timeout % 1000) * 1000000;
    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    r = sigtimedwait(&waitset, &siginfo, &ts);
    e = errno;

    pthread_mutex_lock(&qemu_mutex);

    if (r == -1 && !(e == EAGAIN || e == EINTR)) {
        printf("sigtimedwait: %s\n", strerror(e));
        exit(1);
    }

    cpu_single_env = env;
    flush_queued_work(env);

    if (env->kvm_cpu_state.stop) {
        env->kvm_cpu_state.stop = 0;
        env->kvm_cpu_state.stopped = 1;
        pthread_cond_signal(&qemu_pause_cond);
    }

    env->kvm_cpu_state.signalled = 0;
}
static int all_threads_paused(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (penv->kvm_cpu_state.stop)
            return 0;
        penv = (CPUState *)penv->next_cpu;
    }

    return 1;
}
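
/*
 * Pause protocol: the caller sets kvm_cpu_state.stop and kicks each vcpu
 * with SIG_IPI; the vcpu notices the flag in kvm_main_loop_wait(), sets
 * .stopped, and signals qemu_pause_cond.  all_threads_paused() above is
 * the corresponding completion check.
 */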
static void pause_all_threads(void)
{
    CPUState *penv = first_cpu;

    while (penv) {
        if (penv != cpu_single_env) {
            penv->kvm_cpu_state.stop = 1;
            pthread_kill(penv->kvm_cpu_state.thread, SIG_IPI);
        } else {
            penv->kvm_cpu_state.stop = 0;
            penv->kvm_cpu_state.stopped = 1;
        }
        penv = (CPUState *)penv->next_cpu;
    }

    while (!all_threads_paused())
        qemu_cond_wait(&qemu_pause_cond);
}
static void resume_all_threads(void)
{
    CPUState *penv = first_cpu;

    assert(!cpu_single_env);

    while (penv) {
        penv->kvm_cpu_state.stop = 0;
        penv->kvm_cpu_state.stopped = 0;
        pthread_kill(penv->kvm_cpu_state.thread, SIG_IPI);
        penv = (CPUState *)penv->next_cpu;
    }
}
static void kvm_vm_state_change_handler(void *context, int running, int reason)
{
    if (running)
        resume_all_threads();
    else
        pause_all_threads();
}
static void update_regs_for_sipi(CPUState *env)
{
    kvm_arch_update_regs_for_sipi(env);
    env->kvm_cpu_state.sipi_needed = 0;
}
static void update_regs_for_init(CPUState *env)
{
    SegmentCache cs = env->segs[R_CS];

    cpu_reset(env);

    /* restore SIPI vector */
    if (env->kvm_cpu_state.sipi_needed)
        env->segs[R_CS] = cs;

    env->kvm_cpu_state.init = 0;
    kvm_arch_load_regs(env);
}
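
/*
 * Build the signal mask the kernel applies while this vcpu sits in
 * KVM_RUN: SIGUSR2/SIGIO/SIGALRM are blocked in the thread, then the
 * current blocked set minus SIG_IPI is handed to the kernel so a kick
 * can bounce the vcpu out of guest mode.
 */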
static void setup_kernel_sigmask(CPUState *env)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigprocmask(SIG_BLOCK, &set, NULL);

    sigprocmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);

    kvm_set_signal_mask(kvm_context, env->cpu_index, &set);
}
static void qemu_kvm_system_reset(void)
{
    CPUState *penv = first_cpu;

    pause_all_threads();

    qemu_system_reset();

    while (penv) {
        kvm_arch_cpu_reset(penv);
        penv = (CPUState *)penv->next_cpu;
    }

    resume_all_threads();
}
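
/*
 * Body of every vcpu thread: wait until there is work, fold pending INIT
 * and SIPI state into the register file when the irqchip is emulated in
 * userspace, run the guest, then poll once more for queued work before
 * looping.
 */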
static int kvm_main_loop_cpu(CPUState *env)
{
    setup_kernel_sigmask(env);

    pthread_mutex_lock(&qemu_mutex);
    if (kvm_irqchip_in_kernel(kvm_context))
        env->halted = 0;

    kvm_qemu_init_env(env);
    kvm_tpr_vcpu_start(env);

    cpu_single_env = env;
    kvm_load_registers(env);

    while (1) {
        while (!has_work(env))
            kvm_main_loop_wait(env, 1000);
        if (env->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI))
            env->halted = 0;
        if (!kvm_irqchip_in_kernel(kvm_context)) {
            if (env->kvm_cpu_state.init)
                update_regs_for_init(env);
            if (env->kvm_cpu_state.sipi_needed)
                update_regs_for_sipi(env);
        }
        if (!env->halted && !env->kvm_cpu_state.init)
            kvm_cpu_exec(env);
        env->exit_request = 0;
        env->exception_index = EXCP_INTERRUPT;
        kvm_main_loop_wait(env, 0);
    }
    pthread_mutex_unlock(&qemu_mutex);
    return 0;
}
static void *ap_main_loop(void *_env)
{
    CPUState *env = _env;
    sigset_t signals;
    struct ioperm_data *data = NULL;

    current_env = env;
    env->thread_id = kvm_get_thread_id();
    sigfillset(&signals);
    sigprocmask(SIG_BLOCK, &signals, NULL);
    kvm_create_vcpu(kvm_context, env->cpu_index);
    kvm_qemu_init_env(env);

#ifdef USE_KVM_DEVICE_ASSIGNMENT
    /* do ioperm for io ports of assigned devices */
    LIST_FOREACH(data, &ioperm_head, entries)
        on_vcpu(env, kvm_arch_do_ioperm, data);
#endif

    /* signal VCPU creation */
    pthread_mutex_lock(&qemu_mutex);
    current_env->kvm_cpu_state.created = 1;
    pthread_cond_signal(&qemu_vcpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_wait(&qemu_system_cond);
    pthread_mutex_unlock(&qemu_mutex);

    kvm_main_loop_cpu(env);
    return NULL;
}
void kvm_init_vcpu(CPUState *env)
{
    pthread_create(&env->kvm_cpu_state.thread, NULL, ap_main_loop, env);

    while (env->kvm_cpu_state.created == 0)
        qemu_cond_wait(&qemu_vcpu_cond);
}
int kvm_init_ap(void)
{
    qemu_add_vm_change_state_handler(kvm_vm_state_change_handler, NULL);

    signal(SIG_IPI, sig_ipi_handler);
    return 0;
}
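
/*
 * Wake the io thread out of its select() by writing one 8-byte value to
 * the eventfd that kvm_main_loop() installed as io_thread_fd.  Safe to
 * call before the fd exists; it simply returns in that case.
 */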
void qemu_kvm_notify_work(void)
{
    uint64_t value = 1;
    char buffer[8];
    size_t offset = 0;

    if (io_thread_fd == -1)
        return;

    memcpy(buffer, &value, sizeof(value));

    while (offset < 8) {
        ssize_t len;

        len = write(io_thread_fd, buffer + offset, 8 - offset);
        if (len == -1 && errno == EINTR)
            continue;
        if (len <= 0)
            break;
        offset += len;
    }

    if (offset != 8)
        fprintf(stderr, "failed to notify io thread\n");
}
/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (unsigned long)opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN)
            break;

        if (len != sizeof(info)) {
            printf("read from sigfd returned %ld: %m\n", len);
            return;
        }

        sigaction(info.ssi_signo, NULL, &action);
        if (action.sa_handler)
            action.sa_handler(info.ssi_signo);
    }
}
/* Used to break IO thread out of select */
static void io_thread_wakeup(void *opaque)
{
    int fd = (unsigned long)opaque;
    char buffer[8];
    size_t offset = 0;

    while (offset < 8) {
        ssize_t len;

        len = read(fd, buffer + offset, 8 - offset);
        if (len == -1 && errno == EINTR)
            continue;
        if (len <= 0)
            break;
        offset += len;
    }
}
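
/*
 * The io thread proper: installs the eventfd wakeup handler and the
 * signalfd for SIGIO/SIGALRM, releases the vcpus waiting on
 * qemu_system_cond, then services main_loop_wait() and the shutdown,
 * powerdown, reset and debug requests raised by the vcpu threads.
 */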
int kvm_main_loop(void)
{
    int fds[2];
    sigset_t mask;
    int sigfd;

    io_thread = pthread_self();
    qemu_system_ready = 1;

    if (qemu_eventfd(fds) == -1) {
        fprintf(stderr, "failed to create eventfd\n");
        return -1;
    }

    qemu_set_fd_handler2(fds[0], NULL, io_thread_wakeup, NULL,
                         (void *)(unsigned long)fds[0]);

    io_thread_fd = fds[1];

    sigemptyset(&mask);
    sigaddset(&mask, SIGIO);
    sigaddset(&mask, SIGALRM);
    sigprocmask(SIG_BLOCK, &mask, NULL);

    sigfd = qemu_signalfd(&mask);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -1;
    }

    fcntl(sigfd, F_SETFL, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(unsigned long)sigfd);

    pthread_cond_broadcast(&qemu_system_cond);

    io_thread_sigfd = sigfd;
    cpu_single_env = NULL;

    while (1) {
        main_loop_wait(1000);
        if (qemu_shutdown_requested())
            break;
        else if (qemu_powerdown_requested())
            qemu_system_powerdown();
        else if (qemu_reset_requested())
            qemu_kvm_system_reset();
#ifdef CONFIG_GDBSTUB
        else if (kvm_debug_cpu_requested) {
            gdb_set_stop_cpu(kvm_debug_cpu_requested);
            vm_stop(EXCP_DEBUG);
            kvm_debug_cpu_requested = NULL;
        }
#endif
    }

    pause_all_threads();
    pthread_mutex_unlock(&qemu_mutex);

    return 0;
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
static int kvm_debug(void *opaque, void *data,
                     struct kvm_debug_exit_arch *arch_info)
{
    int handle = kvm_arch_debug(arch_info);
    CPUState *env = data;

    if (handle) {
        kvm_debug_cpu_requested = env;
        env->kvm_cpu_state.stopped = 1;
    }
    return handle;
}
#endif
static int kvm_inb(void *opaque, uint16_t addr, uint8_t *data)
{
    *data = cpu_inb(0, addr);
    return 0;
}

static int kvm_inw(void *opaque, uint16_t addr, uint16_t *data)
{
    *data = cpu_inw(0, addr);
    return 0;
}

static int kvm_inl(void *opaque, uint16_t addr, uint32_t *data)
{
    *data = cpu_inl(0, addr);
    return 0;
}
#define PM_IO_BASE 0xb000

static int kvm_outb(void *opaque, uint16_t addr, uint8_t data)
{
    if (addr == 0xb2) {
        switch (data) {
        case 0:
            cpu_outb(0, 0xb3, 0);
            break;
        case 0xf0: {
            unsigned x;

            x = cpu_inw(0, PM_IO_BASE + 4);
            x &= ~1;
            cpu_outw(0, PM_IO_BASE + 4, x);
            break;
        }
        case 0xf1: {
            unsigned x;

            x = cpu_inw(0, PM_IO_BASE + 4);
            x |= 1;
            cpu_outw(0, PM_IO_BASE + 4, x);
            break;
        }
        default:
            break;
        }
        return 0;
    }

    cpu_outb(0, addr, data);
    return 0;
}
static int kvm_outw(void *opaque, uint16_t addr, uint16_t data)
{
    cpu_outw(0, addr, data);
    return 0;
}

static int kvm_outl(void *opaque, uint16_t addr, uint32_t data)
{
    cpu_outl(0, addr, data);
    return 0;
}
static int kvm_mmio_read(void *opaque, uint64_t addr, uint8_t *data, int len)
{
    cpu_physical_memory_rw(addr, data, len, 0);
    return 0;
}

static int kvm_mmio_write(void *opaque, uint64_t addr, uint8_t *data, int len)
{
    cpu_physical_memory_rw(addr, data, len, 1);
    return 0;
}
static int kvm_io_window(void *opaque)
{
    return 1;
}

static int kvm_halt(void *opaque, int vcpu)
{
    return kvm_arch_halt(opaque, vcpu);
}
static int kvm_shutdown(void *opaque, void *data)
{
    CPUState *env = (CPUState *)data;

    /* stop the current vcpu from going back to guest mode */
    env->kvm_cpu_state.stopped = 1;

    qemu_system_reset_request();
    return 1;
}
static struct kvm_callbacks qemu_kvm_ops = {
#ifdef KVM_CAP_SET_GUEST_DEBUG
    .debug = kvm_debug,
#endif
    .inb   = kvm_inb,
    .inw   = kvm_inw,
    .inl   = kvm_inl,
    .outb  = kvm_outb,
    .outw  = kvm_outw,
    .outl  = kvm_outl,
    .mmio_read = kvm_mmio_read,
    .mmio_write = kvm_mmio_write,
    .halt  = kvm_halt,
    .shutdown = kvm_shutdown,
    .io_window = kvm_io_window,
    .try_push_interrupts = try_push_interrupts,
#ifdef KVM_CAP_USER_NMI
    .push_nmi = kvm_arch_push_nmi,
#endif
    .post_kvm_run = post_kvm_run,
    .pre_kvm_run = pre_kvm_run,
    .tpr_access = handle_tpr_access,
    .powerpc_dcr_read = handle_powerpc_dcr_read,
    .powerpc_dcr_write = handle_powerpc_dcr_write,
};
int kvm_qemu_init(void)
{
    /* Try to initialize kvm */
    kvm_context = kvm_init(&qemu_kvm_ops, cpu_single_env);
    if (!kvm_context)
        return -1;

    pthread_mutex_lock(&qemu_mutex);

    return 0;
}
static int destroy_region_works = 0;
int kvm_qemu_create_context(void)
{
    int r;
    int i;

    if (!kvm_irqchip) {
        kvm_disable_irqchip_creation(kvm_context);
    }
    if (!kvm_pit) {
        kvm_disable_pit_creation(kvm_context);
    }
    if (kvm_create(kvm_context, phys_ram_size, (void**)&phys_ram_base) < 0) {
        return -1;
    }
    r = kvm_arch_qemu_create_context();
    if (r < 0)
        return -1;
    if (kvm_pit && !kvm_pit_reinject) {
        if (kvm_reinject_control(kvm_context, 0)) {
            fprintf(stderr, "failure to disable in-kernel PIT reinjection\n");
            return -1;
        }
    }

    destroy_region_works = kvm_destroy_memory_region_works(kvm_context);

    if (kvm_irqchip && kvm_has_gsi_routing(kvm_context)) {
        kvm_clear_gsi_routes(kvm_context);
        for (i = 0; i < 8; ++i) {
            if (i == 2)
                continue;
            r = kvm_add_irq_route(kvm_context, i, KVM_IRQCHIP_PIC_MASTER, i);
            if (r < 0)
                return r;
        }
        for (i = 8; i < 16; ++i) {
            r = kvm_add_irq_route(kvm_context, i, KVM_IRQCHIP_PIC_SLAVE, i - 8);
            if (r < 0)
                return r;
        }
        for (i = 0; i < 24; ++i) {
            r = kvm_add_irq_route(kvm_context, i, KVM_IRQCHIP_IOAPIC, i);
            if (r < 0)
                return r;
        }
        kvm_commit_irq_routes(kvm_context);
    }

    return 0;
}
void kvm_qemu_destroy(void)
{
    kvm_finalize(kvm_context);
}
static int must_use_aliases_source(target_phys_addr_t addr)
{
    if (destroy_region_works)
        return 0;
    if (addr == 0xa0000 || addr == 0xa8000)
        return 1;
    return 0;
}

static int must_use_aliases_target(target_phys_addr_t addr)
{
    if (destroy_region_works)
        return 0;
    if (addr >= 0xe0000000 && addr < 0x100000000ull)
        return 1;
    return 0;
}
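
/*
 * Small table of phys -> ram mappings.  It is only consulted when
 * destroy_region_works is false, where VGA-style regions have to be
 * expressed as memory aliases instead of being torn down and recreated;
 * see must_use_aliases_source/target above.
 */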
static struct mapping {
    target_phys_addr_t phys;
    ram_addr_t ram;
    unsigned long len;
} mappings[50];
static int nr_mappings;
static struct mapping *find_ram_mapping(ram_addr_t ram_addr)
{
    struct mapping *p;

    for (p = mappings; p < mappings + nr_mappings; ++p) {
        if (p->ram <= ram_addr && ram_addr < p->ram + p->len) {
            return p;
        }
    }
    return NULL;
}
static struct mapping *find_mapping(target_phys_addr_t start_addr)
{
    struct mapping *p;

    for (p = mappings; p < mappings + nr_mappings; ++p) {
        if (p->phys <= start_addr && start_addr < p->phys + p->len) {
            return p;
        }
    }
    return NULL;
}
static void drop_mapping(target_phys_addr_t start_addr)
{
    struct mapping *p = find_mapping(start_addr);

    if (p)
        *p = mappings[--nr_mappings];
}
void kvm_cpu_register_physical_memory(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      unsigned long phys_offset)
{
    int r = 0;
    unsigned long area_flags = phys_offset & ~TARGET_PAGE_MASK;
    struct mapping *p;

    phys_offset &= ~IO_MEM_ROM;

    if (area_flags == IO_MEM_UNASSIGNED) {
        if (must_use_aliases_source(start_addr)) {
            kvm_destroy_memory_alias(kvm_context, start_addr);
            return;
        }
        if (must_use_aliases_target(start_addr))
            return;
        kvm_unregister_memory_area(kvm_context, start_addr, size);
        return;
    }

    r = kvm_is_containing_region(kvm_context, start_addr, size);
    if (r)
        return;

    if (area_flags >= TLB_MMIO)
        return;

    if (must_use_aliases_source(start_addr)) {
        p = find_ram_mapping(phys_offset);
        if (p) {
            kvm_create_memory_alias(kvm_context, start_addr, size,
                                    p->phys + (phys_offset - p->ram));
        }
        return;
    }

    r = kvm_register_phys_mem(kvm_context, start_addr,
                              phys_ram_base + phys_offset,
                              size, 0);
    if (r < 0) {
        printf("kvm_cpu_register_physical_memory: failed\n");
        exit(1);
    }

    drop_mapping(start_addr);
    p = &mappings[nr_mappings++];
    p->phys = start_addr;
    p->ram = phys_offset;
    p->len = size;
}
void kvm_cpu_unregister_physical_memory(target_phys_addr_t start_addr,
                                        target_phys_addr_t size,
                                        unsigned long phys_offset)
{
    kvm_unregister_memory_area(kvm_context, start_addr, size);
}
int kvm_setup_guest_memory(void *area, unsigned long size)
{
    int ret = 0;

    if (kvm_enabled() && !kvm_has_sync_mmu())
        ret = madvise(area, size, MADV_DONTFORK);

    return ret;
}
int kvm_qemu_check_extension(int ext)
{
    return kvm_check_extension(kvm_context, ext);
}

int kvm_qemu_init_env(CPUState *cenv)
{
    return kvm_arch_qemu_init_env(cenv);
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint_head kvm_sw_breakpoints =
    TAILQ_HEAD_INITIALIZER(kvm_sw_breakpoints);

struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    TAILQ_FOREACH(bp, &kvm_sw_breakpoints, entry) {
        if (bp->pc == pc)
            return bp;
    }
    return NULL;
}
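
/*
 * kvm_update_guest_debug() packages its request in a
 * kvm_set_guest_debug_data and runs kvm_invoke_set_guest_debug() on the
 * target vcpu via on_vcpu(), so the ioctl is issued from that vcpu's own
 * thread.
 */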
struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;

    dbg_data->err = kvm_set_guest_debug(kvm_context, cpu_single_env->cpu_index,
                                        &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = 0;
    if (env->singlestep_enabled)
        data.dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

    kvm_arch_update_guest_debug(env, &data.dbg);
    data.dbg.control |= reinject_trap;

    on_vcpu(env, kvm_invoke_set_guest_debug, &data);
    return data.err;
}
int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = qemu_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp)
            return -ENOMEM;

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            qemu_free(bp);
            return err;
        }

        TAILQ_INSERT_HEAD(&kvm_sw_breakpoints, bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}
int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(addr);
        if (!bp)
            return -ENOENT;

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err)
            return err;

        TAILQ_REMOVE(&kvm_sw_breakpoints, bp, entry);
        qemu_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}
void kvm_remove_all_breakpoints(CPUState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    CPUState *env;

    TAILQ_FOREACH_SAFE(bp, &kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0)
                    break;
            }
        }
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu)
        kvm_update_guest_debug(env, 0);
}
#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
}

#endif /* !KVM_CAP_SET_GUEST_DEBUG */
/*
 * dirty pages logging
 */
/* FIXME: use unsigned long pointer instead of unsigned char */
unsigned char *kvm_dirty_bitmap = NULL;

int kvm_physical_memory_set_dirty_tracking(int enable)
{
    int r = 0;

    if (enable) {
        if (!kvm_dirty_bitmap) {
            unsigned bitmap_size = BITMAP_SIZE(phys_ram_size);
            kvm_dirty_bitmap = qemu_malloc(bitmap_size);
            if (kvm_dirty_bitmap == NULL) {
                perror("Failed to allocate dirty pages bitmap");
                r = -1;
            } else {
                r = kvm_dirty_pages_log_enable_all(kvm_context);
            }
        }
    } else {
        if (kvm_dirty_bitmap) {
            r = kvm_dirty_pages_log_reset(kvm_context);
            qemu_free(kvm_dirty_bitmap);
            kvm_dirty_bitmap = NULL;
        }
    }
    return r;
}
/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
                                         unsigned char *bitmap,
                                         unsigned int offset,
                                         unsigned long mem_size)
{
    unsigned int i, j, n = 0;
    unsigned char c;
    unsigned long page_number, addr, addr1;
    ram_addr_t ram_addr;
    unsigned int len = ((mem_size / TARGET_PAGE_SIZE) + 7) / 8;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        c = bitmap[i];
        while (c > 0) {
            j = ffsl(c) - 1;
            c &= ~(1u << j);
            page_number = i * 8 + j;
            addr1 = page_number * TARGET_PAGE_SIZE;
            addr = offset + addr1;
            ram_addr = cpu_get_physical_page_desc(addr);
            cpu_physical_memory_set_dirty(ram_addr);
            n++;
        }
    }
    return 0;
}
static int kvm_get_dirty_bitmap_cb(unsigned long start, unsigned long len,
                                   void *bitmap, void *opaque)
{
    return kvm_get_dirty_pages_log_range(start, bitmap, start, len);
}
/*
 * get kvm's dirty pages bitmap and update qemu's
 * we only care about physical ram, which resides in slots 0 and 3
 */
int kvm_update_dirty_pages_log(void)
{
    int r = 0;

    r = kvm_get_dirty_pages_range(kvm_context, 0, phys_ram_size,
                                  kvm_dirty_bitmap, NULL,
                                  kvm_get_dirty_bitmap_cb);
    return r;
}
void kvm_qemu_log_memory(target_phys_addr_t start, target_phys_addr_t size,
                         int log)
{
    if (log)
        kvm_dirty_pages_log_enable_slot(kvm_context, start, size);
    else {
        if (must_use_aliases_target(start))
            return;
        kvm_dirty_pages_log_disable_slot(kvm_context, start, size);
    }
}
int kvm_get_phys_ram_page_bitmap(unsigned char *bitmap)
{
    unsigned int bsize = BITMAP_SIZE(phys_ram_size);
    unsigned int brsize = BITMAP_SIZE(ram_size);
    unsigned int extra_pages = (phys_ram_size - ram_size) / TARGET_PAGE_SIZE;
    unsigned int extra_bytes = (extra_pages + 7) / 8;
    unsigned int hole_start = BITMAP_SIZE(0xa0000);
    unsigned int hole_end = BITMAP_SIZE(0xc0000);

    memset(bitmap, 0xFF, brsize + extra_bytes);
    memset(bitmap + hole_start, 0, hole_end - hole_start);
    memset(bitmap + brsize + extra_bytes, 0, bsize - brsize - extra_bytes);

    return 0;
}
#ifdef KVM_CAP_IRQCHIP

int kvm_set_irq(int irq, int level, int *status)
{
    return kvm_set_irq_level(kvm_context, irq, level, status);
}

#endif
int qemu_kvm_get_dirty_pages(unsigned long phys_addr, void *buf)
{
    return kvm_get_dirty_pages(kvm_context, phys_addr, buf);
}
void *kvm_cpu_create_phys_mem(target_phys_addr_t start_addr,
                              unsigned long size, int log, int writable)
{
    return kvm_create_phys_mem(kvm_context, start_addr, size, log, writable);
}

void kvm_cpu_destroy_phys_mem(target_phys_addr_t start_addr,
                              unsigned long size)
{
    kvm_destroy_phys_mem(kvm_context, start_addr, size);
}
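
/*
 * Lock helpers used around callbacks into generic QEMU code: whenever the
 * io thread holds qemu_mutex, cpu_single_env must be NULL (asserted
 * below), and it is cleared again when the lock is retaken.
 */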
void kvm_mutex_unlock(void)
{
    assert(!cpu_single_env);
    pthread_mutex_unlock(&qemu_mutex);
}

void kvm_mutex_lock(void)
{
    pthread_mutex_lock(&qemu_mutex);
    cpu_single_env = NULL;
}
int qemu_kvm_register_coalesced_mmio(target_phys_addr_t addr, unsigned int size)
{
    return kvm_register_coalesced_mmio(kvm_context, addr, size);
}

int qemu_kvm_unregister_coalesced_mmio(target_phys_addr_t addr,
                                       unsigned int size)
{
    return kvm_unregister_coalesced_mmio(kvm_context, addr, size);
}

int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    return kvm_register_coalesced_mmio(kvm_context, start, size);
}

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    return kvm_unregister_coalesced_mmio(kvm_context, start, size);
}
#ifdef USE_KVM_DEVICE_ASSIGNMENT
void kvm_add_ioperm_data(struct ioperm_data *data)
{
    LIST_INSERT_HEAD(&ioperm_head, data, entries);
}

void kvm_remove_ioperm_data(unsigned long start_port, unsigned long num)
{
    struct ioperm_data *data;

    data = LIST_FIRST(&ioperm_head);
    while (data) {
        struct ioperm_data *next = LIST_NEXT(data, entries);

        if (data->start_port == start_port && data->num == num) {
            LIST_REMOVE(data, entries);
            qemu_free(data);
        }
        data = next;
    }
}
void kvm_ioperm(CPUState *env, void *data)
{
    if (kvm_enabled() && qemu_system_ready)
        on_vcpu(env, kvm_arch_do_ioperm, data);
}
#endif /* USE_KVM_DEVICE_ASSIGNMENT */
void kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
{
    void *buf;

    if (must_use_aliases_source(start_addr))
        return;

    buf = qemu_malloc((end_addr - start_addr) / 8 + 2);
    kvm_get_dirty_pages_range(kvm_context, start_addr, end_addr - start_addr,
                              buf, NULL, kvm_get_dirty_bitmap_cb);
    qemu_free(buf);
}
int kvm_log_start(target_phys_addr_t phys_addr, target_phys_addr_t len)
{
    if (must_use_aliases_source(phys_addr))
        return 0;
    kvm_qemu_log_memory(phys_addr, len, 1);
    return 0;
}

int kvm_log_stop(target_phys_addr_t phys_addr, target_phys_addr_t len)
{
    if (must_use_aliases_source(phys_addr))
        return 0;
    kvm_qemu_log_memory(phys_addr, len, 0);
    return 0;
}
/* hack: both libkvm and upstream qemu define kvm_has_sync_mmu(), differently */
#undef kvm_has_sync_mmu
int qemu_kvm_has_sync_mmu(void)
{
    return kvm_has_sync_mmu(kvm_context);
}
void qemu_kvm_cpu_stop(CPUState *env)
{
    if (kvm_enabled())
        env->kvm_cpu_state.stopped = 1;
}