 * Copyright IBM, Corp. 2008
 * Anthony Liguori <aliguori@us.ibm.com>
 * Glauber Costa <gcosta@redhat.com>
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
#include <sys/types.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include "qemu-common.h"
/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#define dprintf(fmt, ...) \
typedef struct KVMSlot
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;
    ram_addr_t phys_offset;

typedef struct kvm_dirty_log KVMDirtyLog;
    int broken_set_mem_region;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
    int irqchip_in_kernel;

static KVMState *kvm_state;
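/* Pick the first unused entry (memory_size == 0) in the slot array for a new
 * memory region. */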
static KVMSlot *kvm_alloc_slot(KVMState *s)
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        /* KVM private memory slots */
        if (s->slots[i].memory_size == 0)

    fprintf(stderr, "%s: no free slot available\n", __func__);
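/* Look up the slot whose boundaries match [start_addr, end_addr) exactly. */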
static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
 * Find overlapping slot with lowest start address
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            target_phys_addr_t start_addr,
                                            target_phys_addr_t end_addr)
    KVMSlot *found = NULL;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
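/* Convert a KVMSlot into a struct kvm_userspace_memory_region and pass it to
 * the kernel; a memory_size of 0 tells KVM to drop the slot. */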
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)qemu_get_ram_ptr(slot->phys_offset);
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;

    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
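/* Per-vCPU reset hook: reset the architectural state and write the registers
 * back into the kernel. */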
static void kvm_reset_vcpu(void *opaque)
    CPUState *env = opaque;

    kvm_arch_reset_vcpu(env);
    if (kvm_arch_put_registers(env)) {
        fprintf(stderr, "Fatal: kvm vcpu reset failed\n");
int kvm_irqchip_in_kernel(void)
    return kvm_state->irqchip_in_kernel;

int kvm_pit_in_kernel(void)
    return kvm_state->pit_in_kernel;
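/* Create the vCPU with KVM_CREATE_VCPU, map its shared kvm_run area and run
 * the per-architecture setup. */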
int kvm_init_vcpu(CPUState *env)
    KVMState *s = kvm_state;

    dprintf("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
        dprintf("kvm_create_vcpu failed\n");

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
        dprintf("KVM_GET_VCPU_MMAP_SIZE failed\n");

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
    if (env->kvm_run == MAP_FAILED) {
        dprintf("mmap'ing vcpu state failed\n");

    ret = kvm_arch_init_vcpu(env);
    qemu_register_reset(kvm_reset_vcpu, env);
    kvm_arch_reset_vcpu(env);
    ret = kvm_arch_put_registers(env);
 * dirty pages logging control
static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, int flags, int mask)
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);

        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (target_phys_addr_t)(phys_addr + size - 1));

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    if (flags == old_flags) {

    return kvm_set_user_memory_region(s, mem);
int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size)
    return kvm_dirty_pages_log_change(phys_addr, size,
                                      KVM_MEM_LOG_DIRTY_PAGES,
                                      KVM_MEM_LOG_DIRTY_PAGES);

int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size)
    return kvm_dirty_pages_log_change(phys_addr, size,
                                      KVM_MEM_LOG_DIRTY_PAGES);
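/* Enable or disable dirty logging for migration, re-registering every slot
 * whose KVM_MEM_LOG_DIRTY_PAGES flag does not already match. */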
int kvm_set_migration_log(int enable)
    KVMState *s = kvm_state;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {

        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
        err = kvm_set_user_memory_region(s, mem);
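/* Return bit nr of a little-endian bitmap; used to walk the per-page dirty
 * bitmap filled in by KVM_GET_DIRTY_LOG. */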
static int test_le_bit(unsigned long nr, unsigned char *addr)
    return (addr[nr >> 3] >> (nr & 7)) & 1;
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using cpu_physical_memory_set_dirty().
 * This means all bits are set to dirty.
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    target_phys_addr_t phys_addr;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);

        size = ((mem->memory_size >> TARGET_PAGE_BITS) + 7) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = qemu_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = qemu_realloc(d.dirty_bitmap, size);
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            dprintf("ioctl failed %d\n", errno);

        for (phys_addr = mem->start_addr, addr = mem->phys_offset;
             phys_addr < mem->start_addr + mem->memory_size;
             phys_addr += TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
            unsigned char *bitmap = (unsigned char *)d.dirty_bitmap;
            unsigned nr = (phys_addr - mem->start_addr) >> TARGET_PAGE_BITS;

            if (test_le_bit(nr, bitmap)) {
                cpu_physical_memory_set_dirty(addr);

        start_addr = phys_addr;

    qemu_free(d.dirty_bitmap);
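/* Ask KVM to coalesce MMIO writes to [start, start + size): the kernel queues
 * them in a ring instead of exiting to userspace on every access;
 * kvm_run_coalesced_mmio() drains that ring later. */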
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
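/* Probe a KVM_CAP_* capability via KVM_CHECK_EXTENSION; 0 (or a failed ioctl)
 * means the capability is unavailable. */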
int kvm_check_extension(KVMState *s, unsigned int extension)
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
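/* Global KVM setup: open /dev/kvm, check the API version, create the VM and
 * probe the capabilities the rest of this file depends on. */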
int kvm_init(int smp_cpus)
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";

        fprintf(stderr, "No SMP KVM support, use '-smp 1'\n");

    s = qemu_mallocz(sizeof(KVMState));

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);

    for (i = 0; i < ARRAY_SIZE(s->slots); i++)
        s->slots[i].slot = i;

    s->fd = qemu_open("/dev/kvm", O_RDWR);
        fprintf(stderr, "Could not access KVM kernel module: %m\n");

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        fprintf(stderr, "kvm version too old\n");

    if (ret > KVM_API_VERSION) {
        fprintf(stderr, "kvm version not supported\n");

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);

    /* initially, KVM allocated its own memory and we had to jump through
     * hoops to make phys_ram_base point to this. Modern versions of KVM
     * just use a user allocated buffer so we can use regular pages
     * unmodified. Make sure we have a sufficiently modern version of KVM.
    if (!kvm_check_extension(s, KVM_CAP_USER_MEMORY)) {
        fprintf(stderr, "kvm does not support KVM_CAP_USER_MEMORY\n%s",

    /* There was a nasty bug in < kvm-80 that prevented memory slots from being
     * destroyed properly. Since we rely on this capability, refuse to work
     * with any kernel without this capability. */
    if (!kvm_check_extension(s, KVM_CAP_DESTROY_MEMORY_REGION_WORKS)) {
                "KVM kernel module broken (DESTROY_MEMORY_REGION).\n%s",

#ifdef KVM_CAP_COALESCED_MMIO
    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
    s->coalesced_mmio = 0;

    s->broken_set_mem_region = 1;
#ifdef KVM_CAP_JOIN_MEMORY_REGIONS_WORKS
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    s->broken_set_mem_region = 0;

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);

    ret = kvm_arch_init(s, smp_cpus);
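/* Replay the port I/O described by a KVM_EXIT_IO exit: for IN, read the port
 * and store into the shared data buffer; for OUT, load from the buffer and
 * write to the port, once per repetition. */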
static int kvm_handle_io(uint16_t port, void *data, int direction, int size,
    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
                stb_p(ptr, cpu_inb(port));
                stw_p(ptr, cpu_inw(port));
                stl_p(ptr, cpu_inl(port));
                cpu_outb(port, ldub_p(ptr));
                cpu_outw(port, lduw_p(ptr));
                cpu_outl(port, ldl_p(ptr));
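/* Drain the coalesced-MMIO ring shared with the kernel, replaying each queued
 * entry as an ordinary physical memory write. */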
static void kvm_run_coalesced_mmio(CPUState *env, struct kvm_run *run)
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;
    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_ring *ring;

        ring = (void *)run + (s->coalesced_mmio * TARGET_PAGE_SIZE);
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            /* FIXME smp_wmb() */
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
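/* Fetch the register state from the kernel on first use; regs_modified marks
 * QEMU's copy as authoritative so it is written back before the next KVM_RUN. */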
void kvm_cpu_synchronize_state(CPUState *env)
    if (!env->kvm_state->regs_modified) {
        kvm_arch_get_registers(env);
        env->kvm_state->regs_modified = 1;
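/* Main vCPU loop: enter the guest with KVM_RUN (dropping the iothread lock
 * around the ioctl) and dispatch on run->exit_reason. */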
int kvm_cpu_exec(CPUState *env)
    struct kvm_run *run = env->kvm_run;

    dprintf("kvm_cpu_exec()\n");

        if (env->exit_request) {
            dprintf("interrupt exit requested\n");

        if (env->kvm_state->regs_modified) {
            kvm_arch_put_registers(env);
            env->kvm_state->regs_modified = 0;

        kvm_arch_pre_run(env, run);
        qemu_mutex_unlock_iothread();
        ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
        qemu_mutex_lock_iothread();
        kvm_arch_post_run(env, run);

        if (ret == -EINTR || ret == -EAGAIN) {
            dprintf("io window exit\n");

            dprintf("kvm run failed %s\n", strerror(-ret));

        kvm_run_coalesced_mmio(env, run);

        ret = 0; /* exit loop */
        switch (run->exit_reason) {
            dprintf("handle_io\n");
            ret = kvm_handle_io(run->io.port,
                                (uint8_t *)run + run->io.data_offset,
            dprintf("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            dprintf("irq_window_open\n");
        case KVM_EXIT_SHUTDOWN:
            dprintf("shutdown\n");
            qemu_system_reset_request();
        case KVM_EXIT_UNKNOWN:
            dprintf("kvm_exit_unknown\n");
        case KVM_EXIT_FAIL_ENTRY:
            dprintf("kvm_exit_fail_entry\n");
        case KVM_EXIT_EXCEPTION:
            dprintf("kvm_exit_exception\n");
            dprintf("kvm_exit_debug\n");
#ifdef KVM_CAP_SET_GUEST_DEBUG
            if (kvm_arch_debug(&run->debug.arch)) {
                gdb_set_stop_cpu(env);
                env->exception_index = EXCP_DEBUG;
            /* re-enter, this exception was guest-internal */
#endif /* KVM_CAP_SET_GUEST_DEBUG */
            dprintf("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);

    if (env->exit_request) {
        env->exit_request = 0;
        env->exception_index = EXCP_INTERRUPT;
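/* Register, resize or remove a guest physical memory range with KVM.
 * Overlapping slots are unregistered first and re-added as prefix/suffix
 * slots so the kernel's slot layout matches QEMU's view of the address space. */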
void kvm_set_phys_mem(target_phys_addr_t start_addr,
                      ram_addr_t phys_offset)
    KVMState *s = kvm_state;
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;

    if (start_addr & ~TARGET_PAGE_MASK) {
        if (flags >= IO_MEM_UNASSIGNED) {
            if (!kvm_lookup_overlapping_slot(s, start_addr,
                                             start_addr + size)) {
            fprintf(stderr, "Unaligned split of a KVM memory slot\n");
            fprintf(stderr, "Only page-aligned memory slots supported\n");

    /* KVM does not support read-only slots */
    phys_offset &= ~IO_MEM_ROM;

        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);

        if (flags < IO_MEM_UNASSIGNED && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (phys_offset - start_addr == mem->phys_offset - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - nothing to be done. */

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));

        /* Workaround for older KVM versions: we can't join slots, not even by
         * unregistering the previous ones and then registering the larger
         * slot. We have to maintain the existing fragmentation. Sigh.
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size &&
            flags < IO_MEM_UNASSIGNED) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;

            err = kvm_set_user_memory_region(s, mem);
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,

            start_addr += old.memory_size;
            phys_offset += old.memory_size;
            size -= old.memory_size;

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;

            err = kvm_set_user_memory_region(s, mem);
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->phys_offset = old.phys_offset + size_delta;

            err = kvm_set_user_memory_region(s, mem);
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));

    /* in case the KVM bug workaround already "consumed" the new slot */

    /* KVM does not need to know about this memory */
    if (flags >= IO_MEM_UNASSIGNED)

    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->phys_offset = phys_offset;

    err = kvm_set_user_memory_region(s, mem);
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
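/* Thin vararg wrappers around ioctl() on the /dev/kvm fd, the VM fd and a
 * vCPU fd, respectively. */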
int kvm_ioctl(KVMState *s, int type, ...)
    arg = va_arg(ap, void *);
    ret = ioctl(s->fd, type, arg);

int kvm_vm_ioctl(KVMState *s, int type, ...)
    arg = va_arg(ap, void *);
    ret = ioctl(s->vmfd, type, arg);

int kvm_vcpu_ioctl(CPUState *env, int type, ...)
    arg = va_arg(ap, void *);
    ret = ioctl(env->kvm_fd, type, arg);
int kvm_has_sync_mmu(void)
#ifdef KVM_CAP_SYNC_MMU
    KVMState *s = kvm_state;

    return kvm_check_extension(s, KVM_CAP_SYNC_MMU);

int kvm_has_vcpu_events(void)
    return kvm_state->vcpu_events;
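/* When the kernel lacks a synchronous KVM MMU (KVM_CAP_SYNC_MMU), guest RAM is
 * marked MADV_DONTFORK so the range is not inherited by child processes
 * across fork(). */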
void kvm_setup_guest_memory(void *start, size_t size)
    if (!kvm_has_sync_mmu()) {
        int ret = madvise(start, size, MADV_DONTFORK);
                "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");

#endif /* KVM_UPSTREAM */
#ifdef KVM_CAP_SET_GUEST_DEBUG

static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
#ifdef CONFIG_IOTHREAD
    if (env == cpu_single_env) {

#endif /* KVM_UPSTREAM */
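/* Look up an already-inserted software breakpoint in the per-VM breakpoint
 * list. */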
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {

int kvm_sw_breakpoints_active(CPUState *env)
    return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;

static void kvm_invoke_set_guest_debug(void *data)
    struct kvm_set_guest_debug_data *dbg_data = data;
    CPUState *env = dbg_data->env;

    if (env->kvm_state->regs_modified) {
        kvm_arch_put_registers(env);
        env->kvm_state->regs_modified = 0;

    dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
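/* Build the kvm_guest_debug control word (single-step plus arch-specific
 * breakpoint state) and apply it on the vCPU's own thread via on_vcpu(). */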
int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
    struct kvm_set_guest_debug_data data;

    data.dbg.control = 0;
    if (env->singlestep_enabled)
        data.dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

    kvm_arch_update_guest_debug(env, &data.dbg);
    data.dbg.control |= reinject_trap;

    on_vcpu(env, kvm_invoke_set_guest_debug, &data);
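/* Insert a software or hardware breakpoint (as requested by the gdbstub) and
 * refresh the guest-debug state on every vCPU. */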
int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
    struct kvm_sw_breakpoint *bp;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);

        bp = qemu_malloc(sizeof(struct kvm_sw_breakpoint));

        err = kvm_arch_insert_sw_breakpoint(current_env, bp);

        QTAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
    struct kvm_sw_breakpoint *bp;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);

        if (bp->use_count > 1) {

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);

        QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
void kvm_remove_all_breakpoints(CPUState *current_env)
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0)
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu)
        kvm_update_guest_debug(env, 0);
#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)

void kvm_remove_all_breakpoints(CPUState *current_env)

#endif /* !KVM_CAP_SET_GUEST_DEBUG */

#include "qemu-kvm.c"