 * Copyright IBM, Corp. 2008
 *
 * Anthony Liguori <aliguori@us.ibm.com>
 * Glauber Costa   <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include <sys/types.h>
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE
#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif
typedef struct KVMSlot
{
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;
    ram_addr_t phys_offset;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;
struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
    int broken_set_mem_region;
    int migration_log;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int irqchip_in_kernel;
    int pit_in_kernel;
};
static KVMState *kvm_state;
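/*
 * Note: each entry in KVMState.slots mirrors one guest-physical memory range
 * that has been handed to the kernel.  A slot with memory_size == 0 is free;
 * that is what kvm_alloc_slot() scans for, and it is also how slots are later
 * dropped again (they are re-registered with a zero size).
 */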
static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        /* KVM private memory slots */
        if (s->slots[i].memory_size == 0)
            return &s->slots[i];
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}
static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}
/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            target_phys_addr_t start_addr,
                                            target_phys_addr_t end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}
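/*
 * Each KVMSlot below is pushed to the kernel as a struct
 * kvm_userspace_memory_region through the KVM_SET_USER_MEMORY_REGION vm
 * ioctl.  Registering a slot whose memory_size is 0 deletes the kernel-side
 * region, which is how the update paths further down unregister stale
 * mappings before carving out new ones.
 */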
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)qemu_get_ram_ptr(slot->phys_offset);
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}
static void kvm_reset_vcpu(void *opaque)
{
    CPUState *env = opaque;

    if (kvm_arch_put_registers(env)) {
        fprintf(stderr, "Fatal: kvm vcpu reset failed\n");
        abort();
    }
}
int kvm_irqchip_in_kernel(void)
{
    return kvm_state->irqchip_in_kernel;
}

int kvm_pit_in_kernel(void)
{
    return kvm_state->pit_in_kernel;
}
int kvm_init_vcpu(CPUState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    dprintf("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    if (ret < 0) {
        dprintf("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        dprintf("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        dprintf("mmap'ing vcpu state failed\n");
        goto err;
    }

    ret = kvm_arch_init_vcpu(env);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
        ret = kvm_arch_put_registers(env);
    }
err:
    return ret;
}
int kvm_put_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };

    return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
}
int kvm_get_mp_state(CPUState *env)
{
    struct kvm_mp_state mp_state;
    int ret;

    ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
    if (ret < 0) {
        return ret;
    }
    env->mp_state = mp_state.mp_state;
    return 0;
}
/*
 * dirty pages logging control
 */
static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, int flags, int mask)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);
    int old_flags;

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (target_phys_addr_t)(phys_addr + size - 1));
        return -EINVAL;
    }

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | flags;
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}
int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size,
                                      KVM_MEM_LOG_DIRTY_PAGES,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}

int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size,
                                      0,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}
int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}
static int test_le_bit(unsigned long nr, unsigned char *addr)
{
    return (addr[nr >> 3] >> (nr & 7)) & 1;
}
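/*
 * Dirty log format: KVM_GET_DIRTY_LOG fills a caller-supplied buffer with one
 * bit per target page of the slot, least-significant bit first within each
 * byte.  test_le_bit() above indexes that layout (byte nr / 8, bit nr % 8),
 * and the (pages + 7) / 8 calculation below sizes the buffer accordingly.
 */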
/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 *
 * This function updates qemu's dirty bitmap using
 * cpu_physical_memory_set_dirty(), i.e. pages can only be marked dirty here,
 * never cleaned.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    target_phys_addr_t phys_addr;
    ram_addr_t addr;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        size = ((mem->memory_size >> TARGET_PAGE_BITS) + 7) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = qemu_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = qemu_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            dprintf("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        for (phys_addr = mem->start_addr, addr = mem->phys_offset;
             phys_addr < mem->start_addr + mem->memory_size;
             phys_addr += TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
            unsigned char *bitmap = (unsigned char *)d.dirty_bitmap;
            unsigned nr = (phys_addr - mem->start_addr) >> TARGET_PAGE_BITS;

            if (test_le_bit(nr, bitmap)) {
                cpu_physical_memory_set_dirty(addr);
            }
        }
        start_addr = phys_addr;
    }
    qemu_free(d.dirty_bitmap);

    return ret;
}
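/*
 * Coalesced MMIO: with KVM_CAP_COALESCED_MMIO the kernel does not exit to
 * userspace for every write to a registered zone.  Instead it queues the
 * writes in a ring buffer page shared with userspace; kvm_run_coalesced_mmio()
 * further down drains that ring and replays the writes into guest memory.
 * The two functions below only register and unregister such zones.
 */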
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
#endif

    return ret;
}
int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
#endif

    return ret;
}
int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}
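/*
 * KVM_CHECK_EXTENSION returns 0 when a capability is absent and a positive
 * value (usually 1) when it is present, so kvm_check_extension() above also
 * folds ioctl errors into "not supported".  kvm_init() below relies on it to
 * require KVM_CAP_USER_MEMORY and KVM_CAP_DESTROY_MEMORY_REGION_WORKS before
 * agreeing to run.
 */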
int kvm_init(int smp_cpus)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    int ret;
    int i;

    if (smp_cpus > 1) {
        fprintf(stderr, "No SMP KVM support, use '-smp 1'\n");
        return -EINVAL;
    }

    s = qemu_mallocz(sizeof(KVMState));

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++)
        s->slots[i].slot = i;

    s->vmfd = -1;
    s->fd = open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0)
            ret = -EINVAL;
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0)
        goto err;

    /* initially, KVM allocated its own memory and we had to jump through
     * hoops to make phys_ram_base point to this.  Modern versions of KVM
     * just use a user allocated buffer so we can use regular pages
     * unmodified.  Make sure we have a sufficiently modern version of KVM.
     */
    if (!kvm_check_extension(s, KVM_CAP_USER_MEMORY)) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support KVM_CAP_USER_MEMORY\n%s",
                upgrade_note);
        goto err;
    }

    /* There was a nasty bug in < kvm-80 that prevents memory slots from being
     * destroyed properly.  Since we rely on this capability, refuse to work
     * with any kernel without this capability. */
    if (!kvm_check_extension(s, KVM_CAP_DESTROY_MEMORY_REGION_WORKS)) {
        ret = -EINVAL;
        fprintf(stderr,
                "KVM kernel module broken (DESTROY_MEMORY_REGION).\n%s",
                upgrade_note);
        goto err;
    }

#ifdef KVM_CAP_COALESCED_MMIO
    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
#else
    s->coalesced_mmio = 0;
#endif

    s->broken_set_mem_region = 1;
#ifdef KVM_CAP_JOIN_MEMORY_REGIONS_WORKS
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }
#endif

    ret = kvm_arch_init(s, smp_cpus);
    if (ret < 0)
        goto err;

    kvm_state = s;

    return 0;

err:
    if (s) {
        if (s->vmfd != -1)
            close(s->vmfd);
        if (s->fd != -1)
            close(s->fd);
    }
    qemu_free(s);

    return ret;
}
static int kvm_handle_io(uint16_t port, void *data, int direction, int size,
                         uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }

    return 1;
}
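/*
 * The coalesced MMIO ring lives at a fixed page offset within the vcpu's
 * kvm_run mapping: the value returned by the KVM_CAP_COALESCED_MMIO extension
 * check is that offset in pages, which kvm_init() stored in s->coalesced_mmio
 * and which explains the (void *)run + s->coalesced_mmio * TARGET_PAGE_SIZE
 * arithmetic below.  Entries between 'first' and 'last' are pending; the
 * consumer replays each one and advances 'first' modulo KVM_COALESCED_MMIO_MAX.
 */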
static void kvm_run_coalesced_mmio(CPUState *env, struct kvm_run *run)
{
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_ring *ring;

        ring = (void *)run + (s->coalesced_mmio * TARGET_PAGE_SIZE);
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            /* FIXME smp_wmb() */
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }
#endif
}
void kvm_cpu_synchronize_state(CPUState *env)
{
    if (!env->kvm_state->regs_modified) {
        kvm_arch_get_registers(env);
        env->kvm_state->regs_modified = 1;
    }
}
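/*
 * Register synchronization is lazy: while the vcpu runs, the authoritative
 * register state lives in the kernel.  kvm_cpu_synchronize_state() above
 * pulls it into env once and sets regs_modified; kvm_cpu_exec() below pushes
 * env back with kvm_arch_put_registers() and clears the flag before the next
 * KVM_RUN.
 */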
int kvm_cpu_exec(CPUState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret;

    dprintf("kvm_cpu_exec()\n");

    do {
        if (env->exit_request) {
            dprintf("interrupt exit requested\n");
            ret = 0;
            break;
        }

        if (env->kvm_state->regs_modified) {
            kvm_arch_put_registers(env);
            env->kvm_state->regs_modified = 0;
        }

        kvm_arch_pre_run(env, run);
        qemu_mutex_unlock_iothread();
        ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
        qemu_mutex_lock_iothread();
        kvm_arch_post_run(env, run);

        if (ret == -EINTR || ret == -EAGAIN) {
            dprintf("io window exit\n");
            ret = 0;
            break;
        }

        if (ret < 0) {
            dprintf("kvm run failed %s\n", strerror(-ret));
            abort();
        }

        kvm_run_coalesced_mmio(env, run);

        ret = 0; /* exit loop */
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            dprintf("handle_io\n");
            ret = kvm_handle_io(run->io.port,
                                (uint8_t *)run + run->io.data_offset,
                                run->io.direction,
                                run->io.size,
                                run->io.count);
            break;
        case KVM_EXIT_MMIO:
            dprintf("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 1;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            dprintf("irq_window_open\n");
            break;
        case KVM_EXIT_SHUTDOWN:
            dprintf("shutdown\n");
            qemu_system_reset_request();
            ret = 1;
            break;
        case KVM_EXIT_UNKNOWN:
            dprintf("kvm_exit_unknown\n");
            break;
        case KVM_EXIT_FAIL_ENTRY:
            dprintf("kvm_exit_fail_entry\n");
            break;
        case KVM_EXIT_EXCEPTION:
            dprintf("kvm_exit_exception\n");
            break;
        case KVM_EXIT_DEBUG:
            dprintf("kvm_exit_debug\n");
#ifdef KVM_CAP_SET_GUEST_DEBUG
            if (kvm_arch_debug(&run->debug.arch)) {
                gdb_set_stop_cpu(env);
                vm_stop(EXCP_DEBUG);
                env->exception_index = EXCP_DEBUG;
                return 0;
            }
            /* re-enter, this exception was guest-internal */
            ret = 1;
#endif /* KVM_CAP_SET_GUEST_DEBUG */
            break;
        default:
            dprintf("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret > 0);

    if (env->exit_request) {
        env->exit_request = 0;
        env->exception_index = EXCP_INTERRUPT;
    }

    return ret;
}
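/*
 * kvm_set_phys_mem() maps QEMU's flat physical memory ranges onto KVM memory
 * slots.  Slots registered with the kernel must not overlap, so any existing
 * overlapping slot is first unregistered and its non-overlapping prefix and
 * suffix are re-registered before the new range itself is installed.  On
 * kernels where joining adjacent slots is broken (s->broken_set_mem_region),
 * the old fragmentation is kept instead, as the comment inside explains.
 */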
void kvm_set_phys_mem(target_phys_addr_t start_addr,
                      ram_addr_t size,
                      ram_addr_t phys_offset)
{
    KVMState *s = kvm_state;
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    KVMSlot *mem, old;
    int err;

    if (start_addr & ~TARGET_PAGE_MASK) {
        if (flags >= IO_MEM_UNASSIGNED) {
            if (!kvm_lookup_overlapping_slot(s, start_addr,
                                             start_addr + size)) {
                return;
            }
            fprintf(stderr, "Unaligned split of a KVM memory slot\n");
        } else {
            fprintf(stderr, "Only page-aligned memory slots supported\n");
        }
        abort();
    }

    /* KVM does not support read-only slots */
    phys_offset &= ~IO_MEM_ROM;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }
        if (flags < IO_MEM_UNASSIGNED && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (phys_offset - start_addr == mem->phys_offset - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - nothing to be done. */
            return;
        }

        old = *mem;

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }
        /* Workaround for older KVM versions: we can't join slots, even not by
         * unregistering the previous ones and then registering the larger
         * slot. We have to maintain the existing fragmentation. Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size &&
            flags < IO_MEM_UNASSIGNED) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            phys_offset += old.memory_size;
            size -= old.memory_size;
            continue;
        }
        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->phys_offset = old.phys_offset + size_delta;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }
    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size)
        return;

    /* KVM does not need to know about this memory */
    if (flags >= IO_MEM_UNASSIGNED)
        return;

    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->phys_offset = phys_offset;
    mem->flags = 0;

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}
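/*
 * Three levels of KVM file descriptors are in play below: s->fd is the
 * /dev/kvm system fd (global queries such as KVM_CHECK_EXTENSION), s->vmfd
 * comes from KVM_CREATE_VM (VM-wide operations such as memory slots), and
 * env->kvm_fd comes from KVM_CREATE_VCPU (per-vcpu calls such as KVM_RUN).
 * The three wrappers differ only in which descriptor they pass to ioctl().
 */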
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->fd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_vcpu_ioctl(CPUState *env, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(env->kvm_fd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}
int kvm_has_sync_mmu(void)
{
#ifdef KVM_CAP_SYNC_MMU
    KVMState *s = kvm_state;

    return kvm_check_extension(s, KVM_CAP_SYNC_MMU);
#else
    return 0;
#endif
}
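/*
 * Without a synchronous MMU (no KVM_CAP_SYNC_MMU, i.e. no MMU notifiers in
 * the kernel), KVM holds its own references to guest pages, and a fork() in
 * QEMU can leave guest RAM copy-on-write behind KVM's back so that QEMU and
 * the guest end up looking at different pages.  Marking guest memory
 * MADV_DONTFORK keeps it out of any child process entirely.
 */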
void kvm_setup_guest_memory(void *start, size_t size)
{
    if (!kvm_has_sync_mmu()) {
#ifdef MADV_DONTFORK
        int ret = madvise(start, size, MADV_DONTFORK);

        if (ret) {
            perror("madvise");
            exit(1);
        }
#else
        fprintf(stderr,
                "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
        exit(1);
#endif
    }
}
#endif /* KVM_UPSTREAM */

#ifdef KVM_CAP_SET_GUEST_DEBUG
#ifdef KVM_UPSTREAM
static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
{
#ifdef CONFIG_IOTHREAD
    if (env == cpu_single_env) {
        func(data);
        return;
    }
    abort();
#else
    func(data);
#endif
}
#endif /* KVM_UPSTREAM */
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc)
            return bp;
    }
    return NULL;
}
int kvm_sw_breakpoints_active(CPUState *env)
{
    return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}
struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUState *env;
    int err;
};
static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;
    CPUState *env = dbg_data->env;

    if (env->kvm_state->regs_modified) {
        kvm_arch_put_registers(env);
        env->kvm_state->regs_modified = 0;
    }
    dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
}
int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = 0;
    if (env->singlestep_enabled)
        data.dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

    kvm_arch_update_guest_debug(env, &data.dbg);
    data.dbg.control |= reinject_trap;

    data.env = env;

    on_vcpu(env, kvm_invoke_set_guest_debug, &data);
    return data.err;
}
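/*
 * Guest debugging: kvm_update_guest_debug() assembles the kvm_guest_debug
 * control word (single-stepping and/or breakpoints) and applies it with
 * KVM_SET_GUEST_DEBUG on the vcpu's own thread via on_vcpu(), after flushing
 * any dirty register state.  The helpers below keep a use-counted list of
 * software breakpoints per VM and forward hardware breakpoints to the
 * per-arch hooks; every vcpu is then refreshed with kvm_update_guest_debug().
 */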
int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = qemu_malloc(sizeof(struct kvm_sw_breakpoint));
        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            qemu_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                           bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}
int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (!bp)
            return -ENOENT;

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err)
            return err;

        QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
        qemu_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}
void kvm_remove_all_breakpoints(CPUState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;
    CPUState *env;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0)
                    break;
            }
        }
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu)
        kvm_update_guest_debug(env, 0);
}
#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
}

#endif /* !KVM_CAP_SET_GUEST_DEBUG */
#include "qemu-kvm.c"