#include "config-host.h"

#define MSR_IA32_TSC 0x10

extern void perror(const char *s);

kvm_context_t kvm_context;
static struct kvm_msr_list *kvm_msr_list;
static int kvm_has_msr_star;

static CPUState *saved_env[NR_CPU];
static void set_msr_entry(struct kvm_msr_entry *entry, uint32_t index,
                          uint64_t data)
{
    entry->index = index;
    entry->data = data;
}
/* returns 0 on success, non-0 on failure */
static int get_msr_entry(struct kvm_msr_entry *entry, CPUState *env)
{
    switch (entry->index) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = entry->data;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = entry->data;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = entry->data;
        break;
    case MSR_STAR:
        env->star = entry->data;
        break;
    case MSR_CSTAR:
        env->cstar = entry->data;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = entry->data;
        break;
    case MSR_FMASK:
        env->fmask = entry->data;
        break;
    case MSR_LSTAR:
        env->lstar = entry->data;
        break;
    case MSR_IA32_TSC:
        env->tsc = entry->data;
        break;
    default:
        printf("Warning: unknown MSR index 0x%x\n", entry->index);
        return 1;
    }
    return 0;
}
static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    /* vm86 segments always use fixed attributes (assumed values here):
     * a 16-bit, present, writable data segment at CPL 3 */
    lhs->type = 3;
    lhs->present = 1;
    lhs->dpl = 3;
    lhs->db = 0;
    lhs->s = 1;
    lhs->l = 0;
    lhs->g = 0;
    lhs->avl = 0;
}
static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
{
    unsigned flags = rhs->flags;

    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
    lhs->present = (flags & DESC_P_MASK) != 0;
    lhs->dpl = rhs->selector & 3;
    lhs->db = (flags >> DESC_B_SHIFT) & 1;
    lhs->s = (flags & DESC_S_MASK) != 0;
    lhs->l = (flags >> DESC_L_SHIFT) & 1;
    lhs->g = (flags & DESC_G_MASK) != 0;
    lhs->avl = (flags & DESC_AVL_MASK) != 0;
}
static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
{
    lhs->selector = rhs->selector;
    lhs->base = rhs->base;
    lhs->limit = rhs->limit;
    lhs->flags =
        (rhs->type << DESC_TYPE_SHIFT)
        | (rhs->present * DESC_P_MASK)
        | (rhs->dpl << DESC_DPL_SHIFT)
        | (rhs->db << DESC_B_SHIFT)
        | (rhs->s * DESC_S_MASK)
        | (rhs->l << DESC_L_SHIFT)
        | (rhs->g * DESC_G_MASK)
        | (rhs->avl * DESC_AVL_MASK);
}
/* QEMU's reset values are not compatible with SVM; this function
 * is used to fix up the segment descriptor values */
static void fix_realmode_dataseg(struct kvm_segment *seg)
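{
    /* A minimal sketch, assuming the usual SVM-compatible values (the
     * original body is not shown): make the register a present, writable
     * data segment so VMRUN's real-mode consistency checks accept it. */
    seg->type = 3;      /* read/write data, accessed */
    seg->present = 1;
    seg->dpl = 0;
    seg->db = 1;
    seg->s = 1;
}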
static void load_regs(CPUState *env)
{
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    struct kvm_msr_entry msrs[MSR_COUNT];
    int rc, n;
    regs.rax = env->regs[R_EAX];
    regs.rbx = env->regs[R_EBX];
    regs.rcx = env->regs[R_ECX];
    regs.rdx = env->regs[R_EDX];
    regs.rsi = env->regs[R_ESI];
    regs.rdi = env->regs[R_EDI];
    regs.rsp = env->regs[R_ESP];
    regs.rbp = env->regs[R_EBP];

    regs.r8 = env->regs[8];
    regs.r9 = env->regs[9];
    regs.r10 = env->regs[10];
    regs.r11 = env->regs[11];
    regs.r12 = env->regs[12];
    regs.r13 = env->regs[13];
    regs.r14 = env->regs[14];
    regs.r15 = env->regs[15];

    regs.rflags = env->eflags;

    kvm_set_regs(kvm_context, 0, &regs);
    memcpy(sregs.interrupt_bitmap, env->kvm_interrupt_bitmap,
           sizeof(sregs.interrupt_bitmap));
    if ((env->eflags & VM_MASK)) {
        set_v8086_seg(&sregs.cs, &env->segs[R_CS]);
        set_v8086_seg(&sregs.ds, &env->segs[R_DS]);
        set_v8086_seg(&sregs.es, &env->segs[R_ES]);
        set_v8086_seg(&sregs.fs, &env->segs[R_FS]);
        set_v8086_seg(&sregs.gs, &env->segs[R_GS]);
        set_v8086_seg(&sregs.ss, &env->segs[R_SS]);
    } else {
        set_seg(&sregs.cs, &env->segs[R_CS]);
        set_seg(&sregs.ds, &env->segs[R_DS]);
        set_seg(&sregs.es, &env->segs[R_ES]);
        set_seg(&sregs.fs, &env->segs[R_FS]);
        set_seg(&sregs.gs, &env->segs[R_GS]);
        set_seg(&sregs.ss, &env->segs[R_SS]);

        if (env->cr[0] & CR0_PE_MASK) {
            /* force ss cpl to cs cpl */
            sregs.ss.selector = (sregs.ss.selector & ~3) |
                                (sregs.cs.selector & 3);
            sregs.ss.dpl = sregs.ss.selector & 3;
        }

        if (!(env->cr[0] & CR0_PG_MASK)) {
            fix_realmode_dataseg(&sregs.cs);
            fix_realmode_dataseg(&sregs.ds);
            fix_realmode_dataseg(&sregs.es);
            fix_realmode_dataseg(&sregs.fs);
            fix_realmode_dataseg(&sregs.gs);
            fix_realmode_dataseg(&sregs.ss);
        }
    }
    set_seg(&sregs.tr, &env->tr);
    set_seg(&sregs.ldt, &env->ldt);

    sregs.idt.limit = env->idt.limit;
    sregs.idt.base = env->idt.base;
    sregs.gdt.limit = env->gdt.limit;
    sregs.gdt.base = env->gdt.base;

    sregs.cr0 = env->cr[0];
    sregs.cr2 = env->cr[2];
    sregs.cr3 = env->cr[3];
    sregs.cr4 = env->cr[4];

    sregs.apic_base = cpu_get_apic_base(env);
    sregs.efer = env->efer;
    sregs.cr8 = cpu_get_apic_tpr(env);

    kvm_set_sregs(kvm_context, 0, &sregs);
    /* msrs */
    n = 0;
    set_msr_entry(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
    set_msr_entry(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
    set_msr_entry(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
    if (kvm_has_msr_star)
        set_msr_entry(&msrs[n++], MSR_STAR, env->star);
    set_msr_entry(&msrs[n++], MSR_IA32_TSC, env->tsc);
    set_msr_entry(&msrs[n++], MSR_CSTAR, env->cstar);
    set_msr_entry(&msrs[n++], MSR_KERNELGSBASE, env->kernelgsbase);
    set_msr_entry(&msrs[n++], MSR_FMASK, env->fmask);
    set_msr_entry(&msrs[n++], MSR_LSTAR, env->lstar);

    rc = kvm_set_msrs(kvm_context, 0, msrs, n);
    if (rc == -1)
        perror("kvm_set_msrs FAILED");
}
static void save_regs(CPUState *env)
{
    struct kvm_regs regs;
    struct kvm_sregs sregs;
    struct kvm_msr_entry msrs[MSR_COUNT];
    int rc, n, i;
    uint32_t hflags;
    kvm_get_regs(kvm_context, 0, &regs);

    env->regs[R_EAX] = regs.rax;
    env->regs[R_EBX] = regs.rbx;
    env->regs[R_ECX] = regs.rcx;
    env->regs[R_EDX] = regs.rdx;
    env->regs[R_ESI] = regs.rsi;
    env->regs[R_EDI] = regs.rdi;
    env->regs[R_ESP] = regs.rsp;
    env->regs[R_EBP] = regs.rbp;

    env->regs[8] = regs.r8;
    env->regs[9] = regs.r9;
    env->regs[10] = regs.r10;
    env->regs[11] = regs.r11;
    env->regs[12] = regs.r12;
    env->regs[13] = regs.r13;
    env->regs[14] = regs.r14;
    env->regs[15] = regs.r15;

    env->eflags = regs.rflags;

    kvm_get_sregs(kvm_context, 0, &sregs);

    memcpy(env->kvm_interrupt_bitmap, sregs.interrupt_bitmap,
           sizeof(env->kvm_interrupt_bitmap));
    get_seg(&env->segs[R_CS], &sregs.cs);
    get_seg(&env->segs[R_DS], &sregs.ds);
    get_seg(&env->segs[R_ES], &sregs.es);
    get_seg(&env->segs[R_FS], &sregs.fs);
    get_seg(&env->segs[R_GS], &sregs.gs);
    get_seg(&env->segs[R_SS], &sregs.ss);

    get_seg(&env->tr, &sregs.tr);
    get_seg(&env->ldt, &sregs.ldt);

    env->idt.limit = sregs.idt.limit;
    env->idt.base = sregs.idt.base;
    env->gdt.limit = sregs.gdt.limit;
    env->gdt.base = sregs.gdt.base;
    env->cr[0] = sregs.cr0;
    env->cr[2] = sregs.cr2;
    env->cr[3] = sregs.cr3;
    env->cr[4] = sregs.cr4;

    cpu_set_apic_base(env, sregs.apic_base);

    env->efer = sregs.efer;
    //cpu_set_apic_tpr(env, sregs.cr8);
#define HFLAG_COPY_MASK ~( \
    HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
    HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
    HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
    HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
    /* recompute hflags from the segment and control register state */
    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
              (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
              (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                  (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) ||
            (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base |
                        env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) <<
                      HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    /* msrs */
    n = 0;
    msrs[n++].index = MSR_IA32_SYSENTER_CS;
    msrs[n++].index = MSR_IA32_SYSENTER_ESP;
    msrs[n++].index = MSR_IA32_SYSENTER_EIP;
    if (kvm_has_msr_star)
        msrs[n++].index = MSR_STAR;
    msrs[n++].index = MSR_IA32_TSC;
    msrs[n++].index = MSR_CSTAR;
    msrs[n++].index = MSR_KERNELGSBASE;
    msrs[n++].index = MSR_FMASK;
    msrs[n++].index = MSR_LSTAR;

    rc = kvm_get_msrs(kvm_context, 0, msrs, n);
    if (rc == -1) {
        perror("kvm_get_msrs FAILED");
    } else {
        n = rc; /* actual number of MSRs */
        for (i = 0; i < n; i++) {
            if (get_msr_entry(&msrs[i], env))
                return;
        }
    }
}
static int try_push_interrupts(void *opaque)
{
    CPUState **envs = opaque, *env;
    env = envs[0];

    if (env->ready_for_interrupt_injection &&
        (env->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) {
        env->interrupt_request &= ~CPU_INTERRUPT_HARD;
        // for now using cpu 0
        kvm_inject_irq(kvm_context, 0, cpu_get_pic_interrupt(env));
    }

    return (env->interrupt_request & CPU_INTERRUPT_HARD) != 0;
}
static void post_kvm_run(void *opaque, struct kvm_run *kvm_run)
{
    CPUState **envs = opaque, *env;
    env = envs[0];

    env->eflags = (kvm_run->if_flag) ? env->eflags | IF_MASK
                                     : env->eflags & ~IF_MASK;
    env->ready_for_interrupt_injection = kvm_run->ready_for_interrupt_injection;
    //cpu_set_apic_tpr(env, kvm_run->cr8);
    cpu_set_apic_base(env, kvm_run->apic_base);
}
static void pre_kvm_run(void *opaque, struct kvm_run *kvm_run)
{
    CPUState **envs = opaque, *env;
    env = envs[0];

    kvm_run->cr8 = cpu_get_apic_tpr(env);
}
void kvm_load_registers(CPUState *env)
{
    load_regs(env);
}

void kvm_save_registers(CPUState *env)
{
    save_regs(env);
}
int kvm_cpu_exec(CPUState *env)
{
    int r;
    int pending = (!env->ready_for_interrupt_injection ||
                   ((env->interrupt_request & CPU_INTERRUPT_HARD) &&
                    (env->eflags & IF_MASK)));

    if (!pending && (env->interrupt_request & CPU_INTERRUPT_EXIT)) {
        env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
        env->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit();
    }

    r = kvm_run(kvm_context, 0);
    if (r < 0) {
        printf("kvm_run returned %d\n", r);
        exit(1);
    }

    return 0;
}
static int kvm_debug(void *opaque, int vcpu)
{
    CPUState **envs = opaque;

    env = envs[0];
    env->exception_index = EXCP_DEBUG;
    return 1;
}
static int kvm_inb(void *opaque, uint16_t addr, uint8_t *data)
{
    *data = cpu_inb(0, addr);
    return 0;
}

static int kvm_inw(void *opaque, uint16_t addr, uint16_t *data)
{
    *data = cpu_inw(0, addr);
    return 0;
}

static int kvm_inl(void *opaque, uint16_t addr, uint32_t *data)
{
    *data = cpu_inl(0, addr);
    return 0;
}
#define PM_IO_BASE 0xb000

static int kvm_outb(void *opaque, uint16_t addr, uint8_t data)
{
    /* port 0xb2 is assumed here to be the SMI command port, written by the
     * BIOS to request ACPI enable/disable via bit 0 of PMCNTRL */
    if (addr == 0xb2) {
        switch (data) {
        case 0:
            cpu_outb(0, 0xb3, 0);
            break;
        case 1: {
            uint16_t x;

            /* enable acpi */
            x = cpu_inw(0, PM_IO_BASE + 4);
            x |= 1;
            cpu_outw(0, PM_IO_BASE + 4, x);
            break;
        }
        case 2: {
            uint16_t x;

            /* disable acpi */
            x = cpu_inw(0, PM_IO_BASE + 4);
            x &= ~1;
            cpu_outw(0, PM_IO_BASE + 4, x);
            break;
        }
        default:
            break;
        }
        return 0;
    }
    cpu_outb(0, addr, data);
    return 0;
}
static int kvm_outw(void *opaque, uint16_t addr, uint16_t data)
{
    cpu_outw(0, addr, data);
    return 0;
}

static int kvm_outl(void *opaque, uint16_t addr, uint32_t data)
{
    cpu_outl(0, addr, data);
    return 0;
}
static int kvm_readb(void *opaque, uint64_t addr, uint8_t *data)
{
    *data = ldub_phys(addr);
    return 0;
}

static int kvm_readw(void *opaque, uint64_t addr, uint16_t *data)
{
    *data = lduw_phys(addr);
    return 0;
}

static int kvm_readl(void *opaque, uint64_t addr, uint32_t *data)
{
    *data = ldl_phys(addr);
    return 0;
}

static int kvm_readq(void *opaque, uint64_t addr, uint64_t *data)
{
    *data = ldq_phys(addr);
    return 0;
}

static int kvm_writeb(void *opaque, uint64_t addr, uint8_t data)
{
    stb_phys(addr, data);
    return 0;
}

static int kvm_writew(void *opaque, uint64_t addr, uint16_t data)
{
    stw_phys(addr, data);
    return 0;
}

static int kvm_writel(void *opaque, uint64_t addr, uint32_t data)
{
    stl_phys(addr, data);
    return 0;
}

static int kvm_writeq(void *opaque, uint64_t addr, uint64_t data)
{
    stq_phys(addr, data);
    return 0;
}
static int kvm_io_window(void *opaque)
{
    return 1;
}
static int kvm_halt(void *opaque, int vcpu)
{
    CPUState **envs = opaque, *env;
    env = envs[0];

    if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK))) {
        env->hflags |= HF_HALTED_MASK;
        env->exception_index = EXCP_HLT;
    }

    return 1;
}
static int kvm_shutdown(void *opaque, int vcpu)
{
    qemu_system_reset_request();
    return 1;
}
static struct kvm_callbacks qemu_kvm_ops = {
    .writeb = kvm_writeb,
    .writew = kvm_writew,
    .writel = kvm_writel,
    .writeq = kvm_writeq,
    .shutdown = kvm_shutdown,
    .io_window = kvm_io_window,
    .try_push_interrupts = try_push_interrupts,
    .post_kvm_run = post_kvm_run,
    .pre_kvm_run = pre_kvm_run,
};
    /* Try to initialize kvm */
    kvm_context = kvm_init(&qemu_kvm_ops, saved_env);
int kvm_qemu_create_context(void)
{
    int i;

    if (kvm_create(kvm_context, phys_ram_size, (void **)&phys_ram_base) < 0) {
        return -1;
    }
    kvm_msr_list = kvm_get_msr_list(kvm_context);
    if (!kvm_msr_list) {
        return -1;
    }
    for (i = 0; i < kvm_msr_list->nmsrs; ++i)
        if (kvm_msr_list->indices[i] == MSR_STAR)
            kvm_has_msr_star = 1;
    return 0;
}
void kvm_qemu_destroy(void)
{
    kvm_finalize(kvm_context);
}
static void do_cpuid_ent(struct kvm_cpuid_entry *e, uint32_t function)
{
    e->function = function;
    e->edx &= ~(1 << 12); /* disable mtrr support */
    if (function == 0x80000001) {
        unsigned long h_eax = function, h_edx;

        // push/pop hack to workaround gcc 3 register pressure trouble
        asm(
#ifdef __x86_64__
            "push %%rbx; push %%rcx; cpuid; pop %%rcx; pop %%rbx"
#else
            "push %%ebx; push %%ecx; cpuid; pop %%ecx; pop %%ebx"
#endif
            : "+a"(h_eax), "=d"(h_edx));

        /* long mode (edx bit 29): mask out if the host lacks it */
        if ((h_edx & 0x20000000) == 0)
            e->edx &= ~0x20000000u;
        /* syscall/sysret (edx bit 11) */
        if ((h_edx & 0x00000800) == 0)
            e->edx &= ~0x00000800u;
        /* nx (edx bit 20) */
        if ((h_edx & 0x00100000) == 0)
            e->edx &= ~0x00100000u;
    }
    // sysenter isn't supported in compatibility mode on AMD, and syscall
    // isn't supported in compatibility mode on Intel, so advertise the
    // actual cpu, and say goodbye to migration between different vendors
    // if you use compatibility mode.
    if (function == 0) {
        uint32_t bcd[3];

        /* cpuid leaf 0: read the host's vendor string into bcd[]
         * (the leaf selection via "mov $0, %eax" is assumed here) */
        asm(
#ifdef __x86_64__
            "push %%rax; push %%rbx; push %%rcx; push %%rdx \n\t"
            "mov $0, %%eax \n\t"
            "cpuid \n\t"
            "mov (%%rsp), %%rax \n\t"
            "mov %%ebx, (%%rax) \n\t"
            "mov %%ecx, 4(%%rax) \n\t"
            "mov %%edx, 8(%%rax) \n\t"
            "pop %%rdx; pop %%rcx; pop %%rbx; pop %%rax"
#else
            "push %%eax; push %%ebx; push %%ecx; push %%edx \n\t"
            "mov $0, %%eax \n\t"
            "cpuid \n\t"
            "mov (%%esp), %%eax \n\t"
            "mov %%ebx, (%%eax) \n\t"
            "mov %%ecx, 4(%%eax) \n\t"
            "mov %%edx, 8(%%eax) \n\t"
            "pop %%edx; pop %%ecx; pop %%ebx; pop %%eax"
#endif
            : : "d"(bcd) : "memory");

        e->ebx = bcd[0];
        e->ecx = bcd[1];
        e->edx = bcd[2];
    }
}
int kvm_qemu_init_env(CPUState *cenv)
{
    struct kvm_cpuid_entry cpuid_ent[100];
    int cpuid_nent = 0;
    uint32_t i, limit;
    CPUState *oldenv = env;

#define DECLARE_HOST_REGS
#include "hostregs_helper.h"

#define SAVE_HOST_REGS
#include "hostregs_helper.h"

    /* 'limit' is the highest supported cpuid leaf in each range */
    for (i = 0; i <= limit; ++i)
        do_cpuid_ent(&cpuid_ent[cpuid_nent++], i);

    for (i = 0x80000000; i <= limit; ++i)
        do_cpuid_ent(&cpuid_ent[cpuid_nent++], i);

    kvm_setup_cpuid(kvm_context, 0, cpuid_nent, cpuid_ent);

#include "hostregs_helper.h"
int kvm_update_debugger(CPUState *env)
{
    struct kvm_debug_guest dbg;
    int i;

    memset(&dbg, 0, sizeof dbg);
    if (env->nb_breakpoints || env->singlestep_enabled) {
        for (i = 0; i < 4 && i < env->nb_breakpoints; ++i) {
            dbg.breakpoints[i].enabled = 1;
            dbg.breakpoints[i].address = env->breakpoints[i];
        }
        dbg.singlestep = env->singlestep_enabled;
    }
    return kvm_guest_debug(kvm_context, 0, &dbg);
}
/*
 * dirty pages logging
 */
/* FIXME: use unsigned long pointer instead of unsigned char */
unsigned char *kvm_dirty_bitmap = NULL;
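/*
 * A sketch of what the FIXME above suggests (illustrative only; the
 * scan_dirty_words() helper and its mark_page callback are hypothetical,
 * not part of this file): walking the bitmap one unsigned long at a time
 * and locating set bits with ffsl() issues an eighth of the loads of a
 * byte-wise scan on a 64-bit host, which pays off when most pages are clean.
 */
static inline void scan_dirty_words(unsigned long *bitmap, unsigned int nwords,
                                    void (*mark_page)(unsigned page_number))
{
    unsigned int i;
    unsigned long w;
    int bit;

    for (i = 0; i < nwords; i++) {
        w = bitmap[i];
        while (w) {
            bit = ffsl(w) - 1;              /* index of the lowest set bit */
            w &= ~(1ul << bit);
            mark_page(i * 8 * sizeof(unsigned long) + bit);
        }
    }
}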
int kvm_physical_memory_set_dirty_tracking(int enable)
{
    int r = 0;

    if (enable) {
        if (!kvm_dirty_bitmap) {
            unsigned bitmap_size = BITMAP_SIZE(phys_ram_size);
            kvm_dirty_bitmap = qemu_malloc(bitmap_size);
            if (kvm_dirty_bitmap == NULL) {
                perror("Failed to allocate dirty pages bitmap");
                r = -1;
            } else {
                r = kvm_dirty_pages_log_enable_all(kvm_context);
            }
        }
    } else {
        if (kvm_dirty_bitmap) {
            r = kvm_dirty_pages_log_reset(kvm_context);
            qemu_free(kvm_dirty_bitmap);
            kvm_dirty_bitmap = NULL;
        }
    }
    return r;
}
/* get kvm's dirty pages bitmap and update qemu's */
int kvm_get_dirty_pages_log_slot(int slot,
                                 unsigned char *bitmap,
                                 unsigned int offset,
                                 unsigned int len)
{
    int r;
    unsigned int i, j, n = 0;
    unsigned char c;
    unsigned page_number, addr, addr1;

    memset(bitmap, 0, len);
    r = kvm_get_dirty_pages(kvm_context, slot, bitmap);
    if (r)
        return r;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...),
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        c = bitmap[i];
        while (c > 0) {
            j = ffsl(c) - 1;
            c &= ~(1u << j);
            page_number = i * 8 + j;
            addr1 = page_number * TARGET_PAGE_SIZE;
            addr = offset + addr1;
            cpu_physical_memory_set_dirty(addr);
            n++;
        }
    }
    return 0;
}
/*
 * get kvm's dirty pages bitmap and update qemu's
 * we only care about physical ram, which resides in slots 0 and 3
 */
int kvm_update_dirty_pages_log(void)
{
    int r = 0;
    unsigned int len;

    len = BITMAP_SIZE(0xa0000);
    r = kvm_get_dirty_pages_log_slot(3, kvm_dirty_bitmap, 0, len);
    len = BITMAP_SIZE(phys_ram_size - 0xc0000);
    r = r || kvm_get_dirty_pages_log_slot(0, kvm_dirty_bitmap, 0xc0000, len);
    return r;
}
int kvm_get_phys_ram_page_bitmap(unsigned char *bitmap)
{
    int r = 0, len, offset;

    len = BITMAP_SIZE(phys_ram_size);
    memset(bitmap, 0, len);

    r = kvm_get_mem_map(kvm_context, 3, bitmap);
    if (r)
        return r;

    offset = BITMAP_SIZE(0xc0000);
    r = kvm_get_mem_map(kvm_context, 0, bitmap + offset);
    return r;
}