/* target-i386/machine.c (qemu/aliguori-queue.git, blob 869c681f4d8e6b68ec2fa6689dc7a9b814e33ce2) */
#include "hw/hw.h"
#include "hw/boards.h"
#include "hw/pc.h"
#include "hw/isa.h"
#include "host-utils.h"

#include "exec-all.h"
#include "kvm.h"
static const VMStateDescription vmstate_segment = {
    .name = "segment",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(selector, SegmentCache),
        VMSTATE_UINTTL(base, SegmentCache),
        VMSTATE_UINT32(limit, SegmentCache),
        VMSTATE_UINT32(flags, SegmentCache),
        VMSTATE_END_OF_LIST()
    }
};
#define VMSTATE_SEGMENT(_field, _state) {                               \
    .name = (stringify(_field)),                                        \
    .size = sizeof(SegmentCache),                                       \
    .vmsd = &vmstate_segment,                                           \
    .flags = VMS_STRUCT,                                                \
    .offset = offsetof(_state, _field)                                  \
        + type_check(SegmentCache, typeof_field(_state, _field))        \
}

#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                       \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)
static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT64(XMM_Q(0), XMMReg),
        VMSTATE_UINT64(XMM_Q(1), XMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_XMM_REGS(_field, _state, _n)                            \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_xmm_reg, XMMReg)
static const VMStateDescription vmstate_mtrr_var = {
    .name = "mtrr_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_UINT64(base, MTRRVar),
        VMSTATE_UINT64(mask, MTRRVar),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_MTRR_VARS(_field, _state, _n, _v)                       \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)
static void put_fpreg_error(QEMUFile *f, void *opaque, size_t size)
{
    fprintf(stderr, "call put_fpreg() with invalid arguments\n");
    exit(0);
}
#ifdef USE_X86LDOUBLE
/* XXX: add that in a FPU generic layer */
union x86_longdouble {
    uint64_t mant;
    uint16_t exp;
};

#define MANTD1(fp) (fp & ((1LL << 52) - 1))
#define EXPBIAS1 1023
#define EXPD1(fp) ((fp >> 52) & 0x7FF)
#define SIGND1(fp) ((fp >> 32) & 0x80000000)
static void fp64_to_fp80(union x86_longdouble *p, uint64_t temp)
{
    int e;
    /* mantissa */
    p->mant = (MANTD1(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD1(temp) - EXPBIAS1 + 16383;
    e |= SIGND1(temp) >> 16;
    p->exp = e;
}
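
/*
 * Worked example (illustrative): the IEEE 754 double 1.0 is 0x3FF0000000000000.
 * MANTD1() extracts a zero fraction, so p->mant becomes (0 << 11) | (1LL << 63)
 * = 0x8000000000000000 (the 80-bit format stores the integer bit explicitly).
 * EXPD1() yields 1023, so e = 1023 - 1023 + 16383 = 0x3FFF, the extended
 * precision bias; the sign bit (none here) would be OR-ed into bit 15 of p->exp.
 */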
static int get_fpreg(QEMUFile *f, void *opaque, size_t size)
{
    FPReg *fp_reg = opaque;
    uint64_t mant;
    uint16_t exp;

    qemu_get_be64s(f, &mant);
    qemu_get_be16s(f, &exp);
    fp_reg->d = cpu_set_fp80(mant, exp);
    return 0;
}
static void put_fpreg(QEMUFile *f, void *opaque, size_t size)
{
    FPReg *fp_reg = opaque;
    uint64_t mant;
    uint16_t exp;
    /* we save the real CPU data (in case of MMX usage only 'mant'
       contains the MMX register) */
    cpu_get_fp80(&mant, &exp, fp_reg->d);
    qemu_put_be64s(f, &mant);
    qemu_put_be16s(f, &exp);
}
static const VMStateInfo vmstate_fpreg = {
    .name = "fpreg",
    .get = get_fpreg,
    .put = put_fpreg,
};
static int get_fpreg_1_mmx(QEMUFile *f, void *opaque, size_t size)
{
    union x86_longdouble *p = opaque;
    uint64_t mant;

    qemu_get_be64s(f, &mant);
    p->mant = mant;
    p->exp = 0xffff;
    return 0;
}

static const VMStateInfo vmstate_fpreg_1_mmx = {
    .name = "fpreg_1_mmx",
    .get = get_fpreg_1_mmx,
    .put = put_fpreg_error,
};
static int get_fpreg_1_no_mmx(QEMUFile *f, void *opaque, size_t size)
{
    union x86_longdouble *p = opaque;
    uint64_t mant;

    qemu_get_be64s(f, &mant);
    fp64_to_fp80(p, mant);
    return 0;
}

static const VMStateInfo vmstate_fpreg_1_no_mmx = {
    .name = "fpreg_1_no_mmx",
    .get = get_fpreg_1_no_mmx,
    .put = put_fpreg_error,
};
static bool fpregs_is_0(void *opaque, int version_id)
{
    CPUState *env = opaque;

    return (env->fpregs_format_vmstate == 0);
}
static bool fpregs_is_1_mmx(void *opaque, int version_id)
{
    CPUState *env = opaque;
    int guess_mmx;

    guess_mmx = ((env->fptag_vmstate == 0xff) &&
                 (env->fpus_vmstate & 0x3800) == 0);
    return (guess_mmx && (env->fpregs_format_vmstate == 1));
}

static bool fpregs_is_1_no_mmx(void *opaque, int version_id)
{
    CPUState *env = opaque;
    int guess_mmx;

    guess_mmx = ((env->fptag_vmstate == 0xff) &&
                 (env->fpus_vmstate & 0x3800) == 0);
    return (!guess_mmx && (env->fpregs_format_vmstate == 1));
}
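
/*
 * Note on the heuristic above (illustrative): fptag_vmstate == 0xff means all
 * eight registers were marked valid on save, and (fpus_vmstate & 0x3800) == 0
 * means the saved top-of-stack field is 0.  Code that uses MMX typically leaves
 * the FPU in exactly this state, so a format-1 image is assumed to carry raw
 * 64-bit MMX values in that case and 64-bit doubles otherwise.
 */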
#define VMSTATE_FP_REGS(_field, _state, _n)                                    \
    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_0, vmstate_fpreg, FPReg), \
    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1_mmx, vmstate_fpreg_1_mmx, FPReg), \
    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1_no_mmx, vmstate_fpreg_1_no_mmx, FPReg)

#else
static int get_fpreg(QEMUFile *f, void *opaque, size_t size)
{
    FPReg *fp_reg = opaque;

    qemu_get_be64s(f, &fp_reg->mmx.MMX_Q(0));
    return 0;
}
static void put_fpreg(QEMUFile *f, void *opaque, size_t size)
{
    FPReg *fp_reg = opaque;
    /* if we use doubles for float emulation, we save the doubles to
       avoid losing information in case of MMX usage. It can give
       problems if the image is restored on a CPU where long
       doubles are used instead. */
    qemu_put_be64s(f, &fp_reg->mmx.MMX_Q(0));
}

const VMStateInfo vmstate_fpreg = {
    .name = "fpreg",
    .get = get_fpreg,
    .put = put_fpreg,
};
static int get_fpreg_0_mmx(QEMUFile *f, void *opaque, size_t size)
{
    FPReg *fp_reg = opaque;
    uint64_t mant;
    uint16_t exp;

    /* consume the 16-bit exponent from the stream but keep only the
       64-bit mantissa, which holds the MMX value */
    qemu_get_be64s(f, &mant);
    qemu_get_be16s(f, &exp);
    fp_reg->mmx.MMX_Q(0) = mant;
    return 0;
}

const VMStateInfo vmstate_fpreg_0_mmx = {
    .name = "fpreg_0_mmx",
    .get = get_fpreg_0_mmx,
    .put = put_fpreg_error,
};
static int get_fpreg_0_no_mmx(QEMUFile *f, void *opaque, size_t size)
{
    FPReg *fp_reg = opaque;
    uint64_t mant;
    uint16_t exp;

    qemu_get_be64s(f, &mant);
    qemu_get_be16s(f, &exp);

    fp_reg->d = cpu_set_fp80(mant, exp);
    return 0;
}

const VMStateInfo vmstate_fpreg_0_no_mmx = {
    .name = "fpreg_0_no_mmx",
    .get = get_fpreg_0_no_mmx,
    .put = put_fpreg_error,
};
static bool fpregs_is_1(void *opaque, int version_id)
{
    CPUState *env = opaque;

    return env->fpregs_format_vmstate == 1;
}
static bool fpregs_is_0_mmx(void *opaque, int version_id)
{
    CPUState *env = opaque;
    int guess_mmx;

    guess_mmx = ((env->fptag_vmstate == 0xff) &&
                 (env->fpus_vmstate & 0x3800) == 0);
    return guess_mmx && env->fpregs_format_vmstate == 0;
}

static bool fpregs_is_0_no_mmx(void *opaque, int version_id)
{
    CPUState *env = opaque;
    int guess_mmx;

    guess_mmx = ((env->fptag_vmstate == 0xff) &&
                 (env->fpus_vmstate & 0x3800) == 0);
    return !guess_mmx && env->fpregs_format_vmstate == 0;
}
#define VMSTATE_FP_REGS(_field, _state, _n)                                    \
    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1, vmstate_fpreg, FPReg), \
    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_0_mmx, vmstate_fpreg_0_mmx, FPReg), \
    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_0_no_mmx, vmstate_fpreg_0_no_mmx, FPReg)

#endif /* USE_X86LDOUBLE */
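
/*
 * Summary (illustrative): fpregs_format_vmstate selects the on-the-wire FPU
 * register layout.  Format 0 (USE_X86LDOUBLE builds) stores each register as a
 * 64-bit mantissa plus a 16-bit sign/exponent word; format 1 stores a plain
 * 64-bit double.  The fpregs_is_* predicates above pick, at load time, the one
 * VMSTATE_FP_REGS array entry matching the incoming format and the MMX guess.
 */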
static bool version_is_5(void *opaque, int version_id)
{
    return version_id == 5;
}

#ifdef TARGET_X86_64
static bool less_than_7(void *opaque, int version_id)
{
    return version_id < 7;
}
static int get_uint64_as_uint32(QEMUFile *f, void *pv, size_t size)
{
    uint64_t *v = pv;
    *v = qemu_get_be32(f);
    return 0;
}

static void put_uint64_as_uint32(QEMUFile *f, void *pv, size_t size)
{
    uint64_t *v = pv;
    qemu_put_be32(f, *v);
}

static const VMStateInfo vmstate_hack_uint64_as_uint32 = {
    .name = "uint64_as_uint32",
    .get = get_uint64_as_uint32,
    .put = put_uint64_as_uint32,
};

#define VMSTATE_HACK_UINT32(_f, _s, _t)                                  \
    VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_hack_uint64_as_uint32, uint64_t)
#endif
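
/*
 * Illustrative note: VMSTATE_HACK_UINT32 is paired further below with the
 * full-width VMSTATE_UINTTL_V(..., 7) entries for sysenter_esp/sysenter_eip.
 * The less_than_7 test presumably keeps the 32-bit reader only for streams
 * older than version 7, while current-version saves and loads go through the
 * 64-bit fields.
 */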
static void cpu_pre_save(void *opaque)
{
    CPUState *env = opaque;
    int i, bit;

    cpu_synchronize_state(env);

    /* FPU */
    env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    env->fptag_vmstate = 0;
    for(i = 0; i < 8; i++) {
        env->fptag_vmstate |= ((!env->fptags[i]) << i);
    }

#ifdef USE_X86LDOUBLE
    env->fpregs_format_vmstate = 0;
#else
    env->fpregs_format_vmstate = 1;
#endif
    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    env->pending_irq_vmstate = -1;
    for (i = 0; i < ARRAY_SIZE(env->interrupt_bitmap); i++) {
        if (env->interrupt_bitmap[i]) {
            bit = ctz64(env->interrupt_bitmap[i]);
            env->pending_irq_vmstate = i * 64 + bit;
            break;
        }
    }
}
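
/*
 * Worked example for the pending-IRQ encoding above (illustrative): if only
 * vector 68 is pending, interrupt_bitmap[1] has bit 4 set, ctz64() returns 4,
 * and pending_irq_vmstate is saved as 1 * 64 + 4 = 68.
 */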
static int cpu_pre_load(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
    return 0;
}
static int cpu_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;
    int i;

    /* XXX: restore FPU round state */
    env->fpstt = (env->fpus_vmstate >> 11) & 7;
    env->fpus = env->fpus_vmstate & ~0x3800;
    env->fptag_vmstate ^= 0xff;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
    }
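    /*
     * Illustrative example: fptags[i] == 1 in QEMU means "register i is empty",
     * while the saved fptag_vmstate bit uses the opposite (FXSAVE-style)
     * convention, 1 == valid.  If all registers were valid, fptag_vmstate
     * arrives as 0xff; the XOR above turns it into 0x00, so every fptags[i]
     * is restored to 0.
     */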
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
    for (i = 0; i < 4; i++)
        hw_breakpoint_insert(env, i);

    if (version_id >= 9) {
        memset(&env->interrupt_bitmap, 0, sizeof(env->interrupt_bitmap));
        if (env->pending_irq_vmstate >= 0) {
            env->interrupt_bitmap[env->pending_irq_vmstate / 64] |=
                (uint64_t)1 << (env->pending_irq_vmstate % 64);
        }
    }

    tlb_flush(env, 1);
    return 0;
}
static const VMStateDescription vmstate_cpu = {
    .name = "cpu",
    .version_id = CPU_SAVE_VERSION,
    .minimum_version_id = 3,
    .minimum_version_id_old = 3,
    .pre_save = cpu_pre_save,
    .pre_load = cpu_pre_load,
    .post_load = cpu_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINTTL_ARRAY(regs, CPUState, CPU_NB_REGS),
        VMSTATE_UINTTL(eip, CPUState),
        VMSTATE_UINTTL(eflags, CPUState),
        VMSTATE_UINT32(hflags, CPUState),
        /* FPU */
        VMSTATE_UINT16(fpuc, CPUState),
        VMSTATE_UINT16(fpus_vmstate, CPUState),
        VMSTATE_UINT16(fptag_vmstate, CPUState),
        VMSTATE_UINT16(fpregs_format_vmstate, CPUState),
        VMSTATE_FP_REGS(fpregs, CPUState, 8),

        VMSTATE_SEGMENT_ARRAY(segs, CPUState, 6),
        VMSTATE_SEGMENT(ldt, CPUState),
        VMSTATE_SEGMENT(tr, CPUState),
        VMSTATE_SEGMENT(gdt, CPUState),
        VMSTATE_SEGMENT(idt, CPUState),

        VMSTATE_UINT32(sysenter_cs, CPUState),
#ifdef TARGET_X86_64
        /* Hack: In v7 size changed from 32 to 64 bits on x86_64 */
        VMSTATE_HACK_UINT32(sysenter_esp, CPUState, less_than_7),
        VMSTATE_HACK_UINT32(sysenter_eip, CPUState, less_than_7),
        VMSTATE_UINTTL_V(sysenter_esp, CPUState, 7),
        VMSTATE_UINTTL_V(sysenter_eip, CPUState, 7),
#else
        VMSTATE_UINTTL(sysenter_esp, CPUState),
        VMSTATE_UINTTL(sysenter_eip, CPUState),
#endif

        VMSTATE_UINTTL(cr[0], CPUState),
        VMSTATE_UINTTL(cr[2], CPUState),
        VMSTATE_UINTTL(cr[3], CPUState),
        VMSTATE_UINTTL(cr[4], CPUState),
        VMSTATE_UINTTL_ARRAY(dr, CPUState, 8),
        /* MMU */
        VMSTATE_INT32(a20_mask, CPUState),
        /* XMM */
        VMSTATE_UINT32(mxcsr, CPUState),
        VMSTATE_XMM_REGS(xmm_regs, CPUState, CPU_NB_REGS),

#ifdef TARGET_X86_64
        VMSTATE_UINT64(efer, CPUState),
        VMSTATE_UINT64(star, CPUState),
        VMSTATE_UINT64(lstar, CPUState),
        VMSTATE_UINT64(cstar, CPUState),
        VMSTATE_UINT64(fmask, CPUState),
        VMSTATE_UINT64(kernelgsbase, CPUState),
#endif
        VMSTATE_UINT32_V(smbase, CPUState, 4),

        VMSTATE_UINT64_V(pat, CPUState, 5),
        VMSTATE_UINT32_V(hflags2, CPUState, 5),

        VMSTATE_UINT32_TEST(halted, CPUState, version_is_5),
        VMSTATE_UINT64_V(vm_hsave, CPUState, 5),
        VMSTATE_UINT64_V(vm_vmcb, CPUState, 5),
        VMSTATE_UINT64_V(tsc_offset, CPUState, 5),
        VMSTATE_UINT64_V(intercept, CPUState, 5),
        VMSTATE_UINT16_V(intercept_cr_read, CPUState, 5),
        VMSTATE_UINT16_V(intercept_cr_write, CPUState, 5),
        VMSTATE_UINT16_V(intercept_dr_read, CPUState, 5),
        VMSTATE_UINT16_V(intercept_dr_write, CPUState, 5),
        VMSTATE_UINT32_V(intercept_exceptions, CPUState, 5),
        VMSTATE_UINT8_V(v_tpr, CPUState, 5),
        /* MTRRs */
        VMSTATE_UINT64_ARRAY_V(mtrr_fixed, CPUState, 11, 8),
        VMSTATE_UINT64_V(mtrr_deftype, CPUState, 8),
        VMSTATE_MTRR_VARS(mtrr_var, CPUState, 8, 8),
        /* KVM-related states */
        VMSTATE_INT32_V(pending_irq_vmstate, CPUState, 9),
        VMSTATE_UINT32_V(mp_state, CPUState, 9),
        VMSTATE_UINT64_V(tsc, CPUState, 9),
        /* MCE */
        VMSTATE_UINT64_V(mcg_cap, CPUState, 10),
        VMSTATE_UINT64_V(mcg_status, CPUState, 10),
        VMSTATE_UINT64_V(mcg_ctl, CPUState, 10),
        VMSTATE_UINT64_ARRAY_V(mce_banks, CPUState, MCE_BANKS_DEF * 4, 10),
        /* rdtscp */
        VMSTATE_UINT64_V(tsc_aux, CPUState, 11),
        VMSTATE_END_OF_LIST()
    }
};
void cpu_save(QEMUFile *f, void *opaque)
{
    vmstate_save_state(f, &vmstate_cpu, opaque);
}

int cpu_load(QEMUFile *f, void *opaque, int version_id)
{
    return vmstate_load_state(f, &vmstate_cpu, opaque, version_id);
}
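
/*
 * Note (illustrative): cpu_save() and cpu_load() are thin legacy wrappers; the
 * generic snapshot/migration code invokes them per CPU, and they simply defer
 * to the VMState description above, which handles versioning and field layout.
 */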