#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "hw/hw.h"
#include "hw/boards.h"
#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
#include "migration/cpu.h"

#include "sysemu/kvm.h"

#include "qemu/error-report.h"

static const VMStateDescription vmstate_segment = {
    .name = "segment",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(selector, SegmentCache),
        VMSTATE_UINTTL(base, SegmentCache),
        VMSTATE_UINT32(limit, SegmentCache),
        VMSTATE_UINT32(flags, SegmentCache),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_SEGMENT(_field, _state) {                                \
    .name   = (stringify(_field)),                                       \
    .size   = sizeof(SegmentCache),                                      \
    .vmsd   = &vmstate_segment,                                          \
    .flags  = VMS_STRUCT,                                                \
    .offset = offsetof(_state, _field)                                   \
              + type_check(SegmentCache, typeof_field(_state, _field))   \
}

#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                        \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)

static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_XMM_REGS(_field, _state, _start)                         \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_xmm_reg, ZMMReg)

/* YMMH format is the same as XMM, but for bits 128-255 */
static const VMStateDescription vmstate_ymmh_reg = {
    .name = "ymmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v,    \
                             vmstate_ymmh_reg, ZMMReg)

static const VMStateDescription vmstate_zmmh_reg = {
    .name = "zmmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start)                   \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_zmmh_reg, ZMMReg)

#ifdef TARGET_X86_64
static const VMStateDescription vmstate_hi16_zmm_reg = {
    .name = "hi16_zmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_hi16_zmm_reg, ZMMReg)
#endif

static const VMStateDescription vmstate_bnd_regs = {
    .name = "bnd_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(lb, BNDReg),
        VMSTATE_UINT64(ub, BNDReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_BND_REGS(_field, _state, _n)                             \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)

static const VMStateDescription vmstate_mtrr_var = {
    .name = "mtrr_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, MTRRVar),
        VMSTATE_UINT64(mask, MTRRVar),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_MTRR_VARS(_field, _state, _n, _v)                        \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)

static int put_fpreg_error(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field, QJSON *vmdesc)
{
    fprintf(stderr, "call put_fpreg() with invalid arguments\n");
    exit(0);
    return 0;
}

/* XXX: add that in a FPU generic layer */
union x86_longdouble {
    uint64_t mant;
    uint16_t exp;
};

#define MANTD1(fp)  (fp & ((1LL << 52) - 1))
#define EXPBIAS1    1023
#define EXPD1(fp)   ((fp >> 52) & 0x7FF)
#define SIGND1(fp)  ((fp >> 32) & 0x80000000)

static void fp64_to_fp80(union x86_longdouble *p, uint64_t temp)
{
    int e;
    /* mantissa */
    p->mant = (MANTD1(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD1(temp) - EXPBIAS1 + 16383;
    e |= SIGND1(temp) >> 16;
    p->exp = e;
}

static int get_fpreg(QEMUFile *f, void *opaque, size_t size,
                     VMStateField *field)
{
    FPReg *fp_reg = opaque;
    uint64_t mant;
    uint16_t exp;

    qemu_get_be64s(f, &mant);
    qemu_get_be16s(f, &exp);
    fp_reg->d = cpu_set_fp80(mant, exp);
    return 0;
}

static int put_fpreg(QEMUFile *f, void *opaque, size_t size,
                     VMStateField *field, QJSON *vmdesc)
{
    FPReg *fp_reg = opaque;
    uint64_t mant;
    uint16_t exp;
    /* we save the real CPU data (in case of MMX usage only 'mant'
       contains the MMX register) */
    cpu_get_fp80(&mant, &exp, fp_reg->d);
    qemu_put_be64s(f, &mant);
    qemu_put_be16s(f, &exp);

    return 0;
}

static const VMStateInfo vmstate_fpreg = {
    .name = "fpreg",
    .get  = get_fpreg,
    .put  = put_fpreg,
};

static int get_fpreg_1_mmx(QEMUFile *f, void *opaque, size_t size,
                           VMStateField *field)
{
    union x86_longdouble *p = opaque;
    uint64_t mant;

    qemu_get_be64s(f, &mant);
    p->mant = mant;
    p->exp = 0xffff;
    return 0;
}

static const VMStateInfo vmstate_fpreg_1_mmx = {
    .name = "fpreg_1_mmx",
    .get  = get_fpreg_1_mmx,
    .put  = put_fpreg_error,
};

static int get_fpreg_1_no_mmx(QEMUFile *f, void *opaque, size_t size,
                              VMStateField *field)
{
    union x86_longdouble *p = opaque;
    uint64_t mant;

    qemu_get_be64s(f, &mant);
    fp64_to_fp80(p, mant);
    return 0;
}

static const VMStateInfo vmstate_fpreg_1_no_mmx = {
    .name = "fpreg_1_no_mmx",
    .get  = get_fpreg_1_no_mmx,
    .put  = put_fpreg_error,
};

static bool fpregs_is_0(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return (env->fpregs_format_vmstate == 0);
}

static bool fpregs_is_1_mmx(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int guess_mmx;

    guess_mmx = ((env->fptag_vmstate == 0xff) &&
                 (env->fpus_vmstate & 0x3800) == 0);
    return (guess_mmx && (env->fpregs_format_vmstate == 1));
}

static bool fpregs_is_1_no_mmx(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int guess_mmx;

    guess_mmx = ((env->fptag_vmstate == 0xff) &&
                 (env->fpus_vmstate & 0x3800) == 0);
    return (!guess_mmx && (env->fpregs_format_vmstate == 1));
}

#define VMSTATE_FP_REGS(_field, _state, _n)                                         \
    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_0, vmstate_fpreg, FPReg),      \
    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1_mmx, vmstate_fpreg_1_mmx, FPReg), \
    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1_no_mmx, vmstate_fpreg_1_no_mmx, FPReg)

static bool version_is_5(void *opaque, int version_id)
{
    return version_id == 5;
}

#ifdef TARGET_X86_64
static bool less_than_7(void *opaque, int version_id)
{
    return version_id < 7;
}

static int get_uint64_as_uint32(QEMUFile *f, void *pv, size_t size,
                                VMStateField *field)
{
    uint64_t *v = pv;
    *v = qemu_get_be32(f);
    return 0;
}

static int put_uint64_as_uint32(QEMUFile *f, void *pv, size_t size,
                                VMStateField *field, QJSON *vmdesc)
{
    uint64_t *v = pv;
    qemu_put_be32(f, *v);

    return 0;
}

static const VMStateInfo vmstate_hack_uint64_as_uint32 = {
    .name = "uint64_as_uint32",
    .get  = get_uint64_as_uint32,
    .put  = put_uint64_as_uint32,
};

#define VMSTATE_HACK_UINT32(_f, _s, _t)                                  \
    VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_hack_uint64_as_uint32, uint64_t)
#endif

static void cpu_pre_save(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    /* FPU */
    env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    env->fptag_vmstate = 0;
    for (i = 0; i < 8; i++) {
        env->fptag_vmstate |= ((!env->fptags[i]) << i);
    }

    env->fpregs_format_vmstate = 0;

    /*
     * Real mode guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it will allow live migration to a host with unrestricted guest
     * support (otherwise the migration will fail with an invalid guest state
     * error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }
}

static int cpu_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    if (env->tsc_khz && env->user_tsc_khz &&
        env->tsc_khz != env->user_tsc_khz) {
        error_report("Mismatch between user-specified TSC frequency and "
                     "migrated TSC frequency");
        return -EINVAL;
    }

    /*
     * Real mode guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it will allow live migration from a host that lacks
     * unrestricted guest support to a host with unrestricted guest support
     * (otherwise the migration will fail with an invalid guest state
     * error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
     * running under KVM.  This is wrong for conforming code segments.
     * Luckily, in our implementation the CPL field of hflags is redundant
     * and we can get the right value from the SS descriptor privilege level.
     */
    env->hflags &= ~HF_CPL_MASK;
    env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;

    env->fpstt = (env->fpus_vmstate >> 11) & 7;
    env->fpus = env->fpus_vmstate & ~0x3800;
    env->fptag_vmstate ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
    }
    update_fp_status(env);

    cpu_breakpoint_remove_all(cs, BP_CPU);
    cpu_watchpoint_remove_all(cs, BP_CPU);
    {
        /* Indicate all breakpoints disabled, as they are, then
           let the helper re-enable them.  */
        target_ulong dr7 = env->dr[7];
        env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
        cpu_x86_update_dr7(env, dr7);
    }
    tlb_flush(cs);

    if (tcg_enabled()) {
        cpu_smm_update(cpu);
    }
    return 0;
}

static bool async_pf_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.async_pf_en_msr != 0;
}

static bool pv_eoi_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.pv_eoi_en_msr != 0;
}

static bool steal_time_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.steal_time_msr != 0;
}

static const VMStateDescription vmstate_steal_time_msr = {
    .name = "cpu/steal_time_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = steal_time_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.steal_time_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_async_pf_msr = {
    .name = "cpu/async_pf_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = async_pf_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pv_eoi_msr = {
    .name = "cpu/async_pv_eoi_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pv_eoi_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool fpop_ip_dp_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
}

static const VMStateDescription vmstate_fpop_ip_dp = {
    .name = "cpu/fpop_ip_dp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpop_ip_dp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(env.fpop, X86CPU),
        VMSTATE_UINT64(env.fpip, X86CPU),
        VMSTATE_UINT64(env.fpdp, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tsc_adjust_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_adjust != 0;
}

static const VMStateDescription vmstate_msr_tsc_adjust = {
    .name = "cpu/msr_tsc_adjust",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_adjust_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_adjust, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tscdeadline_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_deadline != 0;
}

static const VMStateDescription vmstate_msr_tscdeadline = {
    .name = "cpu/msr_tscdeadline",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tscdeadline_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_deadline, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool misc_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
}

static bool feature_control_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_feature_control != 0;
}

static const VMStateDescription vmstate_msr_ia32_misc_enable = {
    .name = "cpu/msr_ia32_misc_enable",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = misc_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_msr_ia32_feature_control = {
    .name = "cpu/msr_ia32_feature_control",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = feature_control_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmu_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
        env->msr_global_status || env->msr_global_ovf_ctrl) {
        return true;
    }
    for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
        if (env->msr_fixed_counters[i]) {
            return true;
        }
    }
    for (i = 0; i < MAX_GP_COUNTERS; i++) {
        if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_architectural_pmu = {
    .name = "cpu/msr_architectural_pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_status, X86CPU),
        VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_END_OF_LIST()
    }
};

static bool mpx_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < 4; i++) {
        if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
            return true;
        }
    }

    if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
        return true;
    }

    return !!env->msr_bndcfgs;
}

static const VMStateDescription vmstate_mpx = {
    .name = "cpu/mpx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mpx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
        VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
        VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
        VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_hypercall_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
}

static const VMStateDescription vmstate_msr_hypercall_hypercall = {
    .name = "cpu/msr_hyperv_hypercall",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_hypercall_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
        VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_vapic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_vapic != 0;
}

static const VMStateDescription vmstate_msr_hyperv_vapic = {
    .name = "cpu/msr_hyperv_vapic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_vapic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_time_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_tsc != 0;
}

static const VMStateDescription vmstate_msr_hyperv_time = {
    .name = "cpu/msr_hyperv_time",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_time_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_crash_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < HV_X64_MSR_CRASH_PARAMS; i++) {
        if (env->msr_hv_crash_params[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_crash = {
    .name = "cpu/msr_hyperv_crash",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_crash_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params,
                             X86CPU, HV_X64_MSR_CRASH_PARAMS),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_runtime_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    if (!cpu->hyperv_runtime) {
        return false;
    }

    return env->msr_hv_runtime != 0;
}

static const VMStateDescription vmstate_msr_hyperv_runtime = {
    .name = "cpu/msr_hyperv_runtime",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_runtime_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_synic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_hv_synic_control != 0 ||
        env->msr_hv_synic_evt_page != 0 ||
        env->msr_hv_synic_msg_page != 0) {
        return true;
    }

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
        if (env->msr_hv_synic_sint[i] != 0) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_hyperv_synic = {
    .name = "cpu/msr_hyperv_synic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_synic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU,
                             HV_SYNIC_SINT_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_stimer_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
        if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_stimer = {
    .name = "cpu/msr_hyperv_stimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_stimer_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config,
                             X86CPU, HV_SYNIC_STIMER_COUNT),
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count,
                             X86CPU, HV_SYNIC_STIMER_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool avx512_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < NB_OPMASK_REGS; i++) {
        if (env->opmask_regs[i]) {
            return true;
        }
    }

    for (i = 0; i < CPU_NB_REGS; i++) {
#define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
        if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
            ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
            return true;
        }
#ifdef TARGET_X86_64
        if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
            ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
            ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
            ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
            return true;
        }
#endif
    }

    return false;
}

static const VMStateDescription vmstate_avx512 = {
    .name = "cpu/avx512",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = avx512_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
        VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
#ifdef TARGET_X86_64
        VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
#endif
        VMSTATE_END_OF_LIST()
    }
};

static bool xss_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->xss != 0;
}

static const VMStateDescription vmstate_xss = {
    .name = "cpu/xss",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xss_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.xss, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef TARGET_X86_64
static bool pkru_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->pkru != 0;
}

static const VMStateDescription vmstate_pkru = {
    .name = "cpu/pkru",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkru_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.pkru, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif

static bool tsc_khz_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
    PCMachineClass *pcmc = PC_MACHINE_CLASS(mc);
    return env->tsc_khz && pcmc->save_tsc_khz;
}

static const VMStateDescription vmstate_tsc_khz = {
    .name = "cpu/tsc_khz",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_khz_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(env.tsc_khz, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool mcg_ext_ctl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    return cpu->enable_lmce && env->mcg_ext_ctl;
}

static const VMStateDescription vmstate_mcg_ext_ctl = {
    .name = "cpu/mcg_ext_ctl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mcg_ext_ctl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

VMStateDescription vmstate_x86_cpu = {
    .name = "cpu",
    .version_id = 12,
    .minimum_version_id = 3,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
        VMSTATE_UINTTL(env.eip, X86CPU),
        VMSTATE_UINTTL(env.eflags, X86CPU),
        VMSTATE_UINT32(env.hflags, X86CPU),
        /* FPU */
        VMSTATE_UINT16(env.fpuc, X86CPU),
        VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
        VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
        VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),
        VMSTATE_FP_REGS(env.fpregs, X86CPU, 8),

        VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
        VMSTATE_SEGMENT(env.ldt, X86CPU),
        VMSTATE_SEGMENT(env.tr, X86CPU),
        VMSTATE_SEGMENT(env.gdt, X86CPU),
        VMSTATE_SEGMENT(env.idt, X86CPU),

        VMSTATE_UINT32(env.sysenter_cs, X86CPU),
#ifdef TARGET_X86_64
        /* Hack: In v7 size changed from 32 to 64 bits on x86_64 */
        VMSTATE_HACK_UINT32(env.sysenter_esp, X86CPU, less_than_7),
        VMSTATE_HACK_UINT32(env.sysenter_eip, X86CPU, less_than_7),
        VMSTATE_UINTTL_V(env.sysenter_esp, X86CPU, 7),
        VMSTATE_UINTTL_V(env.sysenter_eip, X86CPU, 7),
#else
        VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
        VMSTATE_UINTTL(env.sysenter_eip, X86CPU),
#endif

        VMSTATE_UINTTL(env.cr[0], X86CPU),
        VMSTATE_UINTTL(env.cr[2], X86CPU),
        VMSTATE_UINTTL(env.cr[3], X86CPU),
        VMSTATE_UINTTL(env.cr[4], X86CPU),
        VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
        /* MMU */
        VMSTATE_INT32(env.a20_mask, X86CPU),
        /* XMM */
        VMSTATE_UINT32(env.mxcsr, X86CPU),
        VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),

#ifdef TARGET_X86_64
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_UINT64(env.star, X86CPU),
        VMSTATE_UINT64(env.lstar, X86CPU),
        VMSTATE_UINT64(env.cstar, X86CPU),
        VMSTATE_UINT64(env.fmask, X86CPU),
        VMSTATE_UINT64(env.kernelgsbase, X86CPU),
#endif
        VMSTATE_UINT32_V(env.smbase, X86CPU, 4),

        VMSTATE_UINT64_V(env.pat, X86CPU, 5),
        VMSTATE_UINT32_V(env.hflags2, X86CPU, 5),

        VMSTATE_UINT32_TEST(parent_obj.halted, X86CPU, version_is_5),
        VMSTATE_UINT64_V(env.vm_hsave, X86CPU, 5),
        VMSTATE_UINT64_V(env.vm_vmcb, X86CPU, 5),
        VMSTATE_UINT64_V(env.tsc_offset, X86CPU, 5),
        VMSTATE_UINT64_V(env.intercept, X86CPU, 5),
        VMSTATE_UINT16_V(env.intercept_cr_read, X86CPU, 5),
        VMSTATE_UINT16_V(env.intercept_cr_write, X86CPU, 5),
        VMSTATE_UINT16_V(env.intercept_dr_read, X86CPU, 5),
        VMSTATE_UINT16_V(env.intercept_dr_write, X86CPU, 5),
        VMSTATE_UINT32_V(env.intercept_exceptions, X86CPU, 5),
        VMSTATE_UINT8_V(env.v_tpr, X86CPU, 5),
        /* MTRRs */
        VMSTATE_UINT64_ARRAY_V(env.mtrr_fixed, X86CPU, 11, 8),
        VMSTATE_UINT64_V(env.mtrr_deftype, X86CPU, 8),
        VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
        /* KVM-related states */
        VMSTATE_INT32_V(env.interrupt_injected, X86CPU, 9),
        VMSTATE_UINT32_V(env.mp_state, X86CPU, 9),
        VMSTATE_UINT64_V(env.tsc, X86CPU, 9),
        VMSTATE_INT32_V(env.exception_injected, X86CPU, 11),
        VMSTATE_UINT8_V(env.soft_interrupt, X86CPU, 11),
        VMSTATE_UINT8_V(env.nmi_injected, X86CPU, 11),
        VMSTATE_UINT8_V(env.nmi_pending, X86CPU, 11),
        VMSTATE_UINT8_V(env.has_error_code, X86CPU, 11),
        VMSTATE_UINT32_V(env.sipi_vector, X86CPU, 11),
        /* MCE */
        VMSTATE_UINT64_V(env.mcg_cap, X86CPU, 10),
        VMSTATE_UINT64_V(env.mcg_status, X86CPU, 10),
        VMSTATE_UINT64_V(env.mcg_ctl, X86CPU, 10),
        VMSTATE_UINT64_ARRAY_V(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4, 10),
        /* rdtscp */
        VMSTATE_UINT64_V(env.tsc_aux, X86CPU, 11),
        /* KVM pvclock msr */
        VMSTATE_UINT64_V(env.system_time_msr, X86CPU, 11),
        VMSTATE_UINT64_V(env.wall_clock_msr, X86CPU, 11),
        /* XSAVE related fields */
        VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
        VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
        VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
        VMSTATE_END_OF_LIST()
        /* The above list is not sorted wrt version numbers, watch out! */
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_async_pf_msr,
        &vmstate_pv_eoi_msr,
        &vmstate_steal_time_msr,
        &vmstate_fpop_ip_dp,
        &vmstate_msr_tsc_adjust,
        &vmstate_msr_tscdeadline,
        &vmstate_msr_ia32_misc_enable,
        &vmstate_msr_ia32_feature_control,
        &vmstate_msr_architectural_pmu,
        &vmstate_mpx,
        &vmstate_msr_hypercall_hypercall,
        &vmstate_msr_hyperv_vapic,
        &vmstate_msr_hyperv_time,
        &vmstate_msr_hyperv_crash,
        &vmstate_msr_hyperv_runtime,
        &vmstate_msr_hyperv_synic,
        &vmstate_msr_hyperv_stimer,
        &vmstate_avx512,
        &vmstate_xss,
        &vmstate_tsc_khz,
#ifdef TARGET_X86_64
        &vmstate_pkru,
#endif
        &vmstate_mcg_ext_ctl,
        NULL
    }
};