valgrind/i386: avoid false positives on KVM_SET_VCPU_EVENTS ioctl
[qemu.git] / target-i386 / machine.c
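
/*
 * Saved-state (VMState) description of the x86 CPU used by savevm and live
 * migration: per-register layout descriptions, format-conversion helpers,
 * and optional subsections for state that older streams did not carry.
 */
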
#include "hw/hw.h"
#include "hw/boards.h"
#include "hw/i386/pc.h"
#include "hw/isa/isa.h"

#include "cpu.h"
#include "sysemu/kvm.h"

static const VMStateDescription vmstate_segment = {
    .name = "segment",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(selector, SegmentCache),
        VMSTATE_UINTTL(base, SegmentCache),
        VMSTATE_UINT32(limit, SegmentCache),
        VMSTATE_UINT32(flags, SegmentCache),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_SEGMENT(_field, _state) {                             \
    .name = (stringify(_field)),                                      \
    .size = sizeof(SegmentCache),                                     \
    .vmsd = &vmstate_segment,                                         \
    .flags = VMS_STRUCT,                                              \
    .offset = offsetof(_state, _field)                                \
              + type_check(SegmentCache, typeof_field(_state, _field))\
}

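/*
 * Note: the type_check() term above evaluates to 0; it exists only to make
 * the macro fail to compile if _field is not actually a SegmentCache.
 */
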
#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                     \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)

static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(XMM_Q(0), XMMReg),
        VMSTATE_UINT64(XMM_Q(1), XMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_XMM_REGS(_field, _state, _n)                          \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_xmm_reg, XMMReg)

/* YMMH format is the same as XMM */
static const VMStateDescription vmstate_ymmh_reg = {
    .name = "ymmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(XMM_Q(0), XMMReg),
        VMSTATE_UINT64(XMM_Q(1), XMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_YMMH_REGS_VARS(_field, _state, _n, _v)                \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_ymmh_reg, XMMReg)

static const VMStateDescription vmstate_zmmh_reg = {
    .name = "zmmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(YMM_Q(0), YMMReg),
        VMSTATE_UINT64(YMM_Q(1), YMMReg),
        VMSTATE_UINT64(YMM_Q(2), YMMReg),
        VMSTATE_UINT64(YMM_Q(3), YMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _n)                    \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_zmmh_reg, YMMReg)

#ifdef TARGET_X86_64
static const VMStateDescription vmstate_hi16_zmm_reg = {
    .name = "hi16_zmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _n)                \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_hi16_zmm_reg, ZMMReg)
#endif

static const VMStateDescription vmstate_bnd_regs = {
    .name = "bnd_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(lb, BNDReg),
        VMSTATE_UINT64(ub, BNDReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_BND_REGS(_field, _state, _n)                          \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)

static const VMStateDescription vmstate_mtrr_var = {
    .name = "mtrr_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, MTRRVar),
        VMSTATE_UINT64(mask, MTRRVar),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_MTRR_VARS(_field, _state, _n, _v)                     \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)

static void put_fpreg_error(QEMUFile *f, void *opaque, size_t size)
{
    fprintf(stderr, "call put_fpreg() with invalid arguments\n");
    exit(1);
}

/* XXX: add that in a FPU generic layer */
union x86_longdouble {
    uint64_t mant;
    uint16_t exp;
};

#define MANTD1(fp)  (fp & ((1LL << 52) - 1))
#define EXPBIAS1    1023
#define EXPD1(fp)   ((fp >> 52) & 0x7FF)
#define SIGND1(fp)  ((fp >> 32) & 0x80000000)

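/*
 * Convert an IEEE 754 double (52-bit mantissa, 11-bit exponent biased by
 * 1023) into the x87 80-bit layout (64-bit mantissa with an explicit
 * integer bit, 15-bit exponent biased by 16383): shift the mantissa up by
 * 11 bits, set the explicit integer bit, rebias the exponent, and move the
 * sign into bit 15 of 'exp'.
 */
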
static void fp64_to_fp80(union x86_longdouble *p, uint64_t temp)
{
    int e;
    /* mantissa */
    p->mant = (MANTD1(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD1(temp) - EXPBIAS1 + 16383;
    e |= SIGND1(temp) >> 16;
    p->exp = e;
}

static int get_fpreg(QEMUFile *f, void *opaque, size_t size)
{
    FPReg *fp_reg = opaque;
    uint64_t mant;
    uint16_t exp;

    qemu_get_be64s(f, &mant);
    qemu_get_be16s(f, &exp);
    fp_reg->d = cpu_set_fp80(mant, exp);
    return 0;
}

static void put_fpreg(QEMUFile *f, void *opaque, size_t size)
{
    FPReg *fp_reg = opaque;
    uint64_t mant;
    uint16_t exp;
    /* we save the real CPU data (in case of MMX usage only 'mant'
       contains the MMX register) */
    cpu_get_fp80(&mant, &exp, fp_reg->d);
    qemu_put_be64s(f, &mant);
    qemu_put_be16s(f, &exp);
}

static const VMStateInfo vmstate_fpreg = {
    .name = "fpreg",
    .get = get_fpreg,
    .put = put_fpreg,
};

static int get_fpreg_1_mmx(QEMUFile *f, void *opaque, size_t size)
{
    union x86_longdouble *p = opaque;
    uint64_t mant;

    qemu_get_be64s(f, &mant);
    p->mant = mant;
    p->exp = 0xffff;
    return 0;
}

static const VMStateInfo vmstate_fpreg_1_mmx = {
    .name = "fpreg_1_mmx",
    .get = get_fpreg_1_mmx,
    .put = put_fpreg_error,
};

static int get_fpreg_1_no_mmx(QEMUFile *f, void *opaque, size_t size)
{
    union x86_longdouble *p = opaque;
    uint64_t mant;

    qemu_get_be64s(f, &mant);
    fp64_to_fp80(p, mant);
    return 0;
}

static const VMStateInfo vmstate_fpreg_1_no_mmx = {
    .name = "fpreg_1_no_mmx",
    .get = get_fpreg_1_no_mmx,
    .put = put_fpreg_error,
};

static bool fpregs_is_0(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return (env->fpregs_format_vmstate == 0);
}

static bool fpregs_is_1_mmx(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int guess_mmx;

    guess_mmx = ((env->fptag_vmstate == 0xff) &&
                 (env->fpus_vmstate & 0x3800) == 0);
    return (guess_mmx && (env->fpregs_format_vmstate == 1));
}

static bool fpregs_is_1_no_mmx(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int guess_mmx;

    guess_mmx = ((env->fptag_vmstate == 0xff) &&
                 (env->fpus_vmstate & 0x3800) == 0);
    return (!guess_mmx && (env->fpregs_format_vmstate == 1));
}

#define VMSTATE_FP_REGS(_field, _state, _n)                                    \
    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_0, vmstate_fpreg, FPReg), \
    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1_mmx, vmstate_fpreg_1_mmx, FPReg), \
    VMSTATE_ARRAY_TEST(_field, _state, _n, fpregs_is_1_no_mmx, vmstate_fpreg_1_no_mmx, FPReg)

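/*
 * The three tests are mutually exclusive (format 0; format 1 with a tag
 * word that looks like MMX use; format 1 otherwise), so at most one entry
 * matches a given stream.  The MMX case is a heuristic guess: format 1
 * never recorded which mode the FPU was actually in.
 */
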
static bool version_is_5(void *opaque, int version_id)
{
    return version_id == 5;
}

#ifdef TARGET_X86_64
static bool less_than_7(void *opaque, int version_id)
{
    return version_id < 7;
}

static int get_uint64_as_uint32(QEMUFile *f, void *pv, size_t size)
{
    uint64_t *v = pv;
    *v = qemu_get_be32(f);
    return 0;
}

static void put_uint64_as_uint32(QEMUFile *f, void *pv, size_t size)
{
    uint64_t *v = pv;
    qemu_put_be32(f, *v);
}

static const VMStateInfo vmstate_hack_uint64_as_uint32 = {
    .name = "uint64_as_uint32",
    .get = get_uint64_as_uint32,
    .put = put_uint64_as_uint32,
};

#define VMSTATE_HACK_UINT32(_f, _s, _t)                               \
    VMSTATE_SINGLE_TEST(_f, _s, _t, 0, vmstate_hack_uint64_as_uint32, uint64_t)
#endif

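/*
 * Used below for env.sysenter_esp/eip: pre-version-7 x86_64 streams stored
 * them as 32-bit values, so for those streams only the low 32 bits of the
 * 64-bit field are read and written.
 */
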
static void cpu_pre_save(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    /* FPU */
    env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    /* pack the tags inverted, so a set bit means the register is valid */
    env->fptag_vmstate = 0;
    for (i = 0; i < 8; i++) {
        env->fptag_vmstate |= ((!env->fptags[i]) << i);
    }

    env->fpregs_format_vmstate = 0;

    /*
     * Real mode guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it allows live migration to a host with unrestricted guest
     * support (otherwise the migration fails with an invalid guest state
     * error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }
}

static int cpu_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    /*
     * Real mode guest segment registers' DPL should be zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it allows live migration from a host without unrestricted
     * guest support to a host with it (otherwise the migration fails
     * with an invalid guest state error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
     * running under KVM.  This is wrong for conforming code segments.
     * Luckily, in our implementation the CPL field of hflags is redundant
     * and we can get the right value from the SS descriptor privilege level.
     */
    env->hflags &= ~HF_CPL_MASK;
    env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;

    env->fpstt = (env->fpus_vmstate >> 11) & 7;
    env->fpus = env->fpus_vmstate & ~0x3800;
    env->fptag_vmstate ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
    }
    update_fp_status(env);

    /* Rebuild state that is derived from the migrated registers rather
     * than carried in the stream itself. */
    cpu_breakpoint_remove_all(cs, BP_CPU);
    cpu_watchpoint_remove_all(cs, BP_CPU);
    for (i = 0; i < DR7_MAX_BP; i++) {
        hw_breakpoint_insert(env, i);
    }
    tlb_flush(cs, 1);

    return 0;
}

static bool async_pf_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.async_pf_en_msr != 0;
}

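/*
 * 'needed' callbacks such as the one above gate the optional subsections
 * listed at the end of vmstate_x86_cpu: a subsection is only put on the
 * wire when the state it covers differs from the reset default, so the
 * stream stays loadable by older QEMU versions that do not know it.
 */
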
static bool pv_eoi_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.pv_eoi_en_msr != 0;
}

static bool steal_time_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.steal_time_msr != 0;
}

static const VMStateDescription vmstate_steal_time_msr = {
    .name = "cpu/steal_time_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.steal_time_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_async_pf_msr = {
    .name = "cpu/async_pf_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pv_eoi_msr = {
    .name = "cpu/async_pv_eoi_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool fpop_ip_dp_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
}

static const VMStateDescription vmstate_fpop_ip_dp = {
    .name = "cpu/fpop_ip_dp",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(env.fpop, X86CPU),
        VMSTATE_UINT64(env.fpip, X86CPU),
        VMSTATE_UINT64(env.fpdp, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tsc_adjust_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_adjust != 0;
}

static const VMStateDescription vmstate_msr_tsc_adjust = {
    .name = "cpu/msr_tsc_adjust",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_adjust, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tscdeadline_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_deadline != 0;
}

static const VMStateDescription vmstate_msr_tscdeadline = {
    .name = "cpu/msr_tscdeadline",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_deadline, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool misc_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
}

static bool feature_control_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_feature_control != 0;
}

static const VMStateDescription vmstate_msr_ia32_misc_enable = {
    .name = "cpu/msr_ia32_misc_enable",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_msr_ia32_feature_control = {
    .name = "cpu/msr_ia32_feature_control",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmu_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
        env->msr_global_status || env->msr_global_ovf_ctrl) {
        return true;
    }
    for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
        if (env->msr_fixed_counters[i]) {
            return true;
        }
    }
    for (i = 0; i < MAX_GP_COUNTERS; i++) {
        if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_architectural_pmu = {
    .name = "cpu/msr_architectural_pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_status, X86CPU),
        VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_END_OF_LIST()
    }
};

static bool mpx_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < 4; i++) {
        if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
            return true;
        }
    }

    if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
        return true;
    }

    return !!env->msr_bndcfgs;
}

static const VMStateDescription vmstate_mpx = {
    .name = "cpu/mpx",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
        VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
        VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
        VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_hypercall_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
}

static const VMStateDescription vmstate_msr_hypercall_hypercall = {
    .name = "cpu/msr_hyperv_hypercall",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
        VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_vapic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_vapic != 0;
}

static const VMStateDescription vmstate_msr_hyperv_vapic = {
    .name = "cpu/msr_hyperv_vapic",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_time_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_tsc != 0;
}

static const VMStateDescription vmstate_msr_hyperv_time = {
    .name = "cpu/msr_hyperv_time",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool avx512_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < NB_OPMASK_REGS; i++) {
        if (env->opmask_regs[i]) {
            return true;
        }
    }

    for (i = 0; i < CPU_NB_REGS; i++) {
#define ENV_ZMMH(reg, field) (env->zmmh_regs[reg].YMM_Q(field))
        if (ENV_ZMMH(i, 0) || ENV_ZMMH(i, 1) ||
            ENV_ZMMH(i, 2) || ENV_ZMMH(i, 3)) {
            return true;
        }
#ifdef TARGET_X86_64
#define ENV_Hi16_ZMM(reg, field) (env->hi16_zmm_regs[reg].ZMM_Q(field))
        if (ENV_Hi16_ZMM(i, 0) || ENV_Hi16_ZMM(i, 1) ||
            ENV_Hi16_ZMM(i, 2) || ENV_Hi16_ZMM(i, 3) ||
            ENV_Hi16_ZMM(i, 4) || ENV_Hi16_ZMM(i, 5) ||
            ENV_Hi16_ZMM(i, 6) || ENV_Hi16_ZMM(i, 7)) {
            return true;
        }
#endif
    }

    return false;
}

static const VMStateDescription vmstate_avx512 = {
    .name = "cpu/avx512",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
        VMSTATE_ZMMH_REGS_VARS(env.zmmh_regs, X86CPU, CPU_NB_REGS),
#ifdef TARGET_X86_64
        VMSTATE_Hi16_ZMM_REGS_VARS(env.hi16_zmm_regs, X86CPU, CPU_NB_REGS),
#endif
        VMSTATE_END_OF_LIST()
    }
};

VMStateDescription vmstate_x86_cpu = {
    .name = "cpu",
    .version_id = 12,
    .minimum_version_id = 3,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
        VMSTATE_UINTTL(env.eip, X86CPU),
        VMSTATE_UINTTL(env.eflags, X86CPU),
        VMSTATE_UINT32(env.hflags, X86CPU),
        /* FPU */
        VMSTATE_UINT16(env.fpuc, X86CPU),
        VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
        VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
        VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),
        VMSTATE_FP_REGS(env.fpregs, X86CPU, 8),

        VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
        VMSTATE_SEGMENT(env.ldt, X86CPU),
        VMSTATE_SEGMENT(env.tr, X86CPU),
        VMSTATE_SEGMENT(env.gdt, X86CPU),
        VMSTATE_SEGMENT(env.idt, X86CPU),

        VMSTATE_UINT32(env.sysenter_cs, X86CPU),
#ifdef TARGET_X86_64
        /* Hack: in v7 the size changed from 32 to 64 bits on x86_64;
           exactly one of each pair below matches, selected by the
           stream version. */
        VMSTATE_HACK_UINT32(env.sysenter_esp, X86CPU, less_than_7),
        VMSTATE_HACK_UINT32(env.sysenter_eip, X86CPU, less_than_7),
        VMSTATE_UINTTL_V(env.sysenter_esp, X86CPU, 7),
        VMSTATE_UINTTL_V(env.sysenter_eip, X86CPU, 7),
#else
        VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
        VMSTATE_UINTTL(env.sysenter_eip, X86CPU),
#endif

        VMSTATE_UINTTL(env.cr[0], X86CPU),
        VMSTATE_UINTTL(env.cr[2], X86CPU),
        VMSTATE_UINTTL(env.cr[3], X86CPU),
        VMSTATE_UINTTL(env.cr[4], X86CPU),
        VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),
        /* MMU */
        VMSTATE_INT32(env.a20_mask, X86CPU),
        /* XMM */
        VMSTATE_UINT32(env.mxcsr, X86CPU),
        VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, CPU_NB_REGS),

#ifdef TARGET_X86_64
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_UINT64(env.star, X86CPU),
        VMSTATE_UINT64(env.lstar, X86CPU),
        VMSTATE_UINT64(env.cstar, X86CPU),
        VMSTATE_UINT64(env.fmask, X86CPU),
        VMSTATE_UINT64(env.kernelgsbase, X86CPU),
#endif
        VMSTATE_UINT32_V(env.smbase, X86CPU, 4),

        VMSTATE_UINT64_V(env.pat, X86CPU, 5),
        VMSTATE_UINT32_V(env.hflags2, X86CPU, 5),

        VMSTATE_UINT32_TEST(parent_obj.halted, X86CPU, version_is_5),
        VMSTATE_UINT64_V(env.vm_hsave, X86CPU, 5),
        VMSTATE_UINT64_V(env.vm_vmcb, X86CPU, 5),
        VMSTATE_UINT64_V(env.tsc_offset, X86CPU, 5),
        VMSTATE_UINT64_V(env.intercept, X86CPU, 5),
        VMSTATE_UINT16_V(env.intercept_cr_read, X86CPU, 5),
        VMSTATE_UINT16_V(env.intercept_cr_write, X86CPU, 5),
        VMSTATE_UINT16_V(env.intercept_dr_read, X86CPU, 5),
        VMSTATE_UINT16_V(env.intercept_dr_write, X86CPU, 5),
        VMSTATE_UINT32_V(env.intercept_exceptions, X86CPU, 5),
        VMSTATE_UINT8_V(env.v_tpr, X86CPU, 5),
        /* MTRRs */
        VMSTATE_UINT64_ARRAY_V(env.mtrr_fixed, X86CPU, 11, 8),
        VMSTATE_UINT64_V(env.mtrr_deftype, X86CPU, 8),
        VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
        /* KVM-related states */
        VMSTATE_INT32_V(env.interrupt_injected, X86CPU, 9),
        VMSTATE_UINT32_V(env.mp_state, X86CPU, 9),
        VMSTATE_UINT64_V(env.tsc, X86CPU, 9),
        VMSTATE_INT32_V(env.exception_injected, X86CPU, 11),
        VMSTATE_UINT8_V(env.soft_interrupt, X86CPU, 11),
        VMSTATE_UINT8_V(env.nmi_injected, X86CPU, 11),
        VMSTATE_UINT8_V(env.nmi_pending, X86CPU, 11),
        VMSTATE_UINT8_V(env.has_error_code, X86CPU, 11),
        VMSTATE_UINT32_V(env.sipi_vector, X86CPU, 11),
        /* MCE */
        VMSTATE_UINT64_V(env.mcg_cap, X86CPU, 10),
        VMSTATE_UINT64_V(env.mcg_status, X86CPU, 10),
        VMSTATE_UINT64_V(env.mcg_ctl, X86CPU, 10),
        VMSTATE_UINT64_ARRAY_V(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4, 10),
        /* rdtscp */
        VMSTATE_UINT64_V(env.tsc_aux, X86CPU, 11),
        /* KVM pvclock msr */
        VMSTATE_UINT64_V(env.system_time_msr, X86CPU, 11),
        VMSTATE_UINT64_V(env.wall_clock_msr, X86CPU, 11),
        /* XSAVE related fields */
        VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
        VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
        VMSTATE_YMMH_REGS_VARS(env.ymmh_regs, X86CPU, CPU_NB_REGS, 12),
        VMSTATE_END_OF_LIST()
        /* The above list is not sorted w.r.t. version numbers, watch out! */
    },
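    /*
     * Optional state lives in subsections.  Each subsection is identified
     * by its vmsd name on the wire and sent only when its 'needed' callback
     * returns true; the empty entry terminates the list.
     */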
    .subsections = (VMStateSubsection []) {
        {
            .vmsd = &vmstate_async_pf_msr,
            .needed = async_pf_msr_needed,
        }, {
            .vmsd = &vmstate_pv_eoi_msr,
            .needed = pv_eoi_msr_needed,
        }, {
            .vmsd = &vmstate_steal_time_msr,
            .needed = steal_time_msr_needed,
        }, {
            .vmsd = &vmstate_fpop_ip_dp,
            .needed = fpop_ip_dp_needed,
        }, {
            .vmsd = &vmstate_msr_tsc_adjust,
            .needed = tsc_adjust_needed,
        }, {
            .vmsd = &vmstate_msr_tscdeadline,
            .needed = tscdeadline_needed,
        }, {
            .vmsd = &vmstate_msr_ia32_misc_enable,
            .needed = misc_enable_needed,
        }, {
            .vmsd = &vmstate_msr_ia32_feature_control,
            .needed = feature_control_needed,
        }, {
            .vmsd = &vmstate_msr_architectural_pmu,
            .needed = pmu_enable_needed,
        }, {
            .vmsd = &vmstate_mpx,
            .needed = mpx_needed,
        }, {
            .vmsd = &vmstate_msr_hypercall_hypercall,
            .needed = hyperv_hypercall_enable_needed,
        }, {
            .vmsd = &vmstate_msr_hyperv_vapic,
            .needed = hyperv_vapic_enable_needed,
        }, {
            .vmsd = &vmstate_msr_hyperv_time,
            .needed = hyperv_time_enable_needed,
        }, {
            .vmsd = &vmstate_avx512,
            .needed = avx512_needed,
        }, {
            /* empty */
        }
    }
};