#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "hw/i386/pc.h"
#include "hw/isa/isa.h"
#include "migration/cpu.h"
#include "hyperv.h"
#include "kvm_i386.h"

#include "sysemu/kvm.h"
#include "sysemu/tcg.h"

#include "qemu/error-report.h"
static const VMStateDescription vmstate_segment = {
    .name = "segment",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(selector, SegmentCache),
        VMSTATE_UINTTL(base, SegmentCache),
        VMSTATE_UINT32(limit, SegmentCache),
        VMSTATE_UINT32(flags, SegmentCache),
        VMSTATE_END_OF_LIST()
    }
};
#define VMSTATE_SEGMENT(_field, _state) {                            \
    .name       = (stringify(_field)),                               \
    .size       = sizeof(SegmentCache),                              \
    .vmsd       = &vmstate_segment,                                  \
    .flags      = VMS_STRUCT,                                        \
    .offset     = offsetof(_state, _field)                           \
            + type_check(SegmentCache, typeof_field(_state, _field)) \
}

#define VMSTATE_SEGMENT_ARRAY(_field, _state, _n)                        \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_segment, SegmentCache)
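/*
 * Each vector register is stored as one ZMMReg; the helper descriptions
 * below migrate it in 64-bit chunks so that only the architecturally
 * relevant slice needs to be on the wire: ZMM_Q(0)/ZMM_Q(1) carry bits
 * 0-127 (XMM), ZMM_Q(2)/ZMM_Q(3) bits 128-255 (the YMM high halves), and
 * ZMM_Q(4)-ZMM_Q(7) bits 256-511 (the ZMM high halves).
 */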
static const VMStateDescription vmstate_xmm_reg = {
    .name = "xmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_XMM_REGS(_field, _state, _start)                         \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_xmm_reg, ZMMReg)

/* YMMH format is the same as XMM, but for bits 128-255 */
static const VMStateDescription vmstate_ymmh_reg = {
    .name = "ymmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_YMMH_REGS_VARS(_field, _state, _start, _v)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, _v,    \
                             vmstate_ymmh_reg, ZMMReg)

static const VMStateDescription vmstate_zmmh_reg = {
    .name = "zmmh_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_ZMMH_REGS_VARS(_field, _state, _start)                   \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_zmmh_reg, ZMMReg)

static const VMStateDescription vmstate_hi16_zmm_reg = {
    .name = "hi16_zmm_reg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(ZMM_Q(0), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(1), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(2), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(3), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(4), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(5), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(6), ZMMReg),
        VMSTATE_UINT64(ZMM_Q(7), ZMMReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_Hi16_ZMM_REGS_VARS(_field, _state, _start)               \
    VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, CPU_NB_REGS, 0,     \
                             vmstate_hi16_zmm_reg, ZMMReg)

static const VMStateDescription vmstate_bnd_regs = {
    .name = "bnd_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(lb, BNDReg),
        VMSTATE_UINT64(ub, BNDReg),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_BND_REGS(_field, _state, _n)                             \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_bnd_regs, BNDReg)

static const VMStateDescription vmstate_mtrr_var = {
    .name = "mtrr_var",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, MTRRVar),
        VMSTATE_UINT64(mask, MTRRVar),
        VMSTATE_END_OF_LIST()
    }
};

#define VMSTATE_MTRR_VARS(_field, _state, _n, _v)                        \
    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_mtrr_var, MTRRVar)
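/*
 * x87 registers are migrated through a temporary structure: VMSTATE_WITH_TMP
 * points 'parent' at the FPReg being saved, fpreg_pre_save() splits the
 * floatx80 value into a 64-bit mantissa and a 16-bit sign/exponent word,
 * and fpreg_post_load() reassembles it on the destination.
 */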
typedef struct x86_FPReg_tmp {
    FPReg *parent;
    uint64_t tmp_mant;
    uint16_t tmp_exp;
} x86_FPReg_tmp;

static void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

static floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}

static int fpreg_pre_save(void *opaque)
{
    x86_FPReg_tmp *tmp = opaque;

    /* we save the real CPU data (in case of MMX usage only 'mant'
       contains the MMX register) */
    cpu_get_fp80(&tmp->tmp_mant, &tmp->tmp_exp, tmp->parent->d);

    return 0;
}

static int fpreg_post_load(void *opaque, int version)
{
    x86_FPReg_tmp *tmp = opaque;

    tmp->parent->d = cpu_set_fp80(tmp->tmp_mant, tmp->tmp_exp);
    return 0;
}

static const VMStateDescription vmstate_fpreg_tmp = {
    .name = "fpreg_tmp",
    .post_load = fpreg_post_load,
    .pre_save = fpreg_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(tmp_mant, x86_FPReg_tmp),
        VMSTATE_UINT16(tmp_exp, x86_FPReg_tmp),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_fpreg = {
    .name = "fpreg",
    .fields = (VMStateField[]) {
        VMSTATE_WITH_TMP(FPReg, x86_FPReg_tmp, vmstate_fpreg_tmp),
        VMSTATE_END_OF_LIST()
    }
};
static int cpu_pre_save(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    /* FPU: fold the top-of-stack pointer into FSW bits 11-13 and pack the
       tag word as one "valid" bit per register. */
    env->fpus_vmstate = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    env->fptag_vmstate = 0;
    for (i = 0; i < 8; i++) {
        env->fptag_vmstate |= ((!env->fptags[i]) << i);
    }

    env->fpregs_format_vmstate = 0;

    /*
     * Real mode guest segment registers should have a DPL of zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it will allow live migration to a host with unrestricted
     * guest support (otherwise the migration will fail with an invalid
     * guest state error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

#ifdef CONFIG_KVM
    /*
     * In case the vCPU may have enabled VMX, we need to make sure the kernel
     * has the required capabilities in order to perform migration correctly:
     *
     * 1) We must be able to extract the vCPU nested-state from KVM.
     *
     * 2) In case the vCPU is running in guest-mode and it has a pending
     *    exception, we must be able to determine if it's in a pending or
     *    injected state. Note that in case KVM doesn't have the required
     *    capability to do so, a pending/injected exception will always
     *    appear as an injected exception.
     */
    if (kvm_enabled() && cpu_vmx_maybe_enabled(env) &&
        (!env->nested_state ||
         (!kvm_has_exception_payload() && (env->hflags & HF_GUEST_MASK) &&
          env->exception_injected))) {
        error_report("Guest may have enabled nested virtualization but kernel "
                     "does not support required capabilities to save vCPU "
                     "nested state");
        return -EINVAL;
    }
#endif

    /*
     * When the vCPU is running L2 and an exception is still pending, it can
     * potentially be intercepted by the L1 hypervisor, in contrast to an
     * injected exception which cannot be intercepted anymore.
     *
     * Furthermore, when an L2 exception is intercepted by the L1 hypervisor,
     * its exception payload (CR2/DR6 on #PF/#DB) should not be set yet in
     * the respective vCPU register. Thus, in case an exception is pending,
     * it is important to save the exception payload separately.
     *
     * Therefore, if an exception is not in a pending state or the vCPU is
     * not in guest-mode, it is not important to distinguish between a
     * pending and an injected exception and we don't need to store the
     * exception payload separately.
     *
     * In order to preserve better backwards-compatible migration, convert a
     * pending exception to an injected exception in case it is not important
     * to distinguish between them as described above.
     */
    if (env->exception_pending && !(env->hflags & HF_GUEST_MASK)) {
        env->exception_pending = 0;
        env->exception_injected = 1;

        if (env->exception_has_payload) {
            if (env->exception_nr == EXCP01_DB) {
                env->dr[6] = env->exception_payload;
            } else if (env->exception_nr == EXCP0E_PAGE) {
                env->cr[2] = env->exception_payload;
            }
        }
    }

    return 0;
}
static int cpu_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    if (env->tsc_khz && env->user_tsc_khz &&
        env->tsc_khz != env->user_tsc_khz) {
        error_report("Mismatch between user-specified TSC frequency and "
                     "migrated TSC frequency");
        return -EINVAL;
    }

    if (env->fpregs_format_vmstate) {
        error_report("Unsupported old non-softfloat CPU state");
        return -EINVAL;
    }
    /*
     * Real mode guest segment registers should have a DPL of zero.
     * Older KVM versions were setting it wrongly.
     * Fixing it will allow live migration from a host that doesn't have
     * unrestricted guest support to a host with unrestricted guest support
     * (otherwise the migration will fail with an invalid guest state error).
     */
    if (!(env->cr[0] & CR0_PE_MASK) &&
        (env->segs[R_CS].flags >> DESC_DPL_SHIFT & 3) != 0) {
        env->segs[R_CS].flags &= ~(env->segs[R_CS].flags & DESC_DPL_MASK);
        env->segs[R_DS].flags &= ~(env->segs[R_DS].flags & DESC_DPL_MASK);
        env->segs[R_ES].flags &= ~(env->segs[R_ES].flags & DESC_DPL_MASK);
        env->segs[R_FS].flags &= ~(env->segs[R_FS].flags & DESC_DPL_MASK);
        env->segs[R_GS].flags &= ~(env->segs[R_GS].flags & DESC_DPL_MASK);
        env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
    }

    /* Older versions of QEMU incorrectly used CS.DPL as the CPL when
     * running under KVM.  This is wrong for conforming code segments.
     * Luckily, in our implementation the CPL field of hflags is redundant
     * and we can get the right value from the SS descriptor privilege level.
     */
    env->hflags &= ~HF_CPL_MASK;
    env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;

#ifdef CONFIG_KVM
    if ((env->hflags & HF_GUEST_MASK) &&
        (!env->nested_state ||
         !(env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE))) {
        error_report("vCPU set in guest-mode inconsistent with "
                     "migrated kernel nested state");
        return -EINVAL;
    }
#endif

    /*
     * There are cases where we can end up with a valid exception_nr while
     * both exception_pending and exception_injected are cleared.
     * This can happen in one of the following scenarios:
     * 1) Source is older QEMU without KVM_CAP_EXCEPTION_PAYLOAD support.
     * 2) Source is running on a kernel without KVM_CAP_EXCEPTION_PAYLOAD
     *    support.
     * 3) The "cpu/exception_info" subsection was not sent because there was
     *    no exception pending or the guest wasn't running L2 (see the
     *    comment in cpu_pre_save()).
     *
     * In those cases, we can just deduce that a valid exception_nr means
     * we can treat the exception as already injected.
     */
    if ((env->exception_nr != -1) &&
        !env->exception_pending && !env->exception_injected) {
        env->exception_injected = 1;
    }

    env->fpstt = (env->fpus_vmstate >> 11) & 7;
    env->fpus = env->fpus_vmstate & ~0x3800;
    env->fptag_vmstate ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = (env->fptag_vmstate >> i) & 1;
    }

    update_fp_status(env);
    update_mxcsr_status(env);

    cpu_breakpoint_remove_all(cs, BP_CPU);
    cpu_watchpoint_remove_all(cs, BP_CPU);
    {
        /* Indicate all breakpoints disabled, as they are, then
           let the helper re-enable them.  */
        target_ulong dr7 = env->dr[7];
        env->dr[7] = dr7 & ~(DR7_GLOBAL_BP_MASK | DR7_LOCAL_BP_MASK);
        cpu_x86_update_dr7(env, dr7);
    }
    tlb_flush(cs);
    return 0;
}
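/*
 * Everything below describes optional "subsections" of the main CPU
 * vmstate.  Each one has a .needed callback; the subsection is only put on
 * the wire when the callback returns true, so streams produced for guests
 * that never touched a given feature remain loadable by older QEMU versions.
 */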
static bool async_pf_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.async_pf_en_msr != 0;
}

static bool pv_eoi_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.pv_eoi_en_msr != 0;
}

static bool steal_time_msr_needed(void *opaque)
{
    X86CPU *cpu = opaque;

    return cpu->env.steal_time_msr != 0;
}

static bool exception_info_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    /*
     * It is important to save exception-info only in case we need to
     * distinguish between a pending and an injected exception, which is
     * only required in case there is a pending exception and the vCPU is
     * running L2.
     * For more info, refer to the comment in cpu_pre_save().
     */
    return env->exception_pending && (env->hflags & HF_GUEST_MASK);
}

static const VMStateDescription vmstate_exception_info = {
    .name = "cpu/exception_info",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = exception_info_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(env.exception_pending, X86CPU),
        VMSTATE_UINT8(env.exception_injected, X86CPU),
        VMSTATE_UINT8(env.exception_has_payload, X86CPU),
        VMSTATE_UINT64(env.exception_payload, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_steal_time_msr = {
    .name = "cpu/steal_time_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = steal_time_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.steal_time_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_async_pf_msr = {
    .name = "cpu/async_pf_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = async_pf_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.async_pf_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_pv_eoi_msr = {
    .name = "cpu/async_pv_eoi_msr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pv_eoi_msr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.pv_eoi_en_msr, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
static bool fpop_ip_dp_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->fpop != 0 || env->fpip != 0 || env->fpdp != 0;
}

static const VMStateDescription vmstate_fpop_ip_dp = {
    .name = "cpu/fpop_ip_dp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpop_ip_dp_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(env.fpop, X86CPU),
        VMSTATE_UINT64(env.fpip, X86CPU),
        VMSTATE_UINT64(env.fpdp, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tsc_adjust_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_adjust != 0;
}

static const VMStateDescription vmstate_msr_tsc_adjust = {
    .name = "cpu/msr_tsc_adjust",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_adjust_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_adjust, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool msr_smi_count_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return cpu->migrate_smi_count && env->msr_smi_count != 0;
}

static const VMStateDescription vmstate_msr_smi_count = {
    .name = "cpu/msr_smi_count",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = msr_smi_count_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_smi_count, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool tscdeadline_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->tsc_deadline != 0;
}

static const VMStateDescription vmstate_msr_tscdeadline = {
    .name = "cpu/msr_tscdeadline",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tscdeadline_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.tsc_deadline, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool misc_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_misc_enable != MSR_IA32_MISC_ENABLE_DEFAULT;
}

static bool feature_control_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_ia32_feature_control != 0;
}

static const VMStateDescription vmstate_msr_ia32_misc_enable = {
    .name = "cpu/msr_ia32_misc_enable",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = misc_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_misc_enable, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_msr_ia32_feature_control = {
    .name = "cpu/msr_ia32_feature_control",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = feature_control_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_ia32_feature_control, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
static bool pmu_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_fixed_ctr_ctrl || env->msr_global_ctrl ||
        env->msr_global_status || env->msr_global_ovf_ctrl) {
        return true;
    }
    for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
        if (env->msr_fixed_counters[i]) {
            return true;
        }
    }
    for (i = 0; i < MAX_GP_COUNTERS; i++) {
        if (env->msr_gp_counters[i] || env->msr_gp_evtsel[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_architectural_pmu = {
    .name = "cpu/msr_architectural_pmu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmu_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_fixed_ctr_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_global_status, X86CPU),
        VMSTATE_UINT64(env.msr_global_ovf_ctrl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_fixed_counters, X86CPU, MAX_FIXED_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_counters, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_UINT64_ARRAY(env.msr_gp_evtsel, X86CPU, MAX_GP_COUNTERS),
        VMSTATE_END_OF_LIST()
    }
};

static bool mpx_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < 4; i++) {
        if (env->bnd_regs[i].lb || env->bnd_regs[i].ub) {
            return true;
        }
    }

    if (env->bndcs_regs.cfgu || env->bndcs_regs.sts) {
        return true;
    }

    return !!env->msr_bndcfgs;
}

static const VMStateDescription vmstate_mpx = {
    .name = "cpu/mpx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mpx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BND_REGS(env.bnd_regs, X86CPU, 4),
        VMSTATE_UINT64(env.bndcs_regs.cfgu, X86CPU),
        VMSTATE_UINT64(env.bndcs_regs.sts, X86CPU),
        VMSTATE_UINT64(env.msr_bndcfgs, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
static bool hyperv_hypercall_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_hypercall != 0 || env->msr_hv_guest_os_id != 0;
}

static const VMStateDescription vmstate_msr_hypercall_hypercall = {
    .name = "cpu/msr_hyperv_hypercall",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_hypercall_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_guest_os_id, X86CPU),
        VMSTATE_UINT64(env.msr_hv_hypercall, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_vapic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_vapic != 0;
}

static const VMStateDescription vmstate_msr_hyperv_vapic = {
    .name = "cpu/msr_hyperv_vapic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_vapic_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_vapic, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_time_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_tsc != 0;
}

static const VMStateDescription vmstate_msr_hyperv_time = {
    .name = "cpu/msr_hyperv_time",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_time_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_crash_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < HV_CRASH_PARAMS; i++) {
        if (env->msr_hv_crash_params[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_crash = {
    .name = "cpu/msr_hyperv_crash",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_crash_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_crash_params, X86CPU, HV_CRASH_PARAMS),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_runtime_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_RUNTIME)) {
        return false;
    }

    return env->msr_hv_runtime != 0;
}

static const VMStateDescription vmstate_msr_hyperv_runtime = {
    .name = "cpu/msr_hyperv_runtime",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_runtime_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_runtime, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_synic_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_hv_synic_control != 0 ||
        env->msr_hv_synic_evt_page != 0 ||
        env->msr_hv_synic_msg_page != 0) {
        return true;
    }

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
        if (env->msr_hv_synic_sint[i] != 0) {
            return true;
        }
    }

    return false;
}

static int hyperv_synic_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    hyperv_x86_synic_update(cpu);
    return 0;
}

static const VMStateDescription vmstate_msr_hyperv_synic = {
    .name = "cpu/msr_hyperv_synic",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_synic_enable_needed,
    .post_load = hyperv_synic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU, HV_SINT_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_stimer_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
        if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
            return true;
        }
    }
    return false;
}

static const VMStateDescription vmstate_msr_hyperv_stimer = {
    .name = "cpu/msr_hyperv_stimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_stimer_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config, X86CPU,
                             HV_STIMER_COUNT),
        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count, X86CPU, HV_STIMER_COUNT),
        VMSTATE_END_OF_LIST()
    }
};

static bool hyperv_reenlightenment_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->msr_hv_reenlightenment_control != 0 ||
        env->msr_hv_tsc_emulation_control != 0 ||
        env->msr_hv_tsc_emulation_status != 0;
}

static const VMStateDescription vmstate_msr_hyperv_reenlightenment = {
    .name = "cpu/msr_hyperv_reenlightenment",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = hyperv_reenlightenment_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_hv_reenlightenment_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_tsc_emulation_control, X86CPU),
        VMSTATE_UINT64(env.msr_hv_tsc_emulation_status, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
static bool avx512_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    unsigned int i;

    for (i = 0; i < NB_OPMASK_REGS; i++) {
        if (env->opmask_regs[i]) {
            return true;
        }
    }

    for (i = 0; i < CPU_NB_REGS; i++) {
#define ENV_XMM(reg, field) (env->xmm_regs[reg].ZMM_Q(field))
        if (ENV_XMM(i, 4) || ENV_XMM(i, 6) ||
            ENV_XMM(i, 5) || ENV_XMM(i, 7)) {
            return true;
        }
#ifdef TARGET_X86_64
        if (ENV_XMM(i+16, 0) || ENV_XMM(i+16, 1) ||
            ENV_XMM(i+16, 2) || ENV_XMM(i+16, 3) ||
            ENV_XMM(i+16, 4) || ENV_XMM(i+16, 5) ||
            ENV_XMM(i+16, 6) || ENV_XMM(i+16, 7)) {
            return true;
        }
#endif
    }

    return false;
}

static const VMStateDescription vmstate_avx512 = {
    .name = "cpu/avx512",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = avx512_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.opmask_regs, X86CPU, NB_OPMASK_REGS),
        VMSTATE_ZMMH_REGS_VARS(env.xmm_regs, X86CPU, 0),
#ifdef TARGET_X86_64
        VMSTATE_Hi16_ZMM_REGS_VARS(env.xmm_regs, X86CPU, 16),
#endif
        VMSTATE_END_OF_LIST()
    }
};

static bool xss_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->xss != 0;
}

static const VMStateDescription vmstate_xss = {
    .name = "cpu/xss",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = xss_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.xss, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#ifdef TARGET_X86_64
static bool pkru_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->pkru != 0;
}

static const VMStateDescription vmstate_pkru = {
    .name = "cpu/pkru",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pkru_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.pkru, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif
static bool tsc_khz_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());
    PCMachineClass *pcmc = PC_MACHINE_CLASS(mc);
    return env->tsc_khz && pcmc->save_tsc_khz;
}
static const VMStateDescription vmstate_tsc_khz = {
    .name = "cpu/tsc_khz",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tsc_khz_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(env.tsc_khz, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
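/*
 * The nested-virtualization state below mirrors the kernel's
 * struct kvm_nested_state: the fixed header (flags, format, size) is
 * migrated by "cpu/kvm_nested_state", while the VMX-specific data
 * (vmcs12 and shadow vmcs12 blobs) travels in optional subsections that
 * are only sent when the reported size covers them.
 */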
#ifdef CONFIG_KVM
static bool vmx_vmcs12_needed(void *opaque)
{
    struct kvm_nested_state *nested_state = opaque;
    return (nested_state->size >
            offsetof(struct kvm_nested_state, data.vmx[0].vmcs12));
}

static const VMStateDescription vmstate_vmx_vmcs12 = {
    .name = "cpu/kvm_nested_state/vmx/vmcs12",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_vmcs12_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(data.vmx[0].vmcs12,
                            struct kvm_nested_state,
                            KVM_STATE_NESTED_VMX_VMCS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};

static bool vmx_shadow_vmcs12_needed(void *opaque)
{
    struct kvm_nested_state *nested_state = opaque;
    return (nested_state->size >
            offsetof(struct kvm_nested_state, data.vmx[0].shadow_vmcs12));
}

static const VMStateDescription vmstate_vmx_shadow_vmcs12 = {
    .name = "cpu/kvm_nested_state/vmx/shadow_vmcs12",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_shadow_vmcs12_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(data.vmx[0].shadow_vmcs12,
                            struct kvm_nested_state,
                            KVM_STATE_NESTED_VMX_VMCS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};

static bool vmx_nested_state_needed(void *opaque)
{
    struct kvm_nested_state *nested_state = opaque;

    return (nested_state->format == KVM_STATE_NESTED_FORMAT_VMX &&
            nested_state->hdr.vmx.vmxon_pa != -1ull);
}

static const VMStateDescription vmstate_vmx_nested_state = {
    .name = "cpu/kvm_nested_state/vmx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmx_nested_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_U64(hdr.vmx.vmxon_pa, struct kvm_nested_state),
        VMSTATE_U64(hdr.vmx.vmcs12_pa, struct kvm_nested_state),
        VMSTATE_U16(hdr.vmx.smm.flags, struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription *[]) {
        &vmstate_vmx_vmcs12,
        &vmstate_vmx_shadow_vmcs12,
        NULL
    }
};

static bool nested_state_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return (env->nested_state &&
            vmx_nested_state_needed(env->nested_state));
}

static int nested_state_post_load(void *opaque, int version_id)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    struct kvm_nested_state *nested_state = env->nested_state;
    int min_nested_state_len = offsetof(struct kvm_nested_state, data);
    int max_nested_state_len = kvm_max_nested_state_length();

    /*
     * If our kernel doesn't support setting nested state
     * and we have received nested state from the migration stream,
     * we need to fail migration.
     */
    if (max_nested_state_len <= 0) {
        error_report("Received nested state when kernel cannot restore it");
        return -EINVAL;
    }

    /*
     * Verify that the size of the received nested_state struct
     * at least covers the required header and is not larger
     * than the maximum size that our kernel supports.
     */
    if (nested_state->size < min_nested_state_len) {
        error_report("Received nested state size less than min: "
                     "len=%d, min=%d",
                     nested_state->size, min_nested_state_len);
        return -EINVAL;
    }
    if (nested_state->size > max_nested_state_len) {
        error_report("Received unsupported nested state size: "
                     "nested_state->size=%d, max=%d",
                     nested_state->size, max_nested_state_len);
        return -EINVAL;
    }

    /* Verify format is valid */
    if ((nested_state->format != KVM_STATE_NESTED_FORMAT_VMX) &&
        (nested_state->format != KVM_STATE_NESTED_FORMAT_SVM)) {
        error_report("Received invalid nested state format: %d",
                     nested_state->format);
        return -EINVAL;
    }

    return 0;
}

static const VMStateDescription vmstate_kvm_nested_state = {
    .name = "cpu/kvm_nested_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_U16(flags, struct kvm_nested_state),
        VMSTATE_U16(format, struct kvm_nested_state),
        VMSTATE_U32(size, struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription *[]) {
        &vmstate_vmx_nested_state,
        NULL
    }
};

static const VMStateDescription vmstate_nested_state = {
    .name = "cpu/nested_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = nested_state_needed,
    .post_load = nested_state_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_POINTER(env.nested_state, X86CPU,
                               vmstate_kvm_nested_state,
                               struct kvm_nested_state),
        VMSTATE_END_OF_LIST()
    }
};
#endif /* CONFIG_KVM */
static bool mcg_ext_ctl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    return cpu->enable_lmce && env->mcg_ext_ctl;
}
static const VMStateDescription vmstate_mcg_ext_ctl = {
    .name = "cpu/mcg_ext_ctl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = mcg_ext_ctl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.mcg_ext_ctl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool spec_ctrl_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->spec_ctrl != 0;
}

static const VMStateDescription vmstate_spec_ctrl = {
    .name = "cpu/spec_ctrl",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spec_ctrl_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.spec_ctrl, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool intel_pt_enable_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;
    int i;

    if (env->msr_rtit_ctrl || env->msr_rtit_status ||
        env->msr_rtit_output_base || env->msr_rtit_output_mask ||
        env->msr_rtit_cr3_match) {
        return true;
    }

    for (i = 0; i < MAX_RTIT_ADDRS; i++) {
        if (env->msr_rtit_addrs[i]) {
            return true;
        }
    }

    return false;
}

static const VMStateDescription vmstate_msr_intel_pt = {
    .name = "cpu/intel_pt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_pt_enable_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.msr_rtit_ctrl, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_status, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_base, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_output_mask, X86CPU),
        VMSTATE_UINT64(env.msr_rtit_cr3_match, X86CPU),
        VMSTATE_UINT64_ARRAY(env.msr_rtit_addrs, X86CPU, MAX_RTIT_ADDRS),
        VMSTATE_END_OF_LIST()
    }
};

static bool virt_ssbd_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->virt_ssbd != 0;
}

static const VMStateDescription vmstate_msr_virt_ssbd = {
    .name = "cpu/virt_ssbd",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = virt_ssbd_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.virt_ssbd, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool svm_npt_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return !!(env->hflags2 & HF2_NPT_MASK);
}

static const VMStateDescription vmstate_svm_npt = {
    .name = "cpu/svn_npt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = svm_npt_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.nested_cr3, X86CPU),
        VMSTATE_UINT32(env.nested_pg_mode, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#ifndef TARGET_X86_64
static bool intel_efer32_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return env->efer != 0;
}

static const VMStateDescription vmstate_efer32 = {
    .name = "cpu/efer32",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = intel_efer32_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_END_OF_LIST()
    }
};
#endif
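/*
 * Top-level vmstate for the x86 CPU.  The baseline register set is listed
 * in .fields (note the list is not sorted by version number); everything
 * optional is attached through the .subsections array at the end and gated
 * by the .needed callbacks defined above.
 */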
VMStateDescription vmstate_x86_cpu = {
    .name = "cpu",
    .version_id = 12,
    .minimum_version_id = 11,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.regs, X86CPU, CPU_NB_REGS),
        VMSTATE_UINTTL(env.eip, X86CPU),
        VMSTATE_UINTTL(env.eflags, X86CPU),
        VMSTATE_UINT32(env.hflags, X86CPU),
        /* FPU */
        VMSTATE_UINT16(env.fpuc, X86CPU),
        VMSTATE_UINT16(env.fpus_vmstate, X86CPU),
        VMSTATE_UINT16(env.fptag_vmstate, X86CPU),
        VMSTATE_UINT16(env.fpregs_format_vmstate, X86CPU),

        VMSTATE_STRUCT_ARRAY(env.fpregs, X86CPU, 8, 0, vmstate_fpreg, FPReg),

        VMSTATE_SEGMENT_ARRAY(env.segs, X86CPU, 6),
        VMSTATE_SEGMENT(env.ldt, X86CPU),
        VMSTATE_SEGMENT(env.tr, X86CPU),
        VMSTATE_SEGMENT(env.gdt, X86CPU),
        VMSTATE_SEGMENT(env.idt, X86CPU),

        VMSTATE_UINT32(env.sysenter_cs, X86CPU),
        VMSTATE_UINTTL(env.sysenter_esp, X86CPU),
        VMSTATE_UINTTL(env.sysenter_eip, X86CPU),

        VMSTATE_UINTTL(env.cr[0], X86CPU),
        VMSTATE_UINTTL(env.cr[2], X86CPU),
        VMSTATE_UINTTL(env.cr[3], X86CPU),
        VMSTATE_UINTTL(env.cr[4], X86CPU),
        VMSTATE_UINTTL_ARRAY(env.dr, X86CPU, 8),

        VMSTATE_INT32(env.a20_mask, X86CPU),

        VMSTATE_UINT32(env.mxcsr, X86CPU),
        VMSTATE_XMM_REGS(env.xmm_regs, X86CPU, 0),

#ifdef TARGET_X86_64
        VMSTATE_UINT64(env.efer, X86CPU),
        VMSTATE_UINT64(env.star, X86CPU),
        VMSTATE_UINT64(env.lstar, X86CPU),
        VMSTATE_UINT64(env.cstar, X86CPU),
        VMSTATE_UINT64(env.fmask, X86CPU),
        VMSTATE_UINT64(env.kernelgsbase, X86CPU),
#endif
        VMSTATE_UINT32(env.smbase, X86CPU),

        VMSTATE_UINT64(env.pat, X86CPU),
        VMSTATE_UINT32(env.hflags2, X86CPU),

        VMSTATE_UINT64(env.vm_hsave, X86CPU),
        VMSTATE_UINT64(env.vm_vmcb, X86CPU),
        VMSTATE_UINT64(env.tsc_offset, X86CPU),
        VMSTATE_UINT64(env.intercept, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_cr_write, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_read, X86CPU),
        VMSTATE_UINT16(env.intercept_dr_write, X86CPU),
        VMSTATE_UINT32(env.intercept_exceptions, X86CPU),
        VMSTATE_UINT8(env.v_tpr, X86CPU),

        VMSTATE_UINT64_ARRAY(env.mtrr_fixed, X86CPU, 11),
        VMSTATE_UINT64(env.mtrr_deftype, X86CPU),
        VMSTATE_MTRR_VARS(env.mtrr_var, X86CPU, MSR_MTRRcap_VCNT, 8),
        /* KVM-related states */
        VMSTATE_INT32(env.interrupt_injected, X86CPU),
        VMSTATE_UINT32(env.mp_state, X86CPU),
        VMSTATE_UINT64(env.tsc, X86CPU),
        VMSTATE_INT32(env.exception_nr, X86CPU),
        VMSTATE_UINT8(env.soft_interrupt, X86CPU),
        VMSTATE_UINT8(env.nmi_injected, X86CPU),
        VMSTATE_UINT8(env.nmi_pending, X86CPU),
        VMSTATE_UINT8(env.has_error_code, X86CPU),
        VMSTATE_UINT32(env.sipi_vector, X86CPU),

        VMSTATE_UINT64(env.mcg_cap, X86CPU),
        VMSTATE_UINT64(env.mcg_status, X86CPU),
        VMSTATE_UINT64(env.mcg_ctl, X86CPU),
        VMSTATE_UINT64_ARRAY(env.mce_banks, X86CPU, MCE_BANKS_DEF * 4),

        VMSTATE_UINT64(env.tsc_aux, X86CPU),
        /* KVM pvclock msr */
        VMSTATE_UINT64(env.system_time_msr, X86CPU),
        VMSTATE_UINT64(env.wall_clock_msr, X86CPU),
        /* XSAVE related fields */
        VMSTATE_UINT64_V(env.xcr0, X86CPU, 12),
        VMSTATE_UINT64_V(env.xstate_bv, X86CPU, 12),
        VMSTATE_YMMH_REGS_VARS(env.xmm_regs, X86CPU, 0, 12),
        VMSTATE_END_OF_LIST()
        /* The above list is not sorted /wrt version numbers, watch out! */
    },
    .subsections = (const VMStateDescription *[]) {
        &vmstate_exception_info,
        &vmstate_async_pf_msr,
        &vmstate_pv_eoi_msr,
        &vmstate_steal_time_msr,
        &vmstate_fpop_ip_dp,
        &vmstate_msr_tsc_adjust,
        &vmstate_msr_tscdeadline,
        &vmstate_msr_ia32_misc_enable,
        &vmstate_msr_ia32_feature_control,
        &vmstate_msr_architectural_pmu,
        &vmstate_mpx,
        &vmstate_msr_hypercall_hypercall,
        &vmstate_msr_hyperv_vapic,
        &vmstate_msr_hyperv_time,
        &vmstate_msr_hyperv_crash,
        &vmstate_msr_hyperv_runtime,
        &vmstate_msr_hyperv_synic,
        &vmstate_msr_hyperv_stimer,
        &vmstate_msr_hyperv_reenlightenment,
        &vmstate_avx512,
        &vmstate_xss,
        &vmstate_tsc_khz,
        &vmstate_msr_smi_count,
#ifdef TARGET_X86_64
        &vmstate_pkru,
#endif
        &vmstate_spec_ctrl,
        &vmstate_mcg_ext_ctl,
        &vmstate_msr_intel_pt,
        &vmstate_msr_virt_ssbd,
        &vmstate_svm_npt,
#ifndef TARGET_X86_64
        &vmstate_efer32,
#endif
#ifdef CONFIG_KVM
        &vmstate_nested_state,
#endif
        NULL
    }
};