1 #include "qemu/osdep.h"
3 #include "qemu/error-report.h"
4 #include "sysemu/kvm.h"
5 #include "sysemu/tcg.h"
8 #include "cpu-features.h"
9 #include "migration/cpu.h"
10 #include "target/arm/gtimer.h"
12 static bool vfp_needed(void *opaque
)
16 return (arm_feature(&cpu
->env
, ARM_FEATURE_AARCH64
)
17 ? cpu_isar_feature(aa64_fp_simd
, cpu
)
18 : cpu_isar_feature(aa32_vfp_simd
, cpu
));
21 static bool vfp_fpcr_fpsr_needed(void *opaque
)
24 * If either the FPCR or the FPSR include set bits that are not
25 * visible in the AArch32 FPSCR view of floating point control/status
26 * then we must send the FPCR and FPSR as two separate fields in the
27 * cpu/vfp/fpcr_fpsr subsection, and we will send a 0 for the old
28 * FPSCR field in cpu/vfp.
30 * If all the set bits are representable in an AArch32 FPSCR then we
31 * send that value as the cpu/vfp FPSCR field, and don't send the
32 * cpu/vfp/fpcr_fpsr subsection.
34 * On incoming migration, if the cpu/vfp FPSCR field is non-zero we
35 * use it, and if the fpcr_fpsr subsection is present we use that.
36 * (The subsection will never be present with a non-zero FPSCR field,
37 * and if FPSCR is zero and the subsection is not present that means
38 * that FPSCR/FPSR/FPCR are zero.)
40 * This preserves migration compatibility with older QEMU versions,
44 CPUARMState
*env
= &cpu
->env
;
46 return (vfp_get_fpcr(env
) & ~FPSCR_FPCR_MASK
) ||
47 (vfp_get_fpsr(env
) & ~FPSCR_FPSR_MASK
);
50 static int get_fpscr(QEMUFile
*f
, void *opaque
, size_t size
,
51 const VMStateField
*field
)
54 CPUARMState
*env
= &cpu
->env
;
55 uint32_t val
= qemu_get_be32(f
);
58 /* 0 means we might have the data in the fpcr_fpsr subsection */
59 vfp_set_fpscr(env
, val
);
64 static int put_fpscr(QEMUFile
*f
, void *opaque
, size_t size
,
65 const VMStateField
*field
, JSONWriter
*vmdesc
)
68 CPUARMState
*env
= &cpu
->env
;
69 uint32_t fpscr
= vfp_fpcr_fpsr_needed(opaque
) ? 0 : vfp_get_fpscr(env
);
71 qemu_put_be32(f
, fpscr
);
75 static const VMStateInfo vmstate_fpscr
= {
81 static int get_fpcr(QEMUFile
*f
, void *opaque
, size_t size
,
82 const VMStateField
*field
)
85 CPUARMState
*env
= &cpu
->env
;
86 uint64_t val
= qemu_get_be64(f
);
88 vfp_set_fpcr(env
, val
);
92 static int put_fpcr(QEMUFile
*f
, void *opaque
, size_t size
,
93 const VMStateField
*field
, JSONWriter
*vmdesc
)
96 CPUARMState
*env
= &cpu
->env
;
98 qemu_put_be64(f
, vfp_get_fpcr(env
));
102 static const VMStateInfo vmstate_fpcr
= {
108 static int get_fpsr(QEMUFile
*f
, void *opaque
, size_t size
,
109 const VMStateField
*field
)
111 ARMCPU
*cpu
= opaque
;
112 CPUARMState
*env
= &cpu
->env
;
113 uint64_t val
= qemu_get_be64(f
);
115 vfp_set_fpsr(env
, val
);
119 static int put_fpsr(QEMUFile
*f
, void *opaque
, size_t size
,
120 const VMStateField
*field
, JSONWriter
*vmdesc
)
122 ARMCPU
*cpu
= opaque
;
123 CPUARMState
*env
= &cpu
->env
;
125 qemu_put_be64(f
, vfp_get_fpsr(env
));
129 static const VMStateInfo vmstate_fpsr
= {
135 static const VMStateDescription vmstate_vfp_fpcr_fpsr
= {
136 .name
= "cpu/vfp/fpcr_fpsr",
138 .minimum_version_id
= 1,
139 .needed
= vfp_fpcr_fpsr_needed
,
140 .fields
= (const VMStateField
[]) {
144 .size
= sizeof(uint64_t),
145 .info
= &vmstate_fpcr
,
152 .size
= sizeof(uint64_t),
153 .info
= &vmstate_fpsr
,
157 VMSTATE_END_OF_LIST()
161 static const VMStateDescription vmstate_vfp
= {
164 .minimum_version_id
= 3,
165 .needed
= vfp_needed
,
166 .fields
= (const VMStateField
[]) {
167 /* For compatibility, store Qn out of Zn here. */
168 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[0].d
, ARMCPU
, 0, 2),
169 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[1].d
, ARMCPU
, 0, 2),
170 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[2].d
, ARMCPU
, 0, 2),
171 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[3].d
, ARMCPU
, 0, 2),
172 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[4].d
, ARMCPU
, 0, 2),
173 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[5].d
, ARMCPU
, 0, 2),
174 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[6].d
, ARMCPU
, 0, 2),
175 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[7].d
, ARMCPU
, 0, 2),
176 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[8].d
, ARMCPU
, 0, 2),
177 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[9].d
, ARMCPU
, 0, 2),
178 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[10].d
, ARMCPU
, 0, 2),
179 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[11].d
, ARMCPU
, 0, 2),
180 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[12].d
, ARMCPU
, 0, 2),
181 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[13].d
, ARMCPU
, 0, 2),
182 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[14].d
, ARMCPU
, 0, 2),
183 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[15].d
, ARMCPU
, 0, 2),
184 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[16].d
, ARMCPU
, 0, 2),
185 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[17].d
, ARMCPU
, 0, 2),
186 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[18].d
, ARMCPU
, 0, 2),
187 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[19].d
, ARMCPU
, 0, 2),
188 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[20].d
, ARMCPU
, 0, 2),
189 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[21].d
, ARMCPU
, 0, 2),
190 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[22].d
, ARMCPU
, 0, 2),
191 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[23].d
, ARMCPU
, 0, 2),
192 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[24].d
, ARMCPU
, 0, 2),
193 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[25].d
, ARMCPU
, 0, 2),
194 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[26].d
, ARMCPU
, 0, 2),
195 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[27].d
, ARMCPU
, 0, 2),
196 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[28].d
, ARMCPU
, 0, 2),
197 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[29].d
, ARMCPU
, 0, 2),
198 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[30].d
, ARMCPU
, 0, 2),
199 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[31].d
, ARMCPU
, 0, 2),
201 /* The xregs array is a little awkward because element 1 (FPSCR)
202 * requires a specific accessor, so we have to split it up in
205 VMSTATE_UINT32(env
.vfp
.xregs
[0], ARMCPU
),
206 VMSTATE_UINT32_SUB_ARRAY(env
.vfp
.xregs
, ARMCPU
, 2, 14),
210 .size
= sizeof(uint32_t),
211 .info
= &vmstate_fpscr
,
215 VMSTATE_END_OF_LIST()
217 .subsections
= (const VMStateDescription
* const []) {
218 &vmstate_vfp_fpcr_fpsr
,
223 static bool iwmmxt_needed(void *opaque
)
225 ARMCPU
*cpu
= opaque
;
226 CPUARMState
*env
= &cpu
->env
;
228 return arm_feature(env
, ARM_FEATURE_IWMMXT
);
231 static const VMStateDescription vmstate_iwmmxt
= {
232 .name
= "cpu/iwmmxt",
234 .minimum_version_id
= 1,
235 .needed
= iwmmxt_needed
,
236 .fields
= (const VMStateField
[]) {
237 VMSTATE_UINT64_ARRAY(env
.iwmmxt
.regs
, ARMCPU
, 16),
238 VMSTATE_UINT32_ARRAY(env
.iwmmxt
.cregs
, ARMCPU
, 16),
239 VMSTATE_END_OF_LIST()
243 #ifdef TARGET_AARCH64
244 /* The expression ARM_MAX_VQ - 2 is 0 for pure AArch32 build,
245 * and ARMPredicateReg is actively empty. This triggers errors
246 * in the expansion of the VMSTATE macros.
249 static bool sve_needed(void *opaque
)
251 ARMCPU
*cpu
= opaque
;
253 return cpu_isar_feature(aa64_sve
, cpu
);
256 /* The first two words of each Zreg is stored in VFP state. */
257 static const VMStateDescription vmstate_zreg_hi_reg
= {
258 .name
= "cpu/sve/zreg_hi",
260 .minimum_version_id
= 1,
261 .fields
= (const VMStateField
[]) {
262 VMSTATE_UINT64_SUB_ARRAY(d
, ARMVectorReg
, 2, ARM_MAX_VQ
- 2),
263 VMSTATE_END_OF_LIST()
267 static const VMStateDescription vmstate_preg_reg
= {
268 .name
= "cpu/sve/preg",
270 .minimum_version_id
= 1,
271 .fields
= (const VMStateField
[]) {
272 VMSTATE_UINT64_ARRAY(p
, ARMPredicateReg
, 2 * ARM_MAX_VQ
/ 8),
273 VMSTATE_END_OF_LIST()
277 static const VMStateDescription vmstate_sve
= {
280 .minimum_version_id
= 1,
281 .needed
= sve_needed
,
282 .fields
= (const VMStateField
[]) {
283 VMSTATE_STRUCT_ARRAY(env
.vfp
.zregs
, ARMCPU
, 32, 0,
284 vmstate_zreg_hi_reg
, ARMVectorReg
),
285 VMSTATE_STRUCT_ARRAY(env
.vfp
.pregs
, ARMCPU
, 17, 0,
286 vmstate_preg_reg
, ARMPredicateReg
),
287 VMSTATE_END_OF_LIST()
291 static const VMStateDescription vmstate_vreg
= {
294 .minimum_version_id
= 1,
295 .fields
= (const VMStateField
[]) {
296 VMSTATE_UINT64_ARRAY(d
, ARMVectorReg
, ARM_MAX_VQ
* 2),
297 VMSTATE_END_OF_LIST()
301 static bool za_needed(void *opaque
)
303 ARMCPU
*cpu
= opaque
;
306 * When ZA storage is disabled, its contents are discarded.
307 * It will be zeroed when ZA storage is re-enabled.
309 return FIELD_EX64(cpu
->env
.svcr
, SVCR
, ZA
);
312 static const VMStateDescription vmstate_za
= {
315 .minimum_version_id
= 1,
317 .fields
= (const VMStateField
[]) {
318 VMSTATE_STRUCT_ARRAY(env
.zarray
, ARMCPU
, ARM_MAX_VQ
* 16, 0,
319 vmstate_vreg
, ARMVectorReg
),
320 VMSTATE_END_OF_LIST()
325 static bool serror_needed(void *opaque
)
327 ARMCPU
*cpu
= opaque
;
328 CPUARMState
*env
= &cpu
->env
;
330 return env
->serror
.pending
!= 0;
333 static const VMStateDescription vmstate_serror
= {
334 .name
= "cpu/serror",
336 .minimum_version_id
= 1,
337 .needed
= serror_needed
,
338 .fields
= (const VMStateField
[]) {
339 VMSTATE_UINT8(env
.serror
.pending
, ARMCPU
),
340 VMSTATE_UINT8(env
.serror
.has_esr
, ARMCPU
),
341 VMSTATE_UINT64(env
.serror
.esr
, ARMCPU
),
342 VMSTATE_END_OF_LIST()
346 static bool irq_line_state_needed(void *opaque
)
351 static const VMStateDescription vmstate_irq_line_state
= {
352 .name
= "cpu/irq-line-state",
354 .minimum_version_id
= 1,
355 .needed
= irq_line_state_needed
,
356 .fields
= (const VMStateField
[]) {
357 VMSTATE_UINT32(env
.irq_line_state
, ARMCPU
),
358 VMSTATE_END_OF_LIST()
362 static bool wfxt_timer_needed(void *opaque
)
364 ARMCPU
*cpu
= opaque
;
366 /* We'll only have the timer object if FEAT_WFxT is implemented */
367 return cpu
->wfxt_timer
;
370 static const VMStateDescription vmstate_wfxt_timer
= {
371 .name
= "cpu/wfxt-timer",
373 .minimum_version_id
= 1,
374 .needed
= wfxt_timer_needed
,
375 .fields
= (const VMStateField
[]) {
376 VMSTATE_TIMER_PTR(wfxt_timer
, ARMCPU
),
377 VMSTATE_END_OF_LIST()
381 static bool m_needed(void *opaque
)
383 ARMCPU
*cpu
= opaque
;
384 CPUARMState
*env
= &cpu
->env
;
386 return arm_feature(env
, ARM_FEATURE_M
);
389 static const VMStateDescription vmstate_m_faultmask_primask
= {
390 .name
= "cpu/m/faultmask-primask",
392 .minimum_version_id
= 1,
394 .fields
= (const VMStateField
[]) {
395 VMSTATE_UINT32(env
.v7m
.faultmask
[M_REG_NS
], ARMCPU
),
396 VMSTATE_UINT32(env
.v7m
.primask
[M_REG_NS
], ARMCPU
),
397 VMSTATE_END_OF_LIST()
/* CSSELR is in a subsection because we didn't implement it previously.
 * Migration from an old implementation will leave it at zero, which
 * is OK since the only CPUs in the old implementation make the
 * CLIDR RAZ/WI.
 * Since there was no version of QEMU which implemented the CSSELR for
 * just non-secure, we transfer both banks here rather than putting
 * the secure banked version in the m-security subsection.
 */
409 static bool csselr_vmstate_validate(void *opaque
, int version_id
)
411 ARMCPU
*cpu
= opaque
;
413 return cpu
->env
.v7m
.csselr
[M_REG_NS
] <= R_V7M_CSSELR_INDEX_MASK
414 && cpu
->env
.v7m
.csselr
[M_REG_S
] <= R_V7M_CSSELR_INDEX_MASK
;
417 static bool m_csselr_needed(void *opaque
)
419 ARMCPU
*cpu
= opaque
;
421 return !arm_v7m_csselr_razwi(cpu
);
424 static const VMStateDescription vmstate_m_csselr
= {
425 .name
= "cpu/m/csselr",
427 .minimum_version_id
= 1,
428 .needed
= m_csselr_needed
,
429 .fields
= (const VMStateField
[]) {
430 VMSTATE_UINT32_ARRAY(env
.v7m
.csselr
, ARMCPU
, M_REG_NUM_BANKS
),
431 VMSTATE_VALIDATE("CSSELR is valid", csselr_vmstate_validate
),
432 VMSTATE_END_OF_LIST()
436 static const VMStateDescription vmstate_m_scr
= {
439 .minimum_version_id
= 1,
441 .fields
= (const VMStateField
[]) {
442 VMSTATE_UINT32(env
.v7m
.scr
[M_REG_NS
], ARMCPU
),
443 VMSTATE_END_OF_LIST()
447 static const VMStateDescription vmstate_m_other_sp
= {
448 .name
= "cpu/m/other-sp",
450 .minimum_version_id
= 1,
452 .fields
= (const VMStateField
[]) {
453 VMSTATE_UINT32(env
.v7m
.other_sp
, ARMCPU
),
454 VMSTATE_END_OF_LIST()
458 static bool m_v8m_needed(void *opaque
)
460 ARMCPU
*cpu
= opaque
;
461 CPUARMState
*env
= &cpu
->env
;
463 return arm_feature(env
, ARM_FEATURE_M
) && arm_feature(env
, ARM_FEATURE_V8
);
466 static const VMStateDescription vmstate_m_v8m
= {
469 .minimum_version_id
= 1,
470 .needed
= m_v8m_needed
,
471 .fields
= (const VMStateField
[]) {
472 VMSTATE_UINT32_ARRAY(env
.v7m
.msplim
, ARMCPU
, M_REG_NUM_BANKS
),
473 VMSTATE_UINT32_ARRAY(env
.v7m
.psplim
, ARMCPU
, M_REG_NUM_BANKS
),
474 VMSTATE_END_OF_LIST()
478 static const VMStateDescription vmstate_m_fp
= {
481 .minimum_version_id
= 1,
482 .needed
= vfp_needed
,
483 .fields
= (const VMStateField
[]) {
484 VMSTATE_UINT32_ARRAY(env
.v7m
.fpcar
, ARMCPU
, M_REG_NUM_BANKS
),
485 VMSTATE_UINT32_ARRAY(env
.v7m
.fpccr
, ARMCPU
, M_REG_NUM_BANKS
),
486 VMSTATE_UINT32_ARRAY(env
.v7m
.fpdscr
, ARMCPU
, M_REG_NUM_BANKS
),
487 VMSTATE_UINT32_ARRAY(env
.v7m
.cpacr
, ARMCPU
, M_REG_NUM_BANKS
),
488 VMSTATE_UINT32(env
.v7m
.nsacr
, ARMCPU
),
489 VMSTATE_END_OF_LIST()
493 static bool mve_needed(void *opaque
)
495 ARMCPU
*cpu
= opaque
;
497 return cpu_isar_feature(aa32_mve
, cpu
);
500 static const VMStateDescription vmstate_m_mve
= {
503 .minimum_version_id
= 1,
504 .needed
= mve_needed
,
505 .fields
= (const VMStateField
[]) {
506 VMSTATE_UINT32(env
.v7m
.vpr
, ARMCPU
),
507 VMSTATE_UINT32(env
.v7m
.ltpsize
, ARMCPU
),
508 VMSTATE_END_OF_LIST()
512 static const VMStateDescription vmstate_m
= {
515 .minimum_version_id
= 4,
517 .fields
= (const VMStateField
[]) {
518 VMSTATE_UINT32(env
.v7m
.vecbase
[M_REG_NS
], ARMCPU
),
519 VMSTATE_UINT32(env
.v7m
.basepri
[M_REG_NS
], ARMCPU
),
520 VMSTATE_UINT32(env
.v7m
.control
[M_REG_NS
], ARMCPU
),
521 VMSTATE_UINT32(env
.v7m
.ccr
[M_REG_NS
], ARMCPU
),
522 VMSTATE_UINT32(env
.v7m
.cfsr
[M_REG_NS
], ARMCPU
),
523 VMSTATE_UINT32(env
.v7m
.hfsr
, ARMCPU
),
524 VMSTATE_UINT32(env
.v7m
.dfsr
, ARMCPU
),
525 VMSTATE_UINT32(env
.v7m
.mmfar
[M_REG_NS
], ARMCPU
),
526 VMSTATE_UINT32(env
.v7m
.bfar
, ARMCPU
),
527 VMSTATE_UINT32(env
.v7m
.mpu_ctrl
[M_REG_NS
], ARMCPU
),
528 VMSTATE_INT32(env
.v7m
.exception
, ARMCPU
),
529 VMSTATE_END_OF_LIST()
531 .subsections
= (const VMStateDescription
* const []) {
532 &vmstate_m_faultmask_primask
,
543 static bool thumb2ee_needed(void *opaque
)
545 ARMCPU
*cpu
= opaque
;
546 CPUARMState
*env
= &cpu
->env
;
548 return arm_feature(env
, ARM_FEATURE_THUMB2EE
);
551 static const VMStateDescription vmstate_thumb2ee
= {
552 .name
= "cpu/thumb2ee",
554 .minimum_version_id
= 1,
555 .needed
= thumb2ee_needed
,
556 .fields
= (const VMStateField
[]) {
557 VMSTATE_UINT32(env
.teecr
, ARMCPU
),
558 VMSTATE_UINT32(env
.teehbr
, ARMCPU
),
559 VMSTATE_END_OF_LIST()
563 static bool pmsav7_needed(void *opaque
)
565 ARMCPU
*cpu
= opaque
;
566 CPUARMState
*env
= &cpu
->env
;
568 return arm_feature(env
, ARM_FEATURE_PMSA
) &&
569 arm_feature(env
, ARM_FEATURE_V7
) &&
570 !arm_feature(env
, ARM_FEATURE_V8
);
573 static bool pmsav7_rgnr_vmstate_validate(void *opaque
, int version_id
)
575 ARMCPU
*cpu
= opaque
;
577 return cpu
->env
.pmsav7
.rnr
[M_REG_NS
] < cpu
->pmsav7_dregion
;
580 static const VMStateDescription vmstate_pmsav7
= {
581 .name
= "cpu/pmsav7",
583 .minimum_version_id
= 1,
584 .needed
= pmsav7_needed
,
585 .fields
= (const VMStateField
[]) {
586 VMSTATE_VARRAY_UINT32(env
.pmsav7
.drbar
, ARMCPU
, pmsav7_dregion
, 0,
587 vmstate_info_uint32
, uint32_t),
588 VMSTATE_VARRAY_UINT32(env
.pmsav7
.drsr
, ARMCPU
, pmsav7_dregion
, 0,
589 vmstate_info_uint32
, uint32_t),
590 VMSTATE_VARRAY_UINT32(env
.pmsav7
.dracr
, ARMCPU
, pmsav7_dregion
, 0,
591 vmstate_info_uint32
, uint32_t),
592 VMSTATE_VALIDATE("rgnr is valid", pmsav7_rgnr_vmstate_validate
),
593 VMSTATE_END_OF_LIST()
597 static bool pmsav7_rnr_needed(void *opaque
)
599 ARMCPU
*cpu
= opaque
;
600 CPUARMState
*env
= &cpu
->env
;
602 /* For R profile cores pmsav7.rnr is migrated via the cpreg
603 * "RGNR" definition in helper.h. For M profile we have to
604 * migrate it separately.
606 return arm_feature(env
, ARM_FEATURE_M
);
609 static const VMStateDescription vmstate_pmsav7_rnr
= {
610 .name
= "cpu/pmsav7-rnr",
612 .minimum_version_id
= 1,
613 .needed
= pmsav7_rnr_needed
,
614 .fields
= (const VMStateField
[]) {
615 VMSTATE_UINT32(env
.pmsav7
.rnr
[M_REG_NS
], ARMCPU
),
616 VMSTATE_END_OF_LIST()
620 static bool pmsav8_needed(void *opaque
)
622 ARMCPU
*cpu
= opaque
;
623 CPUARMState
*env
= &cpu
->env
;
625 return arm_feature(env
, ARM_FEATURE_PMSA
) &&
626 arm_feature(env
, ARM_FEATURE_V8
);
629 static bool pmsav8r_needed(void *opaque
)
631 ARMCPU
*cpu
= opaque
;
632 CPUARMState
*env
= &cpu
->env
;
634 return arm_feature(env
, ARM_FEATURE_PMSA
) &&
635 arm_feature(env
, ARM_FEATURE_V8
) &&
636 !arm_feature(env
, ARM_FEATURE_M
);
639 static const VMStateDescription vmstate_pmsav8r
= {
640 .name
= "cpu/pmsav8/pmsav8r",
642 .minimum_version_id
= 1,
643 .needed
= pmsav8r_needed
,
644 .fields
= (const VMStateField
[]) {
645 VMSTATE_VARRAY_UINT32(env
.pmsav8
.hprbar
, ARMCPU
,
646 pmsav8r_hdregion
, 0, vmstate_info_uint32
, uint32_t),
647 VMSTATE_VARRAY_UINT32(env
.pmsav8
.hprlar
, ARMCPU
,
648 pmsav8r_hdregion
, 0, vmstate_info_uint32
, uint32_t),
649 VMSTATE_END_OF_LIST()
653 static const VMStateDescription vmstate_pmsav8
= {
654 .name
= "cpu/pmsav8",
656 .minimum_version_id
= 1,
657 .needed
= pmsav8_needed
,
658 .fields
= (const VMStateField
[]) {
659 VMSTATE_VARRAY_UINT32(env
.pmsav8
.rbar
[M_REG_NS
], ARMCPU
, pmsav7_dregion
,
660 0, vmstate_info_uint32
, uint32_t),
661 VMSTATE_VARRAY_UINT32(env
.pmsav8
.rlar
[M_REG_NS
], ARMCPU
, pmsav7_dregion
,
662 0, vmstate_info_uint32
, uint32_t),
663 VMSTATE_UINT32(env
.pmsav8
.mair0
[M_REG_NS
], ARMCPU
),
664 VMSTATE_UINT32(env
.pmsav8
.mair1
[M_REG_NS
], ARMCPU
),
665 VMSTATE_END_OF_LIST()
667 .subsections
= (const VMStateDescription
* const []) {
673 static bool s_rnr_vmstate_validate(void *opaque
, int version_id
)
675 ARMCPU
*cpu
= opaque
;
677 return cpu
->env
.pmsav7
.rnr
[M_REG_S
] < cpu
->pmsav7_dregion
;
680 static bool sau_rnr_vmstate_validate(void *opaque
, int version_id
)
682 ARMCPU
*cpu
= opaque
;
684 return cpu
->env
.sau
.rnr
< cpu
->sau_sregion
;
687 static bool m_security_needed(void *opaque
)
689 ARMCPU
*cpu
= opaque
;
690 CPUARMState
*env
= &cpu
->env
;
692 return arm_feature(env
, ARM_FEATURE_M_SECURITY
);
695 static const VMStateDescription vmstate_m_security
= {
696 .name
= "cpu/m-security",
698 .minimum_version_id
= 1,
699 .needed
= m_security_needed
,
700 .fields
= (const VMStateField
[]) {
701 VMSTATE_UINT32(env
.v7m
.secure
, ARMCPU
),
702 VMSTATE_UINT32(env
.v7m
.other_ss_msp
, ARMCPU
),
703 VMSTATE_UINT32(env
.v7m
.other_ss_psp
, ARMCPU
),
704 VMSTATE_UINT32(env
.v7m
.basepri
[M_REG_S
], ARMCPU
),
705 VMSTATE_UINT32(env
.v7m
.primask
[M_REG_S
], ARMCPU
),
706 VMSTATE_UINT32(env
.v7m
.faultmask
[M_REG_S
], ARMCPU
),
707 VMSTATE_UINT32(env
.v7m
.control
[M_REG_S
], ARMCPU
),
708 VMSTATE_UINT32(env
.v7m
.vecbase
[M_REG_S
], ARMCPU
),
709 VMSTATE_UINT32(env
.pmsav8
.mair0
[M_REG_S
], ARMCPU
),
710 VMSTATE_UINT32(env
.pmsav8
.mair1
[M_REG_S
], ARMCPU
),
711 VMSTATE_VARRAY_UINT32(env
.pmsav8
.rbar
[M_REG_S
], ARMCPU
, pmsav7_dregion
,
712 0, vmstate_info_uint32
, uint32_t),
713 VMSTATE_VARRAY_UINT32(env
.pmsav8
.rlar
[M_REG_S
], ARMCPU
, pmsav7_dregion
,
714 0, vmstate_info_uint32
, uint32_t),
715 VMSTATE_UINT32(env
.pmsav7
.rnr
[M_REG_S
], ARMCPU
),
716 VMSTATE_VALIDATE("secure MPU_RNR is valid", s_rnr_vmstate_validate
),
717 VMSTATE_UINT32(env
.v7m
.mpu_ctrl
[M_REG_S
], ARMCPU
),
718 VMSTATE_UINT32(env
.v7m
.ccr
[M_REG_S
], ARMCPU
),
719 VMSTATE_UINT32(env
.v7m
.mmfar
[M_REG_S
], ARMCPU
),
720 VMSTATE_UINT32(env
.v7m
.cfsr
[M_REG_S
], ARMCPU
),
721 VMSTATE_UINT32(env
.v7m
.sfsr
, ARMCPU
),
722 VMSTATE_UINT32(env
.v7m
.sfar
, ARMCPU
),
723 VMSTATE_VARRAY_UINT32(env
.sau
.rbar
, ARMCPU
, sau_sregion
, 0,
724 vmstate_info_uint32
, uint32_t),
725 VMSTATE_VARRAY_UINT32(env
.sau
.rlar
, ARMCPU
, sau_sregion
, 0,
726 vmstate_info_uint32
, uint32_t),
727 VMSTATE_UINT32(env
.sau
.rnr
, ARMCPU
),
728 VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate
),
729 VMSTATE_UINT32(env
.sau
.ctrl
, ARMCPU
),
730 VMSTATE_UINT32(env
.v7m
.scr
[M_REG_S
], ARMCPU
),
731 /* AIRCR is not secure-only, but our implementation is R/O if the
732 * security extension is unimplemented, so we migrate it here.
734 VMSTATE_UINT32(env
.v7m
.aircr
, ARMCPU
),
735 VMSTATE_END_OF_LIST()
739 static int get_cpsr(QEMUFile
*f
, void *opaque
, size_t size
,
740 const VMStateField
*field
)
742 ARMCPU
*cpu
= opaque
;
743 CPUARMState
*env
= &cpu
->env
;
744 uint32_t val
= qemu_get_be32(f
);
746 if (arm_feature(env
, ARM_FEATURE_M
)) {
747 if (val
& XPSR_EXCP
) {
748 /* This is a CPSR format value from an older QEMU. (We can tell
749 * because values transferred in XPSR format always have zero
750 * for the EXCP field, and CPSR format will always have bit 4
751 * set in CPSR_M.) Rearrange it into XPSR format. The significant
752 * differences are that the T bit is not in the same place, the
753 * primask/faultmask info may be in the CPSR I and F bits, and
754 * we do not want the mode bits.
755 * We know that this cleanup happened before v8M, so there
756 * is no complication with banked primask/faultmask.
758 uint32_t newval
= val
;
760 assert(!arm_feature(env
, ARM_FEATURE_M_SECURITY
));
762 newval
&= (CPSR_NZCV
| CPSR_Q
| CPSR_IT
| CPSR_GE
);
766 /* If the I or F bits are set then this is a migration from
767 * an old QEMU which still stored the M profile FAULTMASK
768 * and PRIMASK in env->daif. For a new QEMU, the data is
769 * transferred using the vmstate_m_faultmask_primask subsection.
772 env
->v7m
.faultmask
[M_REG_NS
] = 1;
775 env
->v7m
.primask
[M_REG_NS
] = 1;
779 /* Ignore the low bits, they are handled by vmstate_m. */
780 xpsr_write(env
, val
, ~XPSR_EXCP
);
784 env
->aarch64
= ((val
& PSTATE_nRW
) == 0);
787 pstate_write(env
, val
);
791 cpsr_write(env
, val
, 0xffffffff, CPSRWriteRaw
);
795 static int put_cpsr(QEMUFile
*f
, void *opaque
, size_t size
,
796 const VMStateField
*field
, JSONWriter
*vmdesc
)
798 ARMCPU
*cpu
= opaque
;
799 CPUARMState
*env
= &cpu
->env
;
802 if (arm_feature(env
, ARM_FEATURE_M
)) {
803 /* The low 9 bits are v7m.exception, which is handled by vmstate_m. */
804 val
= xpsr_read(env
) & ~XPSR_EXCP
;
805 } else if (is_a64(env
)) {
806 val
= pstate_read(env
);
808 val
= cpsr_read(env
);
811 qemu_put_be32(f
, val
);
815 static const VMStateInfo vmstate_cpsr
= {
821 static int get_power(QEMUFile
*f
, void *opaque
, size_t size
,
822 const VMStateField
*field
)
824 ARMCPU
*cpu
= opaque
;
825 bool powered_off
= qemu_get_byte(f
);
826 cpu
->power_state
= powered_off
? PSCI_OFF
: PSCI_ON
;
830 static int put_power(QEMUFile
*f
, void *opaque
, size_t size
,
831 const VMStateField
*field
, JSONWriter
*vmdesc
)
833 ARMCPU
*cpu
= opaque
;
835 /* Migration should never happen while we transition power states */
837 if (cpu
->power_state
== PSCI_ON
||
838 cpu
->power_state
== PSCI_OFF
) {
839 bool powered_off
= (cpu
->power_state
== PSCI_OFF
) ? true : false;
840 qemu_put_byte(f
, powered_off
);
847 static const VMStateInfo vmstate_powered_off
= {
848 .name
= "powered_off",
853 static int cpu_pre_save(void *opaque
)
855 ARMCPU
*cpu
= opaque
;
857 if (!kvm_enabled()) {
858 pmu_op_start(&cpu
->env
);
862 if (!write_kvmstate_to_list(cpu
)) {
863 /* This should never fail */
864 g_assert_not_reached();
868 * kvm_arm_cpu_pre_save() must be called after
869 * write_kvmstate_to_list()
871 kvm_arm_cpu_pre_save(cpu
);
873 if (!write_cpustate_to_list(cpu
, false)) {
874 /* This should never fail. */
875 g_assert_not_reached();
879 cpu
->cpreg_vmstate_array_len
= cpu
->cpreg_array_len
;
880 memcpy(cpu
->cpreg_vmstate_indexes
, cpu
->cpreg_indexes
,
881 cpu
->cpreg_array_len
* sizeof(uint64_t));
882 memcpy(cpu
->cpreg_vmstate_values
, cpu
->cpreg_values
,
883 cpu
->cpreg_array_len
* sizeof(uint64_t));
888 static int cpu_post_save(void *opaque
)
890 ARMCPU
*cpu
= opaque
;
892 if (!kvm_enabled()) {
893 pmu_op_finish(&cpu
->env
);
899 static int cpu_pre_load(void *opaque
)
901 ARMCPU
*cpu
= opaque
;
902 CPUARMState
*env
= &cpu
->env
;
905 * In an inbound migration where on the source FPSCR/FPSR/FPCR are 0,
906 * there will be no fpcr_fpsr subsection so we won't call vfp_set_fpcr()
907 * and vfp_set_fpsr() from get_fpcr() and get_fpsr(); also the get_fpscr()
908 * function will not call vfp_set_fpscr() because it will see a 0 in the
909 * inbound data. Ensure that in this case we have a correctly set up
910 * zero FPSCR/FPCR/FPSR.
912 * This is not strictly needed because FPSCR is zero out of reset, but
913 * it avoids the possibility of future confusing migration bugs if some
914 * future architecture change makes the reset value non-zero.
916 vfp_set_fpscr(env
, 0);
919 * Pre-initialize irq_line_state to a value that's never valid as
920 * real data, so cpu_post_load() can tell whether we've seen the
921 * irq-line-state subsection in the incoming migration state.
923 env
->irq_line_state
= UINT32_MAX
;
925 if (!kvm_enabled()) {
932 static int cpu_post_load(void *opaque
, int version_id
)
934 ARMCPU
*cpu
= opaque
;
935 CPUARMState
*env
= &cpu
->env
;
939 * Handle migration compatibility from old QEMU which didn't
940 * send the irq-line-state subsection. A QEMU without it did not
941 * implement the HCR_EL2.{VI,VF} bits as generating interrupts,
942 * so for TCG the line state matches the bits set in cs->interrupt_request.
943 * For KVM the line state is not stored in cs->interrupt_request
944 * and so this will leave irq_line_state as 0, but this is OK because
945 * we only need to care about it for TCG.
947 if (env
->irq_line_state
== UINT32_MAX
) {
948 CPUState
*cs
= CPU(cpu
);
950 env
->irq_line_state
= cs
->interrupt_request
&
951 (CPU_INTERRUPT_HARD
| CPU_INTERRUPT_FIQ
|
952 CPU_INTERRUPT_VIRQ
| CPU_INTERRUPT_VFIQ
);
955 /* Update the values list from the incoming migration data.
956 * Anything in the incoming data which we don't know about is
957 * a migration failure; anything we know about but the incoming
958 * data doesn't specify retains its current (reset) value.
959 * The indexes list remains untouched -- we only inspect the
960 * incoming migration index list so we can match the values array
961 * entries with the right slots in our own values array.
964 for (i
= 0, v
= 0; i
< cpu
->cpreg_array_len
965 && v
< cpu
->cpreg_vmstate_array_len
; i
++) {
966 if (cpu
->cpreg_vmstate_indexes
[v
] > cpu
->cpreg_indexes
[i
]) {
967 /* register in our list but not incoming : skip it */
970 if (cpu
->cpreg_vmstate_indexes
[v
] < cpu
->cpreg_indexes
[i
]) {
971 /* register in their list but not ours: fail migration */
974 /* matching register, copy the value over */
975 cpu
->cpreg_values
[i
] = cpu
->cpreg_vmstate_values
[v
];
980 if (!write_list_to_kvmstate(cpu
, KVM_PUT_FULL_STATE
)) {
983 /* Note that it's OK for the TCG side not to know about
984 * every register in the list; KVM is authoritative if
987 write_list_to_cpustate(cpu
);
988 kvm_arm_cpu_post_load(cpu
);
990 if (!write_list_to_cpustate(cpu
)) {
996 * Misaligned thumb pc is architecturally impossible. Fail the
997 * incoming migration. For TCG it would trigger the assert in
998 * thumb_tr_translate_insn().
1000 if (!is_a64(env
) && env
->thumb
&& (env
->regs
[15] & 1)) {
1004 if (tcg_enabled()) {
1005 hw_breakpoint_update_all(cpu
);
1006 hw_watchpoint_update_all(cpu
);
1010 * TCG gen_update_fp_context() relies on the invariant that
1011 * FPDSCR.LTPSIZE is constant 4 for M-profile with the LOB extension;
1012 * forbid bogus incoming data with some other value.
1014 if (arm_feature(env
, ARM_FEATURE_M
) && cpu_isar_feature(aa32_lob
, cpu
)) {
1015 if (extract32(env
->v7m
.fpdscr
[M_REG_NS
],
1016 FPCR_LTPSIZE_SHIFT
, FPCR_LTPSIZE_LENGTH
) != 4 ||
1017 extract32(env
->v7m
.fpdscr
[M_REG_S
],
1018 FPCR_LTPSIZE_SHIFT
, FPCR_LTPSIZE_LENGTH
) != 4) {
1023 if (!kvm_enabled()) {
1027 if (tcg_enabled()) {
1028 arm_rebuild_hflags(env
);
1034 const VMStateDescription vmstate_arm_cpu
= {
1037 .minimum_version_id
= 22,
1038 .pre_save
= cpu_pre_save
,
1039 .post_save
= cpu_post_save
,
1040 .pre_load
= cpu_pre_load
,
1041 .post_load
= cpu_post_load
,
1042 .fields
= (const VMStateField
[]) {
1043 VMSTATE_UINT32_ARRAY(env
.regs
, ARMCPU
, 16),
1044 VMSTATE_UINT64_ARRAY(env
.xregs
, ARMCPU
, 32),
1045 VMSTATE_UINT64(env
.pc
, ARMCPU
),
1049 .size
= sizeof(uint32_t),
1050 .info
= &vmstate_cpsr
,
1051 .flags
= VMS_SINGLE
,
1054 VMSTATE_UINT32(env
.spsr
, ARMCPU
),
1055 VMSTATE_UINT64_ARRAY(env
.banked_spsr
, ARMCPU
, 8),
1056 VMSTATE_UINT32_ARRAY(env
.banked_r13
, ARMCPU
, 8),
1057 VMSTATE_UINT32_ARRAY(env
.banked_r14
, ARMCPU
, 8),
1058 VMSTATE_UINT32_ARRAY(env
.usr_regs
, ARMCPU
, 5),
1059 VMSTATE_UINT32_ARRAY(env
.fiq_regs
, ARMCPU
, 5),
1060 VMSTATE_UINT64_ARRAY(env
.elr_el
, ARMCPU
, 4),
1061 VMSTATE_UINT64_ARRAY(env
.sp_el
, ARMCPU
, 4),
1062 /* The length-check must come before the arrays to avoid
1063 * incoming data possibly overflowing the array.
1065 VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len
, ARMCPU
),
1066 VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes
, ARMCPU
,
1067 cpreg_vmstate_array_len
,
1068 0, vmstate_info_uint64
, uint64_t),
1069 VMSTATE_VARRAY_INT32(cpreg_vmstate_values
, ARMCPU
,
1070 cpreg_vmstate_array_len
,
1071 0, vmstate_info_uint64
, uint64_t),
1072 VMSTATE_UINT64(env
.exclusive_addr
, ARMCPU
),
1073 VMSTATE_UINT64(env
.exclusive_val
, ARMCPU
),
1074 VMSTATE_UINT64(env
.exclusive_high
, ARMCPU
),
1075 VMSTATE_UNUSED(sizeof(uint64_t)),
1076 VMSTATE_UINT32(env
.exception
.syndrome
, ARMCPU
),
1077 VMSTATE_UINT32(env
.exception
.fsr
, ARMCPU
),
1078 VMSTATE_UINT64(env
.exception
.vaddress
, ARMCPU
),
1079 VMSTATE_TIMER_PTR(gt_timer
[GTIMER_PHYS
], ARMCPU
),
1080 VMSTATE_TIMER_PTR(gt_timer
[GTIMER_VIRT
], ARMCPU
),
1082 .name
= "power_state",
1084 .size
= sizeof(bool),
1085 .info
= &vmstate_powered_off
,
1086 .flags
= VMS_SINGLE
,
1089 VMSTATE_END_OF_LIST()
1091 .subsections
= (const VMStateDescription
* const []) {
1096 /* pmsav7_rnr must come before pmsav7 so that we have the
1097 * region number before we test it in the VMSTATE_VALIDATE
1098 * in vmstate_pmsav7.
1100 &vmstate_pmsav7_rnr
,
1103 &vmstate_m_security
,
1104 #ifdef TARGET_AARCH64
1109 &vmstate_irq_line_state
,
1110 &vmstate_wfxt_timer
,