1 #include "qemu/osdep.h"
3 #include "qemu/error-report.h"
4 #include "sysemu/kvm.h"
5 #include "sysemu/tcg.h"
8 #include "cpu-features.h"
9 #include "migration/cpu.h"
10 #include "target/arm/gtimer.h"
12 static bool vfp_needed(void *opaque
)
16 return (arm_feature(&cpu
->env
, ARM_FEATURE_AARCH64
)
17 ? cpu_isar_feature(aa64_fp_simd
, cpu
)
18 : cpu_isar_feature(aa32_vfp_simd
, cpu
));
21 static int get_fpscr(QEMUFile
*f
, void *opaque
, size_t size
,
22 const VMStateField
*field
)
25 CPUARMState
*env
= &cpu
->env
;
26 uint32_t val
= qemu_get_be32(f
);
28 vfp_set_fpscr(env
, val
);
32 static int put_fpscr(QEMUFile
*f
, void *opaque
, size_t size
,
33 const VMStateField
*field
, JSONWriter
*vmdesc
)
36 CPUARMState
*env
= &cpu
->env
;
38 qemu_put_be32(f
, vfp_get_fpscr(env
));
42 static const VMStateInfo vmstate_fpscr
= {
48 static const VMStateDescription vmstate_vfp
= {
51 .minimum_version_id
= 3,
53 .fields
= (const VMStateField
[]) {
54 /* For compatibility, store Qn out of Zn here. */
55 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[0].d
, ARMCPU
, 0, 2),
56 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[1].d
, ARMCPU
, 0, 2),
57 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[2].d
, ARMCPU
, 0, 2),
58 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[3].d
, ARMCPU
, 0, 2),
59 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[4].d
, ARMCPU
, 0, 2),
60 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[5].d
, ARMCPU
, 0, 2),
61 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[6].d
, ARMCPU
, 0, 2),
62 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[7].d
, ARMCPU
, 0, 2),
63 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[8].d
, ARMCPU
, 0, 2),
64 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[9].d
, ARMCPU
, 0, 2),
65 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[10].d
, ARMCPU
, 0, 2),
66 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[11].d
, ARMCPU
, 0, 2),
67 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[12].d
, ARMCPU
, 0, 2),
68 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[13].d
, ARMCPU
, 0, 2),
69 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[14].d
, ARMCPU
, 0, 2),
70 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[15].d
, ARMCPU
, 0, 2),
71 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[16].d
, ARMCPU
, 0, 2),
72 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[17].d
, ARMCPU
, 0, 2),
73 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[18].d
, ARMCPU
, 0, 2),
74 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[19].d
, ARMCPU
, 0, 2),
75 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[20].d
, ARMCPU
, 0, 2),
76 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[21].d
, ARMCPU
, 0, 2),
77 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[22].d
, ARMCPU
, 0, 2),
78 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[23].d
, ARMCPU
, 0, 2),
79 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[24].d
, ARMCPU
, 0, 2),
80 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[25].d
, ARMCPU
, 0, 2),
81 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[26].d
, ARMCPU
, 0, 2),
82 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[27].d
, ARMCPU
, 0, 2),
83 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[28].d
, ARMCPU
, 0, 2),
84 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[29].d
, ARMCPU
, 0, 2),
85 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[30].d
, ARMCPU
, 0, 2),
86 VMSTATE_UINT64_SUB_ARRAY(env
.vfp
.zregs
[31].d
, ARMCPU
, 0, 2),
88 /* The xregs array is a little awkward because element 1 (FPSCR)
89 * requires a specific accessor, so we have to split it up in
92 VMSTATE_UINT32(env
.vfp
.xregs
[0], ARMCPU
),
93 VMSTATE_UINT32_SUB_ARRAY(env
.vfp
.xregs
, ARMCPU
, 2, 14),
97 .size
= sizeof(uint32_t),
98 .info
= &vmstate_fpscr
,
102 VMSTATE_END_OF_LIST()
106 static bool iwmmxt_needed(void *opaque
)
108 ARMCPU
*cpu
= opaque
;
109 CPUARMState
*env
= &cpu
->env
;
111 return arm_feature(env
, ARM_FEATURE_IWMMXT
);
114 static const VMStateDescription vmstate_iwmmxt
= {
115 .name
= "cpu/iwmmxt",
117 .minimum_version_id
= 1,
118 .needed
= iwmmxt_needed
,
119 .fields
= (const VMStateField
[]) {
120 VMSTATE_UINT64_ARRAY(env
.iwmmxt
.regs
, ARMCPU
, 16),
121 VMSTATE_UINT32_ARRAY(env
.iwmmxt
.cregs
, ARMCPU
, 16),
122 VMSTATE_END_OF_LIST()
126 #ifdef TARGET_AARCH64
127 /* The expression ARM_MAX_VQ - 2 is 0 for pure AArch32 build,
128 * and ARMPredicateReg is actively empty. This triggers errors
129 * in the expansion of the VMSTATE macros.
132 static bool sve_needed(void *opaque
)
134 ARMCPU
*cpu
= opaque
;
136 return cpu_isar_feature(aa64_sve
, cpu
);
139 /* The first two words of each Zreg is stored in VFP state. */
140 static const VMStateDescription vmstate_zreg_hi_reg
= {
141 .name
= "cpu/sve/zreg_hi",
143 .minimum_version_id
= 1,
144 .fields
= (const VMStateField
[]) {
145 VMSTATE_UINT64_SUB_ARRAY(d
, ARMVectorReg
, 2, ARM_MAX_VQ
- 2),
146 VMSTATE_END_OF_LIST()
150 static const VMStateDescription vmstate_preg_reg
= {
151 .name
= "cpu/sve/preg",
153 .minimum_version_id
= 1,
154 .fields
= (const VMStateField
[]) {
155 VMSTATE_UINT64_ARRAY(p
, ARMPredicateReg
, 2 * ARM_MAX_VQ
/ 8),
156 VMSTATE_END_OF_LIST()
160 static const VMStateDescription vmstate_sve
= {
163 .minimum_version_id
= 1,
164 .needed
= sve_needed
,
165 .fields
= (const VMStateField
[]) {
166 VMSTATE_STRUCT_ARRAY(env
.vfp
.zregs
, ARMCPU
, 32, 0,
167 vmstate_zreg_hi_reg
, ARMVectorReg
),
168 VMSTATE_STRUCT_ARRAY(env
.vfp
.pregs
, ARMCPU
, 17, 0,
169 vmstate_preg_reg
, ARMPredicateReg
),
170 VMSTATE_END_OF_LIST()
174 static const VMStateDescription vmstate_vreg
= {
177 .minimum_version_id
= 1,
178 .fields
= (const VMStateField
[]) {
179 VMSTATE_UINT64_ARRAY(d
, ARMVectorReg
, ARM_MAX_VQ
* 2),
180 VMSTATE_END_OF_LIST()
184 static bool za_needed(void *opaque
)
186 ARMCPU
*cpu
= opaque
;
189 * When ZA storage is disabled, its contents are discarded.
190 * It will be zeroed when ZA storage is re-enabled.
192 return FIELD_EX64(cpu
->env
.svcr
, SVCR
, ZA
);
195 static const VMStateDescription vmstate_za
= {
198 .minimum_version_id
= 1,
200 .fields
= (const VMStateField
[]) {
201 VMSTATE_STRUCT_ARRAY(env
.zarray
, ARMCPU
, ARM_MAX_VQ
* 16, 0,
202 vmstate_vreg
, ARMVectorReg
),
203 VMSTATE_END_OF_LIST()
208 static bool serror_needed(void *opaque
)
210 ARMCPU
*cpu
= opaque
;
211 CPUARMState
*env
= &cpu
->env
;
213 return env
->serror
.pending
!= 0;
216 static const VMStateDescription vmstate_serror
= {
217 .name
= "cpu/serror",
219 .minimum_version_id
= 1,
220 .needed
= serror_needed
,
221 .fields
= (const VMStateField
[]) {
222 VMSTATE_UINT8(env
.serror
.pending
, ARMCPU
),
223 VMSTATE_UINT8(env
.serror
.has_esr
, ARMCPU
),
224 VMSTATE_UINT64(env
.serror
.esr
, ARMCPU
),
225 VMSTATE_END_OF_LIST()
/*
 * Always send the irq-line-state subsection; cpu_pre_load()/cpu_post_load()
 * use its absence to detect migration from an older QEMU.
 */
static bool irq_line_state_needed(void *opaque)
{
    return true;
}
234 static const VMStateDescription vmstate_irq_line_state
= {
235 .name
= "cpu/irq-line-state",
237 .minimum_version_id
= 1,
238 .needed
= irq_line_state_needed
,
239 .fields
= (const VMStateField
[]) {
240 VMSTATE_UINT32(env
.irq_line_state
, ARMCPU
),
241 VMSTATE_END_OF_LIST()
245 static bool wfxt_timer_needed(void *opaque
)
247 ARMCPU
*cpu
= opaque
;
249 /* We'll only have the timer object if FEAT_WFxT is implemented */
250 return cpu
->wfxt_timer
;
253 static const VMStateDescription vmstate_wfxt_timer
= {
254 .name
= "cpu/wfxt-timer",
256 .minimum_version_id
= 1,
257 .needed
= wfxt_timer_needed
,
258 .fields
= (const VMStateField
[]) {
259 VMSTATE_TIMER_PTR(wfxt_timer
, ARMCPU
),
260 VMSTATE_END_OF_LIST()
264 static bool m_needed(void *opaque
)
266 ARMCPU
*cpu
= opaque
;
267 CPUARMState
*env
= &cpu
->env
;
269 return arm_feature(env
, ARM_FEATURE_M
);
272 static const VMStateDescription vmstate_m_faultmask_primask
= {
273 .name
= "cpu/m/faultmask-primask",
275 .minimum_version_id
= 1,
277 .fields
= (const VMStateField
[]) {
278 VMSTATE_UINT32(env
.v7m
.faultmask
[M_REG_NS
], ARMCPU
),
279 VMSTATE_UINT32(env
.v7m
.primask
[M_REG_NS
], ARMCPU
),
280 VMSTATE_END_OF_LIST()
284 /* CSSELR is in a subsection because we didn't implement it previously.
285 * Migration from an old implementation will leave it at zero, which
286 * is OK since the only CPUs in the old implementation make the
288 * Since there was no version of QEMU which implemented the CSSELR for
289 * just non-secure, we transfer both banks here rather than putting
290 * the secure banked version in the m-security subsection.
292 static bool csselr_vmstate_validate(void *opaque
, int version_id
)
294 ARMCPU
*cpu
= opaque
;
296 return cpu
->env
.v7m
.csselr
[M_REG_NS
] <= R_V7M_CSSELR_INDEX_MASK
297 && cpu
->env
.v7m
.csselr
[M_REG_S
] <= R_V7M_CSSELR_INDEX_MASK
;
300 static bool m_csselr_needed(void *opaque
)
302 ARMCPU
*cpu
= opaque
;
304 return !arm_v7m_csselr_razwi(cpu
);
307 static const VMStateDescription vmstate_m_csselr
= {
308 .name
= "cpu/m/csselr",
310 .minimum_version_id
= 1,
311 .needed
= m_csselr_needed
,
312 .fields
= (const VMStateField
[]) {
313 VMSTATE_UINT32_ARRAY(env
.v7m
.csselr
, ARMCPU
, M_REG_NUM_BANKS
),
314 VMSTATE_VALIDATE("CSSELR is valid", csselr_vmstate_validate
),
315 VMSTATE_END_OF_LIST()
319 static const VMStateDescription vmstate_m_scr
= {
322 .minimum_version_id
= 1,
324 .fields
= (const VMStateField
[]) {
325 VMSTATE_UINT32(env
.v7m
.scr
[M_REG_NS
], ARMCPU
),
326 VMSTATE_END_OF_LIST()
330 static const VMStateDescription vmstate_m_other_sp
= {
331 .name
= "cpu/m/other-sp",
333 .minimum_version_id
= 1,
335 .fields
= (const VMStateField
[]) {
336 VMSTATE_UINT32(env
.v7m
.other_sp
, ARMCPU
),
337 VMSTATE_END_OF_LIST()
341 static bool m_v8m_needed(void *opaque
)
343 ARMCPU
*cpu
= opaque
;
344 CPUARMState
*env
= &cpu
->env
;
346 return arm_feature(env
, ARM_FEATURE_M
) && arm_feature(env
, ARM_FEATURE_V8
);
349 static const VMStateDescription vmstate_m_v8m
= {
352 .minimum_version_id
= 1,
353 .needed
= m_v8m_needed
,
354 .fields
= (const VMStateField
[]) {
355 VMSTATE_UINT32_ARRAY(env
.v7m
.msplim
, ARMCPU
, M_REG_NUM_BANKS
),
356 VMSTATE_UINT32_ARRAY(env
.v7m
.psplim
, ARMCPU
, M_REG_NUM_BANKS
),
357 VMSTATE_END_OF_LIST()
361 static const VMStateDescription vmstate_m_fp
= {
364 .minimum_version_id
= 1,
365 .needed
= vfp_needed
,
366 .fields
= (const VMStateField
[]) {
367 VMSTATE_UINT32_ARRAY(env
.v7m
.fpcar
, ARMCPU
, M_REG_NUM_BANKS
),
368 VMSTATE_UINT32_ARRAY(env
.v7m
.fpccr
, ARMCPU
, M_REG_NUM_BANKS
),
369 VMSTATE_UINT32_ARRAY(env
.v7m
.fpdscr
, ARMCPU
, M_REG_NUM_BANKS
),
370 VMSTATE_UINT32_ARRAY(env
.v7m
.cpacr
, ARMCPU
, M_REG_NUM_BANKS
),
371 VMSTATE_UINT32(env
.v7m
.nsacr
, ARMCPU
),
372 VMSTATE_END_OF_LIST()
376 static bool mve_needed(void *opaque
)
378 ARMCPU
*cpu
= opaque
;
380 return cpu_isar_feature(aa32_mve
, cpu
);
383 static const VMStateDescription vmstate_m_mve
= {
386 .minimum_version_id
= 1,
387 .needed
= mve_needed
,
388 .fields
= (const VMStateField
[]) {
389 VMSTATE_UINT32(env
.v7m
.vpr
, ARMCPU
),
390 VMSTATE_UINT32(env
.v7m
.ltpsize
, ARMCPU
),
391 VMSTATE_END_OF_LIST()
395 static const VMStateDescription vmstate_m
= {
398 .minimum_version_id
= 4,
400 .fields
= (const VMStateField
[]) {
401 VMSTATE_UINT32(env
.v7m
.vecbase
[M_REG_NS
], ARMCPU
),
402 VMSTATE_UINT32(env
.v7m
.basepri
[M_REG_NS
], ARMCPU
),
403 VMSTATE_UINT32(env
.v7m
.control
[M_REG_NS
], ARMCPU
),
404 VMSTATE_UINT32(env
.v7m
.ccr
[M_REG_NS
], ARMCPU
),
405 VMSTATE_UINT32(env
.v7m
.cfsr
[M_REG_NS
], ARMCPU
),
406 VMSTATE_UINT32(env
.v7m
.hfsr
, ARMCPU
),
407 VMSTATE_UINT32(env
.v7m
.dfsr
, ARMCPU
),
408 VMSTATE_UINT32(env
.v7m
.mmfar
[M_REG_NS
], ARMCPU
),
409 VMSTATE_UINT32(env
.v7m
.bfar
, ARMCPU
),
410 VMSTATE_UINT32(env
.v7m
.mpu_ctrl
[M_REG_NS
], ARMCPU
),
411 VMSTATE_INT32(env
.v7m
.exception
, ARMCPU
),
412 VMSTATE_END_OF_LIST()
414 .subsections
= (const VMStateDescription
* const []) {
415 &vmstate_m_faultmask_primask
,
426 static bool thumb2ee_needed(void *opaque
)
428 ARMCPU
*cpu
= opaque
;
429 CPUARMState
*env
= &cpu
->env
;
431 return arm_feature(env
, ARM_FEATURE_THUMB2EE
);
434 static const VMStateDescription vmstate_thumb2ee
= {
435 .name
= "cpu/thumb2ee",
437 .minimum_version_id
= 1,
438 .needed
= thumb2ee_needed
,
439 .fields
= (const VMStateField
[]) {
440 VMSTATE_UINT32(env
.teecr
, ARMCPU
),
441 VMSTATE_UINT32(env
.teehbr
, ARMCPU
),
442 VMSTATE_END_OF_LIST()
446 static bool pmsav7_needed(void *opaque
)
448 ARMCPU
*cpu
= opaque
;
449 CPUARMState
*env
= &cpu
->env
;
451 return arm_feature(env
, ARM_FEATURE_PMSA
) &&
452 arm_feature(env
, ARM_FEATURE_V7
) &&
453 !arm_feature(env
, ARM_FEATURE_V8
);
456 static bool pmsav7_rgnr_vmstate_validate(void *opaque
, int version_id
)
458 ARMCPU
*cpu
= opaque
;
460 return cpu
->env
.pmsav7
.rnr
[M_REG_NS
] < cpu
->pmsav7_dregion
;
463 static const VMStateDescription vmstate_pmsav7
= {
464 .name
= "cpu/pmsav7",
466 .minimum_version_id
= 1,
467 .needed
= pmsav7_needed
,
468 .fields
= (const VMStateField
[]) {
469 VMSTATE_VARRAY_UINT32(env
.pmsav7
.drbar
, ARMCPU
, pmsav7_dregion
, 0,
470 vmstate_info_uint32
, uint32_t),
471 VMSTATE_VARRAY_UINT32(env
.pmsav7
.drsr
, ARMCPU
, pmsav7_dregion
, 0,
472 vmstate_info_uint32
, uint32_t),
473 VMSTATE_VARRAY_UINT32(env
.pmsav7
.dracr
, ARMCPU
, pmsav7_dregion
, 0,
474 vmstate_info_uint32
, uint32_t),
475 VMSTATE_VALIDATE("rgnr is valid", pmsav7_rgnr_vmstate_validate
),
476 VMSTATE_END_OF_LIST()
480 static bool pmsav7_rnr_needed(void *opaque
)
482 ARMCPU
*cpu
= opaque
;
483 CPUARMState
*env
= &cpu
->env
;
485 /* For R profile cores pmsav7.rnr is migrated via the cpreg
486 * "RGNR" definition in helper.h. For M profile we have to
487 * migrate it separately.
489 return arm_feature(env
, ARM_FEATURE_M
);
492 static const VMStateDescription vmstate_pmsav7_rnr
= {
493 .name
= "cpu/pmsav7-rnr",
495 .minimum_version_id
= 1,
496 .needed
= pmsav7_rnr_needed
,
497 .fields
= (const VMStateField
[]) {
498 VMSTATE_UINT32(env
.pmsav7
.rnr
[M_REG_NS
], ARMCPU
),
499 VMSTATE_END_OF_LIST()
503 static bool pmsav8_needed(void *opaque
)
505 ARMCPU
*cpu
= opaque
;
506 CPUARMState
*env
= &cpu
->env
;
508 return arm_feature(env
, ARM_FEATURE_PMSA
) &&
509 arm_feature(env
, ARM_FEATURE_V8
);
512 static bool pmsav8r_needed(void *opaque
)
514 ARMCPU
*cpu
= opaque
;
515 CPUARMState
*env
= &cpu
->env
;
517 return arm_feature(env
, ARM_FEATURE_PMSA
) &&
518 arm_feature(env
, ARM_FEATURE_V8
) &&
519 !arm_feature(env
, ARM_FEATURE_M
);
522 static const VMStateDescription vmstate_pmsav8r
= {
523 .name
= "cpu/pmsav8/pmsav8r",
525 .minimum_version_id
= 1,
526 .needed
= pmsav8r_needed
,
527 .fields
= (const VMStateField
[]) {
528 VMSTATE_VARRAY_UINT32(env
.pmsav8
.hprbar
, ARMCPU
,
529 pmsav8r_hdregion
, 0, vmstate_info_uint32
, uint32_t),
530 VMSTATE_VARRAY_UINT32(env
.pmsav8
.hprlar
, ARMCPU
,
531 pmsav8r_hdregion
, 0, vmstate_info_uint32
, uint32_t),
532 VMSTATE_END_OF_LIST()
536 static const VMStateDescription vmstate_pmsav8
= {
537 .name
= "cpu/pmsav8",
539 .minimum_version_id
= 1,
540 .needed
= pmsav8_needed
,
541 .fields
= (const VMStateField
[]) {
542 VMSTATE_VARRAY_UINT32(env
.pmsav8
.rbar
[M_REG_NS
], ARMCPU
, pmsav7_dregion
,
543 0, vmstate_info_uint32
, uint32_t),
544 VMSTATE_VARRAY_UINT32(env
.pmsav8
.rlar
[M_REG_NS
], ARMCPU
, pmsav7_dregion
,
545 0, vmstate_info_uint32
, uint32_t),
546 VMSTATE_UINT32(env
.pmsav8
.mair0
[M_REG_NS
], ARMCPU
),
547 VMSTATE_UINT32(env
.pmsav8
.mair1
[M_REG_NS
], ARMCPU
),
548 VMSTATE_END_OF_LIST()
550 .subsections
= (const VMStateDescription
* const []) {
556 static bool s_rnr_vmstate_validate(void *opaque
, int version_id
)
558 ARMCPU
*cpu
= opaque
;
560 return cpu
->env
.pmsav7
.rnr
[M_REG_S
] < cpu
->pmsav7_dregion
;
563 static bool sau_rnr_vmstate_validate(void *opaque
, int version_id
)
565 ARMCPU
*cpu
= opaque
;
567 return cpu
->env
.sau
.rnr
< cpu
->sau_sregion
;
570 static bool m_security_needed(void *opaque
)
572 ARMCPU
*cpu
= opaque
;
573 CPUARMState
*env
= &cpu
->env
;
575 return arm_feature(env
, ARM_FEATURE_M_SECURITY
);
578 static const VMStateDescription vmstate_m_security
= {
579 .name
= "cpu/m-security",
581 .minimum_version_id
= 1,
582 .needed
= m_security_needed
,
583 .fields
= (const VMStateField
[]) {
584 VMSTATE_UINT32(env
.v7m
.secure
, ARMCPU
),
585 VMSTATE_UINT32(env
.v7m
.other_ss_msp
, ARMCPU
),
586 VMSTATE_UINT32(env
.v7m
.other_ss_psp
, ARMCPU
),
587 VMSTATE_UINT32(env
.v7m
.basepri
[M_REG_S
], ARMCPU
),
588 VMSTATE_UINT32(env
.v7m
.primask
[M_REG_S
], ARMCPU
),
589 VMSTATE_UINT32(env
.v7m
.faultmask
[M_REG_S
], ARMCPU
),
590 VMSTATE_UINT32(env
.v7m
.control
[M_REG_S
], ARMCPU
),
591 VMSTATE_UINT32(env
.v7m
.vecbase
[M_REG_S
], ARMCPU
),
592 VMSTATE_UINT32(env
.pmsav8
.mair0
[M_REG_S
], ARMCPU
),
593 VMSTATE_UINT32(env
.pmsav8
.mair1
[M_REG_S
], ARMCPU
),
594 VMSTATE_VARRAY_UINT32(env
.pmsav8
.rbar
[M_REG_S
], ARMCPU
, pmsav7_dregion
,
595 0, vmstate_info_uint32
, uint32_t),
596 VMSTATE_VARRAY_UINT32(env
.pmsav8
.rlar
[M_REG_S
], ARMCPU
, pmsav7_dregion
,
597 0, vmstate_info_uint32
, uint32_t),
598 VMSTATE_UINT32(env
.pmsav7
.rnr
[M_REG_S
], ARMCPU
),
599 VMSTATE_VALIDATE("secure MPU_RNR is valid", s_rnr_vmstate_validate
),
600 VMSTATE_UINT32(env
.v7m
.mpu_ctrl
[M_REG_S
], ARMCPU
),
601 VMSTATE_UINT32(env
.v7m
.ccr
[M_REG_S
], ARMCPU
),
602 VMSTATE_UINT32(env
.v7m
.mmfar
[M_REG_S
], ARMCPU
),
603 VMSTATE_UINT32(env
.v7m
.cfsr
[M_REG_S
], ARMCPU
),
604 VMSTATE_UINT32(env
.v7m
.sfsr
, ARMCPU
),
605 VMSTATE_UINT32(env
.v7m
.sfar
, ARMCPU
),
606 VMSTATE_VARRAY_UINT32(env
.sau
.rbar
, ARMCPU
, sau_sregion
, 0,
607 vmstate_info_uint32
, uint32_t),
608 VMSTATE_VARRAY_UINT32(env
.sau
.rlar
, ARMCPU
, sau_sregion
, 0,
609 vmstate_info_uint32
, uint32_t),
610 VMSTATE_UINT32(env
.sau
.rnr
, ARMCPU
),
611 VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate
),
612 VMSTATE_UINT32(env
.sau
.ctrl
, ARMCPU
),
613 VMSTATE_UINT32(env
.v7m
.scr
[M_REG_S
], ARMCPU
),
614 /* AIRCR is not secure-only, but our implementation is R/O if the
615 * security extension is unimplemented, so we migrate it here.
617 VMSTATE_UINT32(env
.v7m
.aircr
, ARMCPU
),
618 VMSTATE_END_OF_LIST()
622 static int get_cpsr(QEMUFile
*f
, void *opaque
, size_t size
,
623 const VMStateField
*field
)
625 ARMCPU
*cpu
= opaque
;
626 CPUARMState
*env
= &cpu
->env
;
627 uint32_t val
= qemu_get_be32(f
);
629 if (arm_feature(env
, ARM_FEATURE_M
)) {
630 if (val
& XPSR_EXCP
) {
631 /* This is a CPSR format value from an older QEMU. (We can tell
632 * because values transferred in XPSR format always have zero
633 * for the EXCP field, and CPSR format will always have bit 4
634 * set in CPSR_M.) Rearrange it into XPSR format. The significant
635 * differences are that the T bit is not in the same place, the
636 * primask/faultmask info may be in the CPSR I and F bits, and
637 * we do not want the mode bits.
638 * We know that this cleanup happened before v8M, so there
639 * is no complication with banked primask/faultmask.
641 uint32_t newval
= val
;
643 assert(!arm_feature(env
, ARM_FEATURE_M_SECURITY
));
645 newval
&= (CPSR_NZCV
| CPSR_Q
| CPSR_IT
| CPSR_GE
);
649 /* If the I or F bits are set then this is a migration from
650 * an old QEMU which still stored the M profile FAULTMASK
651 * and PRIMASK in env->daif. For a new QEMU, the data is
652 * transferred using the vmstate_m_faultmask_primask subsection.
655 env
->v7m
.faultmask
[M_REG_NS
] = 1;
658 env
->v7m
.primask
[M_REG_NS
] = 1;
662 /* Ignore the low bits, they are handled by vmstate_m. */
663 xpsr_write(env
, val
, ~XPSR_EXCP
);
667 env
->aarch64
= ((val
& PSTATE_nRW
) == 0);
670 pstate_write(env
, val
);
674 cpsr_write(env
, val
, 0xffffffff, CPSRWriteRaw
);
678 static int put_cpsr(QEMUFile
*f
, void *opaque
, size_t size
,
679 const VMStateField
*field
, JSONWriter
*vmdesc
)
681 ARMCPU
*cpu
= opaque
;
682 CPUARMState
*env
= &cpu
->env
;
685 if (arm_feature(env
, ARM_FEATURE_M
)) {
686 /* The low 9 bits are v7m.exception, which is handled by vmstate_m. */
687 val
= xpsr_read(env
) & ~XPSR_EXCP
;
688 } else if (is_a64(env
)) {
689 val
= pstate_read(env
);
691 val
= cpsr_read(env
);
694 qemu_put_be32(f
, val
);
698 static const VMStateInfo vmstate_cpsr
= {
704 static int get_power(QEMUFile
*f
, void *opaque
, size_t size
,
705 const VMStateField
*field
)
707 ARMCPU
*cpu
= opaque
;
708 bool powered_off
= qemu_get_byte(f
);
709 cpu
->power_state
= powered_off
? PSCI_OFF
: PSCI_ON
;
713 static int put_power(QEMUFile
*f
, void *opaque
, size_t size
,
714 const VMStateField
*field
, JSONWriter
*vmdesc
)
716 ARMCPU
*cpu
= opaque
;
718 /* Migration should never happen while we transition power states */
720 if (cpu
->power_state
== PSCI_ON
||
721 cpu
->power_state
== PSCI_OFF
) {
722 bool powered_off
= (cpu
->power_state
== PSCI_OFF
) ? true : false;
723 qemu_put_byte(f
, powered_off
);
730 static const VMStateInfo vmstate_powered_off
= {
731 .name
= "powered_off",
736 static int cpu_pre_save(void *opaque
)
738 ARMCPU
*cpu
= opaque
;
740 if (!kvm_enabled()) {
741 pmu_op_start(&cpu
->env
);
745 if (!write_kvmstate_to_list(cpu
)) {
746 /* This should never fail */
747 g_assert_not_reached();
751 * kvm_arm_cpu_pre_save() must be called after
752 * write_kvmstate_to_list()
754 kvm_arm_cpu_pre_save(cpu
);
756 if (!write_cpustate_to_list(cpu
, false)) {
757 /* This should never fail. */
758 g_assert_not_reached();
762 cpu
->cpreg_vmstate_array_len
= cpu
->cpreg_array_len
;
763 memcpy(cpu
->cpreg_vmstate_indexes
, cpu
->cpreg_indexes
,
764 cpu
->cpreg_array_len
* sizeof(uint64_t));
765 memcpy(cpu
->cpreg_vmstate_values
, cpu
->cpreg_values
,
766 cpu
->cpreg_array_len
* sizeof(uint64_t));
771 static int cpu_post_save(void *opaque
)
773 ARMCPU
*cpu
= opaque
;
775 if (!kvm_enabled()) {
776 pmu_op_finish(&cpu
->env
);
782 static int cpu_pre_load(void *opaque
)
784 ARMCPU
*cpu
= opaque
;
785 CPUARMState
*env
= &cpu
->env
;
788 * Pre-initialize irq_line_state to a value that's never valid as
789 * real data, so cpu_post_load() can tell whether we've seen the
790 * irq-line-state subsection in the incoming migration state.
792 env
->irq_line_state
= UINT32_MAX
;
794 if (!kvm_enabled()) {
801 static int cpu_post_load(void *opaque
, int version_id
)
803 ARMCPU
*cpu
= opaque
;
804 CPUARMState
*env
= &cpu
->env
;
808 * Handle migration compatibility from old QEMU which didn't
809 * send the irq-line-state subsection. A QEMU without it did not
810 * implement the HCR_EL2.{VI,VF} bits as generating interrupts,
811 * so for TCG the line state matches the bits set in cs->interrupt_request.
812 * For KVM the line state is not stored in cs->interrupt_request
813 * and so this will leave irq_line_state as 0, but this is OK because
814 * we only need to care about it for TCG.
816 if (env
->irq_line_state
== UINT32_MAX
) {
817 CPUState
*cs
= CPU(cpu
);
819 env
->irq_line_state
= cs
->interrupt_request
&
820 (CPU_INTERRUPT_HARD
| CPU_INTERRUPT_FIQ
|
821 CPU_INTERRUPT_VIRQ
| CPU_INTERRUPT_VFIQ
);
824 /* Update the values list from the incoming migration data.
825 * Anything in the incoming data which we don't know about is
826 * a migration failure; anything we know about but the incoming
827 * data doesn't specify retains its current (reset) value.
828 * The indexes list remains untouched -- we only inspect the
829 * incoming migration index list so we can match the values array
830 * entries with the right slots in our own values array.
833 for (i
= 0, v
= 0; i
< cpu
->cpreg_array_len
834 && v
< cpu
->cpreg_vmstate_array_len
; i
++) {
835 if (cpu
->cpreg_vmstate_indexes
[v
] > cpu
->cpreg_indexes
[i
]) {
836 /* register in our list but not incoming : skip it */
839 if (cpu
->cpreg_vmstate_indexes
[v
] < cpu
->cpreg_indexes
[i
]) {
840 /* register in their list but not ours: fail migration */
843 /* matching register, copy the value over */
844 cpu
->cpreg_values
[i
] = cpu
->cpreg_vmstate_values
[v
];
849 if (!write_list_to_kvmstate(cpu
, KVM_PUT_FULL_STATE
)) {
852 /* Note that it's OK for the TCG side not to know about
853 * every register in the list; KVM is authoritative if
856 write_list_to_cpustate(cpu
);
857 kvm_arm_cpu_post_load(cpu
);
859 if (!write_list_to_cpustate(cpu
)) {
865 * Misaligned thumb pc is architecturally impossible. Fail the
866 * incoming migration. For TCG it would trigger the assert in
867 * thumb_tr_translate_insn().
869 if (!is_a64(env
) && env
->thumb
&& (env
->regs
[15] & 1)) {
874 hw_breakpoint_update_all(cpu
);
875 hw_watchpoint_update_all(cpu
);
879 * TCG gen_update_fp_context() relies on the invariant that
880 * FPDSCR.LTPSIZE is constant 4 for M-profile with the LOB extension;
881 * forbid bogus incoming data with some other value.
883 if (arm_feature(env
, ARM_FEATURE_M
) && cpu_isar_feature(aa32_lob
, cpu
)) {
884 if (extract32(env
->v7m
.fpdscr
[M_REG_NS
],
885 FPCR_LTPSIZE_SHIFT
, FPCR_LTPSIZE_LENGTH
) != 4 ||
886 extract32(env
->v7m
.fpdscr
[M_REG_S
],
887 FPCR_LTPSIZE_SHIFT
, FPCR_LTPSIZE_LENGTH
) != 4) {
892 if (!kvm_enabled()) {
897 arm_rebuild_hflags(env
);
903 const VMStateDescription vmstate_arm_cpu
= {
906 .minimum_version_id
= 22,
907 .pre_save
= cpu_pre_save
,
908 .post_save
= cpu_post_save
,
909 .pre_load
= cpu_pre_load
,
910 .post_load
= cpu_post_load
,
911 .fields
= (const VMStateField
[]) {
912 VMSTATE_UINT32_ARRAY(env
.regs
, ARMCPU
, 16),
913 VMSTATE_UINT64_ARRAY(env
.xregs
, ARMCPU
, 32),
914 VMSTATE_UINT64(env
.pc
, ARMCPU
),
918 .size
= sizeof(uint32_t),
919 .info
= &vmstate_cpsr
,
923 VMSTATE_UINT32(env
.spsr
, ARMCPU
),
924 VMSTATE_UINT64_ARRAY(env
.banked_spsr
, ARMCPU
, 8),
925 VMSTATE_UINT32_ARRAY(env
.banked_r13
, ARMCPU
, 8),
926 VMSTATE_UINT32_ARRAY(env
.banked_r14
, ARMCPU
, 8),
927 VMSTATE_UINT32_ARRAY(env
.usr_regs
, ARMCPU
, 5),
928 VMSTATE_UINT32_ARRAY(env
.fiq_regs
, ARMCPU
, 5),
929 VMSTATE_UINT64_ARRAY(env
.elr_el
, ARMCPU
, 4),
930 VMSTATE_UINT64_ARRAY(env
.sp_el
, ARMCPU
, 4),
931 /* The length-check must come before the arrays to avoid
932 * incoming data possibly overflowing the array.
934 VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len
, ARMCPU
),
935 VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes
, ARMCPU
,
936 cpreg_vmstate_array_len
,
937 0, vmstate_info_uint64
, uint64_t),
938 VMSTATE_VARRAY_INT32(cpreg_vmstate_values
, ARMCPU
,
939 cpreg_vmstate_array_len
,
940 0, vmstate_info_uint64
, uint64_t),
941 VMSTATE_UINT64(env
.exclusive_addr
, ARMCPU
),
942 VMSTATE_UINT64(env
.exclusive_val
, ARMCPU
),
943 VMSTATE_UINT64(env
.exclusive_high
, ARMCPU
),
944 VMSTATE_UNUSED(sizeof(uint64_t)),
945 VMSTATE_UINT32(env
.exception
.syndrome
, ARMCPU
),
946 VMSTATE_UINT32(env
.exception
.fsr
, ARMCPU
),
947 VMSTATE_UINT64(env
.exception
.vaddress
, ARMCPU
),
948 VMSTATE_TIMER_PTR(gt_timer
[GTIMER_PHYS
], ARMCPU
),
949 VMSTATE_TIMER_PTR(gt_timer
[GTIMER_VIRT
], ARMCPU
),
951 .name
= "power_state",
953 .size
= sizeof(bool),
954 .info
= &vmstate_powered_off
,
958 VMSTATE_END_OF_LIST()
960 .subsections
= (const VMStateDescription
* const []) {
965 /* pmsav7_rnr must come before pmsav7 so that we have the
966 * region number before we test it in the VMSTATE_VALIDATE
973 #ifdef TARGET_AARCH64
978 &vmstate_irq_line_state
,