[qemu/ar7.git] / target/arm/machine.c
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "hw/hw.h"
#include "hw/boards.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "internals.h"
#include "migration/cpu.h"

static bool vfp_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_VFP);
}

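/* FPSCR cannot be migrated as a plain struct field: its value is
 * assembled from and scattered back to several places in CPUARMState,
 * so it is transferred through vfp_get_fpscr()/vfp_set_fpscr() via the
 * custom VMStateInfo defined below.
 */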
static int get_fpscr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    vfp_set_fpscr(env, val);
    return 0;
}

static int put_fpscr(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field, QJSON *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    qemu_put_be32(f, vfp_get_fpscr(env));
    return 0;
}

static const VMStateInfo vmstate_fpscr = {
    .name = "fpscr",
    .get = get_fpscr,
    .put = put_fpscr,
};

static const VMStateDescription vmstate_vfp = {
    .name = "cpu/vfp",
    .version_id = 3,
    .minimum_version_id = 3,
    .needed = vfp_needed,
    .fields = (VMStateField[]) {
        /* For compatibility, store Qn out of Zn here. */
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[0].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[1].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[2].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[3].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[4].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[5].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[6].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[7].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[8].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[9].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[10].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[11].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[12].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[13].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[14].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[15].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[16].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[17].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[18].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[19].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[20].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[21].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[22].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[23].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[24].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[25].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[26].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[27].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[28].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[29].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[30].d, ARMCPU, 0, 2),
        VMSTATE_UINT64_SUB_ARRAY(env.vfp.zregs[31].d, ARMCPU, 0, 2),

        /* The xregs array is a little awkward because element 1 (FPSCR)
         * requires a specific accessor, so we have to split it up in
         * the vmstate:
         */
        VMSTATE_UINT32(env.vfp.xregs[0], ARMCPU),
        VMSTATE_UINT32_SUB_ARRAY(env.vfp.xregs, ARMCPU, 2, 14),
        {
            .name = "fpscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_fpscr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

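/* Note: subsections like "cpu/vfp" are only written to the migration
 * stream when their .needed callback returns true; a destination that
 * does not receive a subsection simply keeps its reset values for those
 * fields.
 */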
static bool iwmmxt_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_IWMMXT);
}

static const VMStateDescription vmstate_iwmmxt = {
    .name = "cpu/iwmmxt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = iwmmxt_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16),
        VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16),
        VMSTATE_END_OF_LIST()
    }
};

#ifdef TARGET_AARCH64
/* The expression ARM_MAX_VQ - 2 is 0 for pure AArch32 build,
 * and ARMPredicateReg is actively empty. This triggers errors
 * in the expansion of the VMSTATE macros.
 */

static bool sve_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return cpu_isar_feature(aa64_sve, cpu);
}

/* The first two words of each Zreg is stored in VFP state. */
static const VMStateDescription vmstate_zreg_hi_reg = {
    .name = "cpu/sve/zreg_hi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_SUB_ARRAY(d, ARMVectorReg, 2, ARM_MAX_VQ - 2),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_preg_reg = {
    .name = "cpu/sve/preg",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(p, ARMPredicateReg, 2 * ARM_MAX_VQ / 8),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_sve = {
    .name = "cpu/sve",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sve_needed,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(env.vfp.zregs, ARMCPU, 32, 0,
                             vmstate_zreg_hi_reg, ARMVectorReg),
        VMSTATE_STRUCT_ARRAY(env.vfp.pregs, ARMCPU, 17, 0,
                             vmstate_preg_reg, ARMPredicateReg),
        VMSTATE_END_OF_LIST()
    }
};
#endif /* AARCH64 */

static bool serror_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return env->serror.pending != 0;
}

static const VMStateDescription vmstate_serror = {
    .name = "cpu/serror",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = serror_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(env.serror.pending, ARMCPU),
        VMSTATE_UINT8(env.serror.has_esr, ARMCPU),
        VMSTATE_UINT64(env.serror.esr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

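/* The irq-line-state subsection is always migrated (its needed callback
 * always returns true); cpu_pre_load() seeds env->irq_line_state with
 * the UINT32_MAX sentinel so that cpu_post_load() can detect an incoming
 * stream from an older QEMU that never sent this subsection.
 */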
static bool irq_line_state_needed(void *opaque)
{
    return true;
}

static const VMStateDescription vmstate_irq_line_state = {
    .name = "cpu/irq-line-state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = irq_line_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.irq_line_state, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool m_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_m_faultmask_primask = {
    .name = "cpu/m/faultmask-primask",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

/* CSSELR is in a subsection because we didn't implement it previously.
 * Migration from an old implementation will leave it at zero, which
 * is OK since the only CPUs in the old implementation make the
 * register RAZ/WI.
 * Since there was no version of QEMU which implemented the CSSELR for
 * just non-secure, we transfer both banks here rather than putting
 * the secure banked version in the m-security subsection.
 */
static bool csselr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.v7m.csselr[M_REG_NS] <= R_V7M_CSSELR_INDEX_MASK
        && cpu->env.v7m.csselr[M_REG_S] <= R_V7M_CSSELR_INDEX_MASK;
}

static bool m_csselr_needed(void *opaque)
{
    ARMCPU *cpu = opaque;

    return !arm_v7m_csselr_razwi(cpu);
}

static const VMStateDescription vmstate_m_csselr = {
    .name = "cpu/m/csselr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_csselr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.csselr, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_VALIDATE("CSSELR is valid", csselr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_scr = {
    .name = "cpu/m/scr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.scr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m_other_sp = {
    .name = "cpu/m/other-sp",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.other_sp, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool m_v8m_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M) && arm_feature(env, ARM_FEATURE_V8);
}

static const VMStateDescription vmstate_m_v8m = {
    .name = "cpu/m/v8m",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_v8m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.v7m.msplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_UINT32_ARRAY(env.v7m.psplim, ARMCPU, M_REG_NUM_BANKS),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_m = {
    .name = "cpu/m",
    .version_id = 4,
    .minimum_version_id = 4,
    .needed = m_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.hfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.dfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.v7m.bfar, ARMCPU),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_NS], ARMCPU),
        VMSTATE_INT32(env.v7m.exception, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_m_faultmask_primask,
        &vmstate_m_csselr,
        &vmstate_m_scr,
        &vmstate_m_other_sp,
        &vmstate_m_v8m,
        NULL
    }
};

static bool thumb2ee_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_THUMB2EE);
}

static const VMStateDescription vmstate_thumb2ee = {
    .name = "cpu/thumb2ee",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = thumb2ee_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.teecr, ARMCPU),
        VMSTATE_UINT32(env.teehbr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav7_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
           arm_feature(env, ARM_FEATURE_V7) &&
           !arm_feature(env, ARM_FEATURE_V8);
}

static bool pmsav7_rgnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.pmsav7.rnr[M_REG_NS] < cpu->pmsav7_dregion;
}

static const VMStateDescription vmstate_pmsav7 = {
    .name = "cpu/pmsav7",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav7.drbar, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.drsr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav7.dracr, ARMCPU, pmsav7_dregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VALIDATE("rgnr is valid", pmsav7_rgnr_vmstate_validate),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav7_rnr_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    /* For R profile cores pmsav7.rnr is migrated via the cpreg
     * "RGNR" definition in helper.h. For M profile we have to
     * migrate it separately.
     */
    return arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_pmsav7_rnr = {
    .name = "cpu/pmsav7-rnr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav7_rnr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool pmsav8_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_PMSA) &&
           arm_feature(env, ARM_FEATURE_V8);
}

static const VMStateDescription vmstate_pmsav8 = {
    .name = "cpu/pmsav8",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pmsav8_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_NS], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_NS], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_NS], ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool s_rnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.pmsav7.rnr[M_REG_S] < cpu->pmsav7_dregion;
}

static bool sau_rnr_vmstate_validate(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;

    return cpu->env.sau.rnr < cpu->sau_sregion;
}

static bool m_security_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M_SECURITY);
}

static const VMStateDescription vmstate_m_security = {
    .name = "cpu/m-security",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = m_security_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.secure, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_msp, ARMCPU),
        VMSTATE_UINT32(env.v7m.other_ss_psp, ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.primask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.faultmask[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.control[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.vecbase[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair0[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.pmsav8.mair1[M_REG_S], ARMCPU),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rbar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.pmsav8.rlar[M_REG_S], ARMCPU, pmsav7_dregion,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.pmsav7.rnr[M_REG_S], ARMCPU),
        VMSTATE_VALIDATE("secure MPU_RNR is valid", s_rnr_vmstate_validate),
        VMSTATE_UINT32(env.v7m.mpu_ctrl[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.ccr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.mmfar[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.cfsr[M_REG_S], ARMCPU),
        VMSTATE_UINT32(env.v7m.sfsr, ARMCPU),
        VMSTATE_UINT32(env.v7m.sfar, ARMCPU),
        VMSTATE_VARRAY_UINT32(env.sau.rbar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_VARRAY_UINT32(env.sau.rlar, ARMCPU, sau_sregion, 0,
                              vmstate_info_uint32, uint32_t),
        VMSTATE_UINT32(env.sau.rnr, ARMCPU),
        VMSTATE_VALIDATE("SAU_RNR is valid", sau_rnr_vmstate_validate),
        VMSTATE_UINT32(env.sau.ctrl, ARMCPU),
        VMSTATE_UINT32(env.v7m.scr[M_REG_S], ARMCPU),
        /* AIRCR is not secure-only, but our implementation is R/O if the
         * security extension is unimplemented, so we migrate it here.
         */
        VMSTATE_UINT32(env.v7m.aircr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

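/* The "cpsr" field is migrated as a single 32-bit word whose encoding
 * depends on the CPU state at save time: XPSR format for M profile,
 * PSTATE for a CPU in AArch64 state, and CPSR otherwise. get_cpsr()
 * below also copes with CPSR-format values sent for M-profile CPUs by
 * older QEMU versions.
 */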
static int get_cpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    if (arm_feature(env, ARM_FEATURE_M)) {
        if (val & XPSR_EXCP) {
            /* This is a CPSR format value from an older QEMU. (We can tell
             * because values transferred in XPSR format always have zero
             * for the EXCP field, and CPSR format will always have bit 4
             * set in CPSR_M.) Rearrange it into XPSR format. The significant
             * differences are that the T bit is not in the same place, the
             * primask/faultmask info may be in the CPSR I and F bits, and
             * we do not want the mode bits.
             * We know that this cleanup happened before v8M, so there
             * is no complication with banked primask/faultmask.
             */
            uint32_t newval = val;

            assert(!arm_feature(env, ARM_FEATURE_M_SECURITY));

            newval &= (CPSR_NZCV | CPSR_Q | CPSR_IT | CPSR_GE);
            if (val & CPSR_T) {
                newval |= XPSR_T;
            }
            /* If the I or F bits are set then this is a migration from
             * an old QEMU which still stored the M profile FAULTMASK
             * and PRIMASK in env->daif. For a new QEMU, the data is
             * transferred using the vmstate_m_faultmask_primask subsection.
             */
            if (val & CPSR_F) {
                env->v7m.faultmask[M_REG_NS] = 1;
            }
            if (val & CPSR_I) {
                env->v7m.primask[M_REG_NS] = 1;
            }
            val = newval;
        }
        /* Ignore the low bits, they are handled by vmstate_m. */
        xpsr_write(env, val, ~XPSR_EXCP);
        return 0;
    }

    env->aarch64 = ((val & PSTATE_nRW) == 0);

    if (is_a64(env)) {
        pstate_write(env, val);
        return 0;
    }

    cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
    return 0;
}

static int put_cpsr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, QJSON *vmdesc)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* The low 9 bits are v7m.exception, which is handled by vmstate_m. */
        val = xpsr_read(env) & ~XPSR_EXCP;
    } else if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }

    qemu_put_be32(f, val);
    return 0;
}

static const VMStateInfo vmstate_cpsr = {
    .name = "cpsr",
    .get = get_cpsr,
    .put = put_cpsr,
};

static int get_power(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field)
{
    ARMCPU *cpu = opaque;
    bool powered_off = qemu_get_byte(f);
    cpu->power_state = powered_off ? PSCI_OFF : PSCI_ON;
    return 0;
}

static int put_power(QEMUFile *f, void *opaque, size_t size,
                     const VMStateField *field, QJSON *vmdesc)
{
    ARMCPU *cpu = opaque;

    /* Migration should never happen while we transition power states */

    if (cpu->power_state == PSCI_ON ||
        cpu->power_state == PSCI_OFF) {
        bool powered_off = (cpu->power_state == PSCI_OFF) ? true : false;
        qemu_put_byte(f, powered_off);
        return 0;
    } else {
        return 1;
    }
}

static const VMStateInfo vmstate_powered_off = {
    .name = "powered_off",
    .get = get_power,
    .put = put_power,
};

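/* At save time the coprocessor/system register state is snapshotted into
 * the cpreg_vmstate_* arrays, which are what actually get migrated; the
 * matching of incoming entries by 64-bit register index happens in
 * cpu_post_load().
 */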
static int cpu_pre_save(void *opaque)
{
    ARMCPU *cpu = opaque;

    if (kvm_enabled()) {
        if (!write_kvmstate_to_list(cpu)) {
            /* This should never fail */
            abort();
        }
    } else {
        if (!write_cpustate_to_list(cpu)) {
            /* This should never fail. */
            abort();
        }
    }

    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    memcpy(cpu->cpreg_vmstate_indexes, cpu->cpreg_indexes,
           cpu->cpreg_array_len * sizeof(uint64_t));
    memcpy(cpu->cpreg_vmstate_values, cpu->cpreg_values,
           cpu->cpreg_array_len * sizeof(uint64_t));

    return 0;
}

static int cpu_pre_load(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    /*
     * Pre-initialize irq_line_state to a value that's never valid as
     * real data, so cpu_post_load() can tell whether we've seen the
     * irq-line-state subsection in the incoming migration state.
     */
    env->irq_line_state = UINT32_MAX;

    return 0;
}

static int cpu_post_load(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    int i, v;

    /*
     * Handle migration compatibility from old QEMU which didn't
     * send the irq-line-state subsection. A QEMU without it did not
     * implement the HCR_EL2.{VI,VF} bits as generating interrupts,
     * so for TCG the line state matches the bits set in cs->interrupt_request.
     * For KVM the line state is not stored in cs->interrupt_request
     * and so this will leave irq_line_state as 0, but this is OK because
     * we only need to care about it for TCG.
     */
    if (env->irq_line_state == UINT32_MAX) {
        CPUState *cs = CPU(cpu);

        env->irq_line_state = cs->interrupt_request &
            (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ |
             CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VFIQ);
    }

    /* Update the values list from the incoming migration data.
     * Anything in the incoming data which we don't know about is
     * a migration failure; anything we know about but the incoming
     * data doesn't specify retains its current (reset) value.
     * The indexes list remains untouched -- we only inspect the
     * incoming migration index list so we can match the values array
     * entries with the right slots in our own values array.
     */

    for (i = 0, v = 0; i < cpu->cpreg_array_len
             && v < cpu->cpreg_vmstate_array_len; i++) {
        if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) {
            /* register in our list but not incoming : skip it */
            continue;
        }
        if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) {
            /* register in their list but not ours: fail migration */
            return -1;
        }
        /* matching register, copy the value over */
        cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v];
        v++;
    }

    if (kvm_enabled()) {
        if (!write_list_to_kvmstate(cpu, KVM_PUT_FULL_STATE)) {
            return -1;
        }
        /* Note that it's OK for the TCG side not to know about
         * every register in the list; KVM is authoritative if
         * we're using it.
         */
        write_list_to_cpustate(cpu);
    } else {
        if (!write_list_to_cpustate(cpu)) {
            return -1;
        }
    }

    hw_breakpoint_update_all(cpu);
    hw_watchpoint_update_all(cpu);

    return 0;
}

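/* Top-level CPU vmstate. The anonymous field entries for "cpsr" and
 * "power_state" use .offset = 0 so that their custom VMStateInfo get/put
 * callbacks are handed the ARMCPU pointer itself rather than a field
 * within it.
 */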
const VMStateDescription vmstate_arm_cpu = {
    .name = "cpu",
    .version_id = 22,
    .minimum_version_id = 22,
    .pre_save = cpu_pre_save,
    .pre_load = cpu_pre_load,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
        VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
        VMSTATE_UINT64(env.pc, ARMCPU),
        {
            .name = "cpsr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_cpsr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_UINT32(env.spsr, ARMCPU),
        VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
        VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
        VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
        /* The length-check must come before the arrays to avoid
         * incoming data possibly overflowing the array.
         */
        VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len, ARMCPU),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
        VMSTATE_UINT64(env.exclusive_val, ARMCPU),
        VMSTATE_UINT64(env.exclusive_high, ARMCPU),
        VMSTATE_UINT64(env.features, ARMCPU),
        VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
        VMSTATE_UINT32(env.exception.fsr, ARMCPU),
        VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
        {
            .name = "power_state",
            .version_id = 0,
            .size = sizeof(bool),
            .info = &vmstate_powered_off,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_vfp,
        &vmstate_iwmmxt,
        &vmstate_m,
        &vmstate_thumb2ee,
        /* pmsav7_rnr must come before pmsav7 so that we have the
         * region number before we test it in the VMSTATE_VALIDATE
         * in vmstate_pmsav7.
         */
        &vmstate_pmsav7_rnr,
        &vmstate_pmsav7,
        &vmstate_pmsav8,
        &vmstate_m_security,
#ifdef TARGET_AARCH64
        &vmstate_sve,
#endif
        &vmstate_serror,
        &vmstate_irq_line_state,
        NULL
    }
};