/* target/ppc/machine.c (qemu/ar7.git) */
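/*
 * PowerPC CPU state save/load for migration and savevm: a legacy
 * hand-rolled loader plus the VMState description vmstate_ppc_cpu
 * and its optional subsections.
 */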
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "hw/hw.h"
#include "hw/boards.h"
#include "sysemu/kvm.h"
#include "helper_regs.h"
#include "mmu-hash64.h"
#include "migration/cpu.h"
#include "qapi/error.h"
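/*
 * Loader for the old, pre-VMState "cpu" section format (stream version 4);
 * hooked up below via vmstate_ppc_cpu.load_state_old. Fields are read by
 * hand in the exact order the old save code wrote them.
 */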
static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    unsigned int i, j;
    target_ulong sdr1;
    uint32_t fpscr;
    target_ulong xer;

    for (i = 0; i < 32; i++)
        qemu_get_betls(f, &env->gpr[i]);
#if !defined(TARGET_PPC64)
    for (i = 0; i < 32; i++)
        qemu_get_betls(f, &env->gprh[i]);
#endif
    qemu_get_betls(f, &env->lr);
    qemu_get_betls(f, &env->ctr);
    for (i = 0; i < 8; i++)
        qemu_get_be32s(f, &env->crf[i]);
    qemu_get_betls(f, &xer);
    cpu_write_xer(env, xer);
    qemu_get_betls(f, &env->reserve_addr);
    qemu_get_betls(f, &env->msr);
    for (i = 0; i < 4; i++)
        qemu_get_betls(f, &env->tgpr[i]);
    for (i = 0; i < 32; i++) {
        union {
            float64 d;
            uint64_t l;
        } u;
        u.l = qemu_get_be64(f);
        env->fpr[i] = u.d;
    }
    qemu_get_be32s(f, &fpscr);
    env->fpscr = fpscr;
    qemu_get_sbe32s(f, &env->access_type);
#if defined(TARGET_PPC64)
    qemu_get_betls(f, &env->spr[SPR_ASR]);
    qemu_get_sbe32s(f, &env->slb_nr);
#endif
    qemu_get_betls(f, &sdr1);
    for (i = 0; i < 32; i++)
        qemu_get_betls(f, &env->sr[i]);
    for (i = 0; i < 2; i++)
        for (j = 0; j < 8; j++)
            qemu_get_betls(f, &env->DBAT[i][j]);
    for (i = 0; i < 2; i++)
        for (j = 0; j < 8; j++)
            qemu_get_betls(f, &env->IBAT[i][j]);
    qemu_get_sbe32s(f, &env->nb_tlb);
    qemu_get_sbe32s(f, &env->tlb_per_way);
    qemu_get_sbe32s(f, &env->nb_ways);
    qemu_get_sbe32s(f, &env->last_way);
    qemu_get_sbe32s(f, &env->id_tlbs);
    qemu_get_sbe32s(f, &env->nb_pids);
    if (env->tlb.tlb6) {
        /* XXX assumes 6xx */
        for (i = 0; i < env->nb_tlb; i++) {
            qemu_get_betls(f, &env->tlb.tlb6[i].pte0);
            qemu_get_betls(f, &env->tlb.tlb6[i].pte1);
            qemu_get_betls(f, &env->tlb.tlb6[i].EPN);
        }
    }
    for (i = 0; i < 4; i++)
        qemu_get_betls(f, &env->pb[i]);
    for (i = 0; i < 1024; i++)
        qemu_get_betls(f, &env->spr[i]);
    if (!cpu->vhyp) {
        ppc_store_sdr1(env, sdr1);
    }
    qemu_get_be32s(f, &env->vscr);
    qemu_get_be64s(f, &env->spe_acc);
    qemu_get_be32s(f, &env->spe_fscr);
    qemu_get_betls(f, &env->msr_mask);
    qemu_get_be32s(f, &env->flags);
    qemu_get_sbe32s(f, &env->error_code);
    qemu_get_be32s(f, &env->pending_interrupts);
    qemu_get_be32s(f, &env->irq_input_state);
    for (i = 0; i < POWERPC_EXCP_NB; i++)
        qemu_get_betls(f, &env->excp_vectors[i]);
    qemu_get_betls(f, &env->excp_prefix);
    qemu_get_betls(f, &env->ivor_mask);
    qemu_get_betls(f, &env->ivpr_mask);
    qemu_get_betls(f, &env->hreset_vector);
    qemu_get_betls(f, &env->nip);
    qemu_get_betls(f, &env->hflags);
    qemu_get_betls(f, &env->hflags_nmsr);
    qemu_get_sbe32(f); /* Discard unused mmu_idx */
    qemu_get_sbe32(f); /* Discard unused power_mode */

    /* Recompute mmu indices */
    hreg_compute_mem_idx(env);

    return 0;
}
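/*
 * Altivec registers are 128 bits wide, so they cannot use the stock
 * integer VMSTATE helpers; vmstate_info_avr streams each ppc_avr_t as
 * two big-endian 64-bit halves.
 */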
static int get_avr(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
    ppc_avr_t *v = pv;

    v->u64[0] = qemu_get_be64(f);
    v->u64[1] = qemu_get_be64(f);

    return 0;
}

static int put_avr(QEMUFile *f, void *pv, size_t size, VMStateField *field,
                   QJSON *vmdesc)
{
    ppc_avr_t *v = pv;

    qemu_put_be64(f, v->u64[0]);
    qemu_put_be64(f, v->u64[1]);
    return 0;
}

static const VMStateInfo vmstate_info_avr = {
    .name = "avr",
    .get = get_avr,
    .put = put_avr,
};

#define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v)                         \
    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_avr, ppc_avr_t)

#define VMSTATE_AVR_ARRAY(_f, _s, _n)                               \
    VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0)
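/*
 * Predicate for the VMSTATE_*_TEST fields below: true only when
 * cpu->pre_2_8_migration is set, i.e. when migrating to a machine type
 * older than 2.8, which gates the mig_* compatibility fields.
 */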
static bool cpu_pre_2_8_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;

    return cpu->pre_2_8_migration;
}
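/*
 * Before saving: mirror architected state (LR, CTR, XER, CFAR, SPEFSCR and
 * the BATs) into env->spr[] so that the single spr[] array in the stream
 * carries them, and, when migrating to an older QEMU, mask insns_flags
 * down to the bits that pre-2.8 versions understood.
 */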
static void cpu_pre_save(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t insns_compat_mask =
        PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB
        | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES
        | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES
        | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT
        | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ
        | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC
        | PPC_64B | PPC_64BX | PPC_ALTIVEC
        | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD;
    uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX
        | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206
        | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206
        | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207
        | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207
        | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM;

    env->spr[SPR_LR] = env->lr;
    env->spr[SPR_CTR] = env->ctr;
    env->spr[SPR_XER] = cpu_read_xer(env);
#if defined(TARGET_PPC64)
    env->spr[SPR_CFAR] = env->cfar;
#endif
    env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr;

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->spr[SPR_DBAT0U + 2*i] = env->DBAT[0][i];
        env->spr[SPR_DBAT0U + 2*i + 1] = env->DBAT[1][i];
        env->spr[SPR_IBAT0U + 2*i] = env->IBAT[0][i];
        env->spr[SPR_IBAT0U + 2*i + 1] = env->IBAT[1][i];
    }
    for (i = 0; (i < 4) && ((i+4) < env->nb_BATs); i++) {
        env->spr[SPR_DBAT4U + 2*i] = env->DBAT[0][i+4];
        env->spr[SPR_DBAT4U + 2*i + 1] = env->DBAT[1][i+4];
        env->spr[SPR_IBAT4U + 2*i] = env->IBAT[0][i+4];
        env->spr[SPR_IBAT4U + 2*i + 1] = env->IBAT[1][i+4];
    }

    /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */
    if (cpu->pre_2_8_migration) {
        cpu->mig_msr_mask = env->msr_mask;
        cpu->mig_insns_flags = env->insns_flags & insns_compat_mask;
        cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2;
        cpu->mig_nb_BATs = env->nb_BATs;
    }
}
/*
 * Determine if a given PVR is a "close enough" match to the CPU
 * object. For TCG and KVM PR it would probably be sufficient to
 * require an exact PVR match. However for KVM HV the user is
 * restricted to a PVR exactly matching the host CPU. The correct way
 * to handle this is to put the guest into an architected
 * compatibility mode. However, to allow a more forgiving transition
 * and migration from before this was widely done, we allow migration
 * between sufficiently similar PVRs, as determined by the CPU class's
 * pvr_match() hook.
 */
static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (pvr == pcc->pvr) {
        return true;
    }
    return pcc->pvr_match(pcc, pvr);
}
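/*
 * After loading: verify the PVR (or re-enter the architected compat mode
 * negotiated on the source), propagate env->spr[] back into the dedicated
 * fields, restore SDR1 and MSR, and recompute derived MMU state.
 */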
static int cpu_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong msr;

    /*
     * If we're operating in compat mode, we should be ok as long as
     * the destination supports the same compatibility mode.
     *
     * Otherwise, however, we require that the destination has exactly
     * the same CPU model as the source.
     */

#if defined(TARGET_PPC64)
    if (cpu->compat_pvr) {
        Error *local_err = NULL;

        ppc_set_compat(cpu, cpu->compat_pvr, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    } else
#endif
    {
        if (!pvr_match(cpu, env->spr[SPR_PVR])) {
            return -1;
        }
    }

    env->lr = env->spr[SPR_LR];
    env->ctr = env->spr[SPR_CTR];
    cpu_write_xer(env, env->spr[SPR_XER]);
#if defined(TARGET_PPC64)
    env->cfar = env->spr[SPR_CFAR];
#endif
    env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2*i];
        env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2*i + 1];
        env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2*i];
        env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2*i + 1];
    }
    for (i = 0; (i < 4) && ((i+4) < env->nb_BATs); i++) {
        env->DBAT[0][i+4] = env->spr[SPR_DBAT4U + 2*i];
        env->DBAT[1][i+4] = env->spr[SPR_DBAT4U + 2*i + 1];
        env->IBAT[0][i+4] = env->spr[SPR_IBAT4U + 2*i];
        env->IBAT[1][i+4] = env->spr[SPR_IBAT4U + 2*i + 1];
    }

    if (!cpu->vhyp) {
        ppc_store_sdr1(env, env->spr[SPR_SDR1]);
    }

    /* Invalidate all msr bits except MSR_TGPR/MSR_HVB before restoring */
    msr = env->msr;
    env->msr ^= ~((1ULL << MSR_TGPR) | MSR_HVB);
    ppc_store_msr(env, msr);

    hreg_compute_mem_idx(env);

    return 0;
}
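/*
 * Optional subsections below: each one is written only when its .needed
 * callback returns true on the source, and is simply absent from the
 * stream otherwise.
 */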
static bool fpu_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return (cpu->env.insns_flags & PPC_FLOAT);
}

static const VMStateDescription vmstate_fpu = {
    .name = "cpu/fpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpu_needed,
    .fields = (VMStateField[]) {
        VMSTATE_FLOAT64_ARRAY(env.fpr, PowerPCCPU, 32),
        VMSTATE_UINTTL(env.fpscr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
static bool altivec_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return (cpu->env.insns_flags & PPC_ALTIVEC);
}

static const VMStateDescription vmstate_altivec = {
    .name = "cpu/altivec",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = altivec_needed,
    .fields = (VMStateField[]) {
        VMSTATE_AVR_ARRAY(env.avr, PowerPCCPU, 32),
        VMSTATE_UINT32(env.vscr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
static bool vsx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return (cpu->env.insns_flags2 & PPC2_VSX);
}

static const VMStateDescription vmstate_vsx = {
    .name = "cpu/vsx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vsx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};
#ifdef TARGET_PPC64
/* Transactional memory state */
static bool tm_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    return msr_ts;
}

static const VMStateDescription vmstate_tm = {
    .name = "cpu/tm",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .needed = tm_needed,
    .fields = (VMStateField []) {
        VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
        VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64),
        VMSTATE_UINT64(env.tm_cr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_lr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ctr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_amr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ppr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU),
        VMSTATE_UINT32(env.tm_vscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_dscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_tar, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
#endif
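/*
 * Segment registers: always present on 32-bit targets, and on 64-bit
 * targets only for CPU models that do not use the 64-bit hash MMU.
 */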
static bool sr_needed(void *opaque)
{
#ifdef TARGET_PPC64
    PowerPCCPU *cpu = opaque;

    return !(cpu->env.mmu_model & POWERPC_MMU_64);
#else
    return true;
#endif
}

static const VMStateDescription vmstate_sr = {
    .name = "cpu/sr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};
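/*
 * 64-bit hash MMU SLB state: entries travel as raw (esid, vsid) pairs, and
 * slb_post_load() re-runs ppc_store_slb() to rebuild the derived per-entry
 * state and to reject invalid entries from the source.
 */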
#ifdef TARGET_PPC64
static int get_slbe(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
    ppc_slb_t *v = pv;

    v->esid = qemu_get_be64(f);
    v->vsid = qemu_get_be64(f);

    return 0;
}

static int put_slbe(QEMUFile *f, void *pv, size_t size, VMStateField *field,
                    QJSON *vmdesc)
{
    ppc_slb_t *v = pv;

    qemu_put_be64(f, v->esid);
    qemu_put_be64(f, v->vsid);
    return 0;
}

static const VMStateInfo vmstate_info_slbe = {
    .name = "slbe",
    .get = get_slbe,
    .put = put_slbe,
};

#define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v)                         \
    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t)

#define VMSTATE_SLB_ARRAY(_f, _s, _n)                               \
    VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0)

static bool slb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    /* We don't support any of the old segment table based 64-bit CPUs */
    return (cpu->env.mmu_model & POWERPC_MMU_64);
}

static int slb_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;

    /* We've pulled in the raw esid and vsid values from the migration
     * stream, but we need to recompute the page size pointers */
    for (i = 0; i < env->slb_nr; i++) {
        if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
            /* Migration source had bad values in its SLB */
            return -1;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_slb = {
    .name = "cpu/slb",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = slb_needed,
    .post_load = slb_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.slb_nr, PowerPCCPU, NULL),
        VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
        VMSTATE_END_OF_LIST()
    },
};
#endif /* TARGET_PPC64 */
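/*
 * Software TLB state. At most one of the tlb6xx / tlbemb / tlbmas
 * subsections is sent, selected by env->tlb_type in the needed callbacks.
 */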
static const VMStateDescription vmstate_tlb6xx_entry = {
    .name = "cpu/tlb6xx_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(pte0, ppc6xx_tlb_t),
        VMSTATE_UINTTL(pte1, ppc6xx_tlb_t),
        VMSTATE_UINTTL(EPN, ppc6xx_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlb6xx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_6XX);
}

static const VMStateDescription vmstate_tlb6xx = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlb6xx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlb6xx_entry,
                                            ppc6xx_tlb_t),
        VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    },
};
static const VMStateDescription vmstate_tlbemb_entry = {
    .name = "cpu/tlbemb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(RPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(EPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(PID, ppcemb_tlb_t),
        VMSTATE_UINTTL(size, ppcemb_tlb_t),
        VMSTATE_UINT32(prot, ppcemb_tlb_t),
        VMSTATE_UINT32(attr, ppcemb_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbemb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_EMB);
}

static bool pbr403_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    uint32_t pvr = cpu->env.spr[SPR_PVR];

    return (pvr & 0xffff0000) == 0x00200000;
}
static const VMStateDescription vmstate_pbr403 = {
    .name = "cpu/pbr403",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pbr403_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.pb, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    },
};
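/*
 * Embedded (4xx/BookE) TLB state. Note that the section name below is
 * "cpu/tlb6xx" rather than "cpu/tlbemb"; the name is part of the wire
 * format, so renaming it would break migration from existing streams.
 */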
static const VMStateDescription vmstate_tlbemb = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbemb_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbemb_entry,
                                            ppcemb_tlb_t),
        /* 403 protection registers */
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_pbr403,
        NULL
    }
};
static const VMStateDescription vmstate_tlbmas_entry = {
    .name = "cpu/tlbmas_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mas8, ppcmas_tlb_t),
        VMSTATE_UINT32(mas1, ppcmas_tlb_t),
        VMSTATE_UINT64(mas2, ppcmas_tlb_t),
        VMSTATE_UINT64(mas7_3, ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbmas_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_MAS);
}

static const VMStateDescription vmstate_tlbmas = {
    .name = "cpu/tlbmas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbmas_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbmas_entry,
                                            ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};
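/*
 * Architected compatibility mode: the negotiated compat PVR is transferred
 * only when one is set, and is suppressed when migrating to a pre-2.10
 * QEMU that does not know about this subsection.
 */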
static bool compat_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    assert(!(cpu->compat_pvr && !cpu->vhyp));
    return !cpu->pre_2_10_migration && cpu->compat_pvr != 0;
}

static const VMStateDescription vmstate_compat = {
    .name = "cpu/compat",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = compat_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(compat_pvr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
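/*
 * Top-level "cpu" section. The current format is version 5; version 4
 * streams are still accepted through cpu_load_old() above.
 */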
const VMStateDescription vmstate_ppc_cpu = {
    .name = "cpu",
    .version_id = 5,
    .minimum_version_id = 5,
    .minimum_version_id_old = 4,
    .load_state_old = cpu_load_old,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */

        /* User mode architected state */
        VMSTATE_UINTTL_ARRAY(env.gpr, PowerPCCPU, 32),
#if !defined(TARGET_PPC64)
        VMSTATE_UINTTL_ARRAY(env.gprh, PowerPCCPU, 32),
#endif
        VMSTATE_UINT32_ARRAY(env.crf, PowerPCCPU, 8),
        VMSTATE_UINTTL(env.nip, PowerPCCPU),

        /* SPRs */
        VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024),
        VMSTATE_UINT64(env.spe_acc, PowerPCCPU),

        /* Reservation */
        VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),

        /* Supervisor mode architected state */
        VMSTATE_UINTTL(env.msr, PowerPCCPU),

        /* Internal state */
        VMSTATE_UINTTL(env.hflags_nmsr, PowerPCCPU),
        /* FIXME: access_type? */

        /* Sanity checking */
        VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU,
                            cpu_pre_2_8_migration),
        VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_fpu,
        &vmstate_altivec,
        &vmstate_vsx,
        &vmstate_sr,
#ifdef TARGET_PPC64
        &vmstate_tm,
        &vmstate_slb,
#endif /* TARGET_PPC64 */
        &vmstate_tlb6xx,
        &vmstate_tlbemb,
        &vmstate_tlbmas,
        &vmstate_compat,
        NULL
    }
};