/* qemu/qmp-unstable.git: target-arm/machine.c */
#include "hw/hw.h"
#include "hw/boards.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"

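/* The *_needed() predicates below gate the optional vmstate subsections
 * listed at the end of this file: broadly, a subsection is only emitted
 * into (and expected from) the migration stream when its needed() callback
 * returns true, so a CPU without e.g. VFP or iwMMXt does not carry that
 * state across migration.
 */
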
static bool vfp_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_VFP);
}

static int get_fpscr(QEMUFile *f, void *opaque, size_t size)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    vfp_set_fpscr(env, val);
    return 0;
}

static void put_fpscr(QEMUFile *f, void *opaque, size_t size)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    qemu_put_be32(f, vfp_get_fpscr(env));
}

static const VMStateInfo vmstate_fpscr = {
    .name = "fpscr",
    .get = get_fpscr,
    .put = put_fpscr,
};

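/* A field entry whose .info points at vmstate_fpscr (see the "fpscr" entry
 * in vmstate_vfp below) is marshalled through get_fpscr/put_fpscr rather
 * than through one of the stock vmstate_info_* handlers.  Note that with
 * .offset = 0 the pointer handed to the callbacks is the ARMCPU structure
 * itself, which is why they treat their 'opaque' argument as the CPU rather
 * than as an individual field.
 */
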
static const VMStateDescription vmstate_vfp = {
    .name = "cpu/vfp",
    .version_id = 3,
    .minimum_version_id = 3,
    .fields = (VMStateField[]) {
        VMSTATE_FLOAT64_ARRAY(env.vfp.regs, ARMCPU, 64),
        /* The xregs array is a little awkward because element 1 (FPSCR)
         * requires a specific accessor, so we have to split it up in
         * the vmstate:
         */
        VMSTATE_UINT32(env.vfp.xregs[0], ARMCPU),
        VMSTATE_UINT32_SUB_ARRAY(env.vfp.xregs, ARMCPU, 2, 14),
        {
            .name = "fpscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_fpscr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};

static bool iwmmxt_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_IWMMXT);
}

static const VMStateDescription vmstate_iwmmxt = {
    .name = "cpu/iwmmxt",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16),
        VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16),
        VMSTATE_END_OF_LIST()
    }
};

static bool m_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_m = {
    .name = "cpu/m",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.other_sp, ARMCPU),
        VMSTATE_UINT32(env.v7m.vecbase, ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri, ARMCPU),
        VMSTATE_UINT32(env.v7m.control, ARMCPU),
        VMSTATE_INT32(env.v7m.current_sp, ARMCPU),
        VMSTATE_INT32(env.v7m.exception, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool thumb2ee_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_THUMB2EE);
}

static const VMStateDescription vmstate_thumb2ee = {
    .name = "cpu/thumb2ee",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.teecr, ARMCPU),
        VMSTATE_UINT32(env.teehbr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static int get_cpsr(QEMUFile *f, void *opaque, size_t size)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    /* Avoid mode switch when restoring CPSR: force the incoming mode bits
     * into uncached_cpsr first so that cpsr_write() does not bank-switch
     * registers whose values are supplied directly by the migration stream.
     */
    env->uncached_cpsr = val & CPSR_M;
    cpsr_write(env, val, 0xffffffff);
    return 0;
}

static void put_cpsr(QEMUFile *f, void *opaque, size_t size)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    qemu_put_be32(f, cpsr_read(env));
}

static const VMStateInfo vmstate_cpsr = {
    .name = "cpsr",
    .get = get_cpsr,
    .put = put_cpsr,
};

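/* Like FPSCR, the CPSR needs custom accessors: CPUARMState does not store a
 * single architectural CPSR word but caches pieces of it separately (the
 * mode bits in uncached_cpsr, the condition flags in their own fields), so
 * cpsr_read() and cpsr_write() are used to assemble and apply the full
 * 32-bit value for migration.
 */
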
static void cpu_pre_save(void *opaque)
{
    ARMCPU *cpu = opaque;

    if (kvm_enabled()) {
        if (!write_kvmstate_to_list(cpu)) {
            /* This should never fail */
            abort();
        }
    } else {
        if (!write_cpustate_to_list(cpu)) {
            /* This should never fail. */
            abort();
        }
    }

    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    memcpy(cpu->cpreg_vmstate_indexes, cpu->cpreg_indexes,
           cpu->cpreg_array_len * sizeof(uint64_t));
    memcpy(cpu->cpreg_vmstate_values, cpu->cpreg_values,
           cpu->cpreg_array_len * sizeof(uint64_t));
}

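/* The cpreg_vmstate_* copies exist so that the migration stream is staged
 * separately from the live cpreg_indexes/cpreg_values lists: on save they
 * are a snapshot of the live list, and on load the incoming data lands in
 * them first and is only reconciled into the live list by cpu_post_load()
 * below.
 */
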
static int cpu_post_load(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;
    int i, v;

    /* Update the values list from the incoming migration data.
     * Anything in the incoming data which we don't know about is
     * a migration failure; anything we know about but the incoming
     * data doesn't specify retains its current (reset) value.
     * The indexes list remains untouched -- we only inspect the
     * incoming migration index list so we can match the values array
     * entries with the right slots in our own values array.
     */
    for (i = 0, v = 0; i < cpu->cpreg_array_len
             && v < cpu->cpreg_vmstate_array_len; i++) {
        if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) {
            /* register in our list but not incoming : skip it */
            continue;
        }
        if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) {
            /* register in their list but not ours: fail migration */
            return -1;
        }
        /* matching register, copy the value over */
        cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v];
        v++;
    }

    if (kvm_enabled()) {
        if (!write_list_to_kvmstate(cpu)) {
            return -1;
        }
        /* Note that it's OK for the TCG side not to know about
         * every register in the list; KVM is authoritative if
         * we're using it.
         */
        write_list_to_cpustate(cpu);
    } else {
        if (!write_list_to_cpustate(cpu)) {
            return -1;
        }
    }

    return 0;
}

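/* A small worked example of the merge above (register indexes made up for
 * illustration): if our sorted list of known coprocessor registers is
 * {0x10, 0x20, 0x30} and the incoming stream carries {0x10, 0x30}, then
 * 0x20 simply keeps its reset value; if the stream instead carries
 * {0x10, 0x15, 0x30} and 0x15 is not in our list, cpu_post_load() returns
 * -1 and the migration fails rather than silently dropping guest state.
 */
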
const VMStateDescription vmstate_arm_cpu = {
    .name = "cpu",
    .version_id = 20,
    .minimum_version_id = 20,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
        {
            .name = "cpsr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_cpsr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_UINT32(env.spsr, ARMCPU),
        VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 6),
        VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 6),
        VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
        VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
        VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
        /* The length-check must come before the arrays to avoid
         * incoming data possibly overflowing the array.
         */
        VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len, ARMCPU),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
        VMSTATE_UINT64(env.exclusive_val, ARMCPU),
        VMSTATE_UINT64(env.exclusive_high, ARMCPU),
        VMSTATE_UINT64(env.features, ARMCPU),
        VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
        VMSTATE_UINT32(env.exception.fsr, ARMCPU),
        VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
        VMSTATE_TIMER(gt_timer[GTIMER_PHYS], ARMCPU),
        VMSTATE_TIMER(gt_timer[GTIMER_VIRT], ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_vfp,
            .needed = vfp_needed,
        } , {
            .vmsd = &vmstate_iwmmxt,
            .needed = iwmmxt_needed,
        } , {
            .vmsd = &vmstate_m,
            .needed = m_needed,
        } , {
            .vmsd = &vmstate_thumb2ee,
            .needed = thumb2ee_needed,
        } , {
            /* empty */
        }
    }
};
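
/* For reference, vmstate_arm_cpu is not registered from this file; in the
 * surrounding QEMU tree of this era it is hooked up through the CPU class,
 * roughly as in the sketch below (the exact code lives in target-arm/cpu.c,
 * so details may differ):
 *
 *     static void arm_cpu_class_init(ObjectClass *oc, void *data)
 *     {
 *         CPUClass *cc = CPU_CLASS(oc);
 *         ...
 *         cc->vmsd = &vmstate_arm_cpu;
 *     }
 *
 * The generic CPU/migration code then saves and restores the state described
 * above whenever the VM is migrated or snapshotted.
 */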