/* target-arm/machine.c: VMState (save/load) definitions for the ARM CPU */
#include "hw/hw.h"
#include "hw/boards.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "internals.h"
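
/* Each optional CPU feature below gets its own migration subsection.
 * The "needed" predicate decides whether that subsection is sent at all,
 * so a CPU without e.g. VFP neither emits nor expects the VFP state.
 */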
static bool vfp_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_VFP);
}
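
/* FPSCR (vfp.xregs[1]) has to go through the vfp_get_fpscr()/vfp_set_fpscr()
 * accessors rather than a direct field load/store, so it gets a custom
 * VMStateInfo with explicit get/put handlers instead of a plain
 * VMSTATE_UINT32 entry.
 */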
static int get_fpscr(QEMUFile *f, void *opaque, size_t size)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    vfp_set_fpscr(env, val);
    return 0;
}

static void put_fpscr(QEMUFile *f, void *opaque, size_t size)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    qemu_put_be32(f, vfp_get_fpscr(env));
}

static const VMStateInfo vmstate_fpscr = {
    .name = "fpscr",
    .get = get_fpscr,
    .put = put_fpscr,
};

static const VMStateDescription vmstate_vfp = {
    .name = "cpu/vfp",
    .version_id = 3,
    .minimum_version_id = 3,
    .fields = (VMStateField[]) {
        VMSTATE_FLOAT64_ARRAY(env.vfp.regs, ARMCPU, 64),
        /* The xregs array is a little awkward because element 1 (FPSCR)
         * requires a specific accessor, so we have to split it up in
         * the vmstate:
         */
        VMSTATE_UINT32(env.vfp.xregs[0], ARMCPU),
        VMSTATE_UINT32_SUB_ARRAY(env.vfp.xregs, ARMCPU, 2, 14),
        {
            .name = "fpscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_fpscr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
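
/* The iWMMXt, M-profile and ThumbEE subsections below follow the same
 * pattern as the VFP one: a "needed" predicate keyed on the CPU feature
 * bit, plus a VMStateDescription listing the state to migrate.
 */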
static bool iwmmxt_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_IWMMXT);
}

static const VMStateDescription vmstate_iwmmxt = {
    .name = "cpu/iwmmxt",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16),
        VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16),
        VMSTATE_END_OF_LIST()
    }
};

static bool m_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_M);
}

static const VMStateDescription vmstate_m = {
    .name = "cpu/m",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.v7m.other_sp, ARMCPU),
        VMSTATE_UINT32(env.v7m.vecbase, ARMCPU),
        VMSTATE_UINT32(env.v7m.basepri, ARMCPU),
        VMSTATE_UINT32(env.v7m.control, ARMCPU),
        VMSTATE_INT32(env.v7m.current_sp, ARMCPU),
        VMSTATE_INT32(env.v7m.exception, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};

static bool thumb2ee_needed(void *opaque)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;

    return arm_feature(env, ARM_FEATURE_THUMB2EE);
}

static const VMStateDescription vmstate_thumb2ee = {
    .name = "cpu/thumb2ee",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(env.teecr, ARMCPU),
        VMSTATE_UINT32(env.teehbr, ARMCPU),
        VMSTATE_END_OF_LIST()
    }
};
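
/* A single 32-bit "cpsr" slot in the migration stream carries either the
 * AArch32 CPSR or the AArch64 PSTATE, whichever matches the CPU's current
 * execution state.  On load, the PSTATE_nRW bit of the incoming value
 * tells us which of the two we received.
 */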
static int get_cpsr(QEMUFile *f, void *opaque, size_t size)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val = qemu_get_be32(f);

    env->aarch64 = ((val & PSTATE_nRW) == 0);

    if (is_a64(env)) {
        pstate_write(env, val);
        return 0;
    }

    /* Avoid mode switch when restoring CPSR */
    env->uncached_cpsr = val & CPSR_M;
    cpsr_write(env, val, 0xffffffff);
    return 0;
}

static void put_cpsr(QEMUFile *f, void *opaque, size_t size)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    uint32_t val;

    if (is_a64(env)) {
        val = pstate_read(env);
    } else {
        val = cpsr_read(env);
    }

    qemu_put_be32(f, val);
}

static const VMStateInfo vmstate_cpsr = {
    .name = "cpsr",
    .get = get_cpsr,
    .put = put_cpsr,
};
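
/* Before saving, pull the current coprocessor/system register values into
 * the cpreg_vmstate_* arrays that actually get migrated.  The source of
 * truth is KVM when it is in use, the TCG CPU state otherwise.
 */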
static void cpu_pre_save(void *opaque)
{
    ARMCPU *cpu = opaque;

    if (kvm_enabled()) {
        if (!write_kvmstate_to_list(cpu)) {
            /* This should never fail */
            abort();
        }
    } else {
        if (!write_cpustate_to_list(cpu)) {
            /* This should never fail. */
            abort();
        }
    }

    cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len;
    memcpy(cpu->cpreg_vmstate_indexes, cpu->cpreg_indexes,
           cpu->cpreg_array_len * sizeof(uint64_t));
    memcpy(cpu->cpreg_vmstate_values, cpu->cpreg_values,
           cpu->cpreg_array_len * sizeof(uint64_t));
}
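
/* The merge loop below walks both index lists in a single pass; this
 * relies on cpreg_indexes[] and the incoming cpreg_vmstate_indexes[]
 * both being sorted in ascending order of register index.
 */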
static int cpu_post_load(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;
    int i, v;

    /* Update the values list from the incoming migration data.
     * Anything in the incoming data which we don't know about is
     * a migration failure; anything we know about but the incoming
     * data doesn't specify retains its current (reset) value.
     * The indexes list remains untouched -- we only inspect the
     * incoming migration index list so we can match the values array
     * entries with the right slots in our own values array.
     */
    for (i = 0, v = 0; i < cpu->cpreg_array_len
             && v < cpu->cpreg_vmstate_array_len; i++) {
        if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) {
            /* register in our list but not incoming : skip it */
            continue;
        }
        if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) {
            /* register in their list but not ours: fail migration */
            return -1;
        }
        /* matching register, copy the value over */
        cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v];
        v++;
    }

    if (kvm_enabled()) {
        if (!write_list_to_kvmstate(cpu)) {
            return -1;
        }
        /* Note that it's OK for the TCG side not to know about
         * every register in the list; KVM is authoritative if
         * we're using it.
         */
        write_list_to_cpustate(cpu);
    } else {
        if (!write_list_to_cpustate(cpu)) {
            return -1;
        }
    }

    hw_breakpoint_update_all(cpu);
    hw_watchpoint_update_all(cpu);

    return 0;
}
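
/* Top-level migration description for the ARM CPU.  version_id and
 * minimum_version_id are both 22, so streams produced by older versions
 * of this description are not accepted.  The optional feature state is
 * attached as subsections at the end.  This description is registered
 * elsewhere (typically via the CPU class's vmsd pointer).
 */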
const VMStateDescription vmstate_arm_cpu = {
    .name = "cpu",
    .version_id = 22,
    .minimum_version_id = 22,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16),
        VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32),
        VMSTATE_UINT64(env.pc, ARMCPU),
        {
            .name = "cpsr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_cpsr,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_UINT32(env.spsr, ARMCPU),
        VMSTATE_UINT64_ARRAY(env.banked_spsr, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r13, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.banked_r14, ARMCPU, 8),
        VMSTATE_UINT32_ARRAY(env.usr_regs, ARMCPU, 5),
        VMSTATE_UINT32_ARRAY(env.fiq_regs, ARMCPU, 5),
        VMSTATE_UINT64_ARRAY(env.elr_el, ARMCPU, 4),
        VMSTATE_UINT64_ARRAY(env.sp_el, ARMCPU, 4),
        /* The length-check must come before the arrays to avoid
         * incoming data possibly overflowing the array.
         */
        VMSTATE_INT32_POSITIVE_LE(cpreg_vmstate_array_len, ARMCPU),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_indexes, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
                             cpreg_vmstate_array_len,
                             0, vmstate_info_uint64, uint64_t),
        VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
        VMSTATE_UINT64(env.exclusive_val, ARMCPU),
        VMSTATE_UINT64(env.exclusive_high, ARMCPU),
        VMSTATE_UINT64(env.features, ARMCPU),
        VMSTATE_UINT32(env.exception.syndrome, ARMCPU),
        VMSTATE_UINT32(env.exception.fsr, ARMCPU),
        VMSTATE_UINT64(env.exception.vaddress, ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU),
        VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU),
        VMSTATE_BOOL(powered_off, ARMCPU),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_vfp,
            .needed = vfp_needed,
        }, {
            .vmsd = &vmstate_iwmmxt,
            .needed = iwmmxt_needed,
        }, {
            .vmsd = &vmstate_m,
            .needed = m_needed,
        }, {
            .vmsd = &vmstate_thumb2ee,
            .needed = thumb2ee_needed,
        }, {
            /* empty */
        }
    }
};