ARM host fixes
[qemu/aliguori-queue.git] / target-i386 / machine.c
blob 8bf13cce832b05eacfd09438aa487006e3be544b
#include "hw/hw.h"
#include "hw/boards.h"
#include "hw/pc.h"
#include "hw/isa.h"
#include "host-utils.h"

#include "exec-all.h"
#include "kvm.h"
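
/* Hand-written savevm handlers for the x86 CPUState: cpu_save() serializes
 * the CPU into a QEMUFile and cpu_load() restores it.  Every field written
 * by cpu_save() must be read back by cpu_load() in exactly the same order.
 * (The handlers are presumably registered elsewhere via register_savevm();
 * that call is not part of this file.) */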

static void cpu_put_seg(QEMUFile *f, SegmentCache *dt)
{
    qemu_put_be32(f, dt->selector);
    qemu_put_betl(f, dt->base);     /* betl: target_ulong, big-endian */
    qemu_put_be32(f, dt->limit);
    qemu_put_be32(f, dt->flags);
}

static void cpu_get_seg(QEMUFile *f, SegmentCache *dt)
{
    dt->selector = qemu_get_be32(f);
    dt->base = qemu_get_betl(f);
    dt->limit = qemu_get_be32(f);
    dt->flags = qemu_get_be32(f);
}
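
/* Save handler.  cpu_synchronize_state(env, 0) first pulls the current
 * register state from the accelerator (KVM) into 'env'; after that every
 * field is written to the stream in a fixed order that cpu_load() mirrors.
 * CPU_SAVE_VERSION has to be bumped whenever this layout changes. */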
void cpu_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;
    uint16_t fptag, fpus, fpuc, fpregs_format;
    uint32_t hflags;
    int32_t a20_mask;
    int32_t pending_irq;
    int i, bit;

    cpu_synchronize_state(env, 0);

    for(i = 0; i < CPU_NB_REGS; i++)
        qemu_put_betls(f, &env->regs[i]);
    qemu_put_betls(f, &env->eip);
    qemu_put_betls(f, &env->eflags);
    hflags = env->hflags; /* XXX: suppress most of the redundant hflags */
    qemu_put_be32s(f, &hflags);

    /* FPU */
    fpuc = env->fpuc;
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= ((!env->fptags[i]) << i);
    }

    qemu_put_be16s(f, &fpuc);
    qemu_put_be16s(f, &fpus);
    qemu_put_be16s(f, &fptag);
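
    /* fpregs_format records how the FP registers were serialized:
     * 0 = 80-bit values as a (mantissa, exponent) pair per register
     *     (host built with USE_X86LDOUBLE),
     * 1 = raw 64-bit doubles (soft-float build without long doubles). */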
#ifdef USE_X86LDOUBLE
    fpregs_format = 0;
#else
    fpregs_format = 1;
#endif
    qemu_put_be16s(f, &fpregs_format);

    for(i = 0; i < 8; i++) {
#ifdef USE_X86LDOUBLE
        {
            uint64_t mant;
            uint16_t exp;
            /* we save the real CPU data (in case of MMX usage only 'mant'
               contains the MMX register) */
            cpu_get_fp80(&mant, &exp, env->fpregs[i].d);
            qemu_put_be64(f, mant);
            qemu_put_be16(f, exp);
        }
#else
        /* if we use doubles for float emulation, we save the doubles to
           avoid losing information in case of MMX usage. It can give
           problems if the image is restored on a CPU where long
           doubles are used instead. */
        qemu_put_be64(f, env->fpregs[i].mmx.MMX_Q(0));
#endif
    }

    for(i = 0; i < 6; i++)
        cpu_put_seg(f, &env->segs[i]);
    cpu_put_seg(f, &env->ldt);
    cpu_put_seg(f, &env->tr);
    cpu_put_seg(f, &env->gdt);
    cpu_put_seg(f, &env->idt);

    qemu_put_be32s(f, &env->sysenter_cs);
    qemu_put_betls(f, &env->sysenter_esp);
    qemu_put_betls(f, &env->sysenter_eip);

    qemu_put_betls(f, &env->cr[0]);
    qemu_put_betls(f, &env->cr[2]);
    qemu_put_betls(f, &env->cr[3]);
    qemu_put_betls(f, &env->cr[4]);

    for(i = 0; i < 8; i++)
        qemu_put_betls(f, &env->dr[i]);

    /* MMU */
    a20_mask = (int32_t) env->a20_mask;
    qemu_put_sbe32s(f, &a20_mask);

    /* XMM */
    qemu_put_be32s(f, &env->mxcsr);
    for(i = 0; i < CPU_NB_REGS; i++) {
        qemu_put_be64s(f, &env->xmm_regs[i].XMM_Q(0));
        qemu_put_be64s(f, &env->xmm_regs[i].XMM_Q(1));
    }

#ifdef TARGET_X86_64
    qemu_put_be64s(f, &env->efer);
    qemu_put_be64s(f, &env->star);
    qemu_put_be64s(f, &env->lstar);
    qemu_put_be64s(f, &env->cstar);
    qemu_put_be64s(f, &env->fmask);
    qemu_put_be64s(f, &env->kernelgsbase);
#endif
    qemu_put_be32s(f, &env->smbase);

    qemu_put_be64s(f, &env->pat);
    qemu_put_be32s(f, &env->hflags2);
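
    /* SVM: host save area, current VMCB and intercept state */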
    qemu_put_be64s(f, &env->vm_hsave);
    qemu_put_be64s(f, &env->vm_vmcb);
    qemu_put_be64s(f, &env->tsc_offset);
    qemu_put_be64s(f, &env->intercept);
    qemu_put_be16s(f, &env->intercept_cr_read);
    qemu_put_be16s(f, &env->intercept_cr_write);
    qemu_put_be16s(f, &env->intercept_dr_read);
    qemu_put_be16s(f, &env->intercept_dr_write);
    qemu_put_be32s(f, &env->intercept_exceptions);
    qemu_put_8s(f, &env->v_tpr);

    /* MTRRs */
    for(i = 0; i < 11; i++)
        qemu_put_be64s(f, &env->mtrr_fixed[i]);
    qemu_put_be64s(f, &env->mtrr_deftype);
    for(i = 0; i < 8; i++) {
        qemu_put_be64s(f, &env->mtrr_var[i].base);
        qemu_put_be64s(f, &env->mtrr_var[i].mask);
    }

    /* KVM-related states */

    /* There can only be one pending IRQ set in the bitmap at a time, so try
       to find it and save its number instead (-1 for none). */
    pending_irq = -1;
    for (i = 0; i < ARRAY_SIZE(env->interrupt_bitmap); i++) {
        if (env->interrupt_bitmap[i]) {
            bit = ctz64(env->interrupt_bitmap[i]);
            pending_irq = i * 64 + bit;
            break;
        }
    }
    qemu_put_sbe32s(f, &pending_irq);
    qemu_put_be32s(f, &env->mp_state);
    qemu_put_be64s(f, &env->tsc);

    /* MCE */
    qemu_put_be64s(f, &env->mcg_cap);
    if (env->mcg_cap) {
        qemu_put_be64s(f, &env->mcg_status);
        qemu_put_be64s(f, &env->mcg_ctl);
        for (i = 0; i < (env->mcg_cap & 0xff); i++) {
            qemu_put_be64s(f, &env->mce_banks[4*i]);
            qemu_put_be64s(f, &env->mce_banks[4*i + 1]);
            qemu_put_be64s(f, &env->mce_banks[4*i + 2]);
            qemu_put_be64s(f, &env->mce_banks[4*i + 3]);
        }
    }
}

#ifdef USE_X86LDOUBLE
/* XXX: add that in a FPU generic layer */
union x86_longdouble {
    uint64_t mant;
    uint16_t exp;
};

#define MANTD1(fp) (fp & ((1LL << 52) - 1))
#define EXPBIAS1 1023
#define EXPD1(fp)  ((fp >> 52) & 0x7FF)
#define SIGND1(fp) ((fp >> 32) & 0x80000000)
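
/* Expand an IEEE 754 double into the 80-bit extended format stored in
 * env->fpregs: shift the 52-bit mantissa up by 11, set the explicit
 * integer bit, rebias the exponent from 1023 to 16383 and copy the sign
 * into bit 15 of 'exp'. */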
static void fp64_to_fp80(union x86_longdouble *p, uint64_t temp)
{
    int e;
    /* mantissa */
    p->mant = (MANTD1(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD1(temp) - EXPBIAS1 + 16383;
    e |= SIGND1(temp) >> 16;
    p->exp = e;
}
#endif
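
/* Load handler: reads fields back in exactly the order cpu_save() wrote
 * them.  version_id is the save version found in the image; fields added
 * in later versions are only read when the image is new enough, so older
 * snapshots keep loading. */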
int cpu_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;
    int i, guess_mmx;
    uint32_t hflags;
    uint16_t fpus, fpuc, fptag, fpregs_format;
    int32_t a20_mask;
    int32_t pending_irq;

    if (version_id < 3 || version_id > CPU_SAVE_VERSION)
        return -EINVAL;
    for(i = 0; i < CPU_NB_REGS; i++)
        qemu_get_betls(f, &env->regs[i]);
    qemu_get_betls(f, &env->eip);
    qemu_get_betls(f, &env->eflags);
    qemu_get_be32s(f, &hflags);

    qemu_get_be16s(f, &fpuc);
    qemu_get_be16s(f, &fpus);
    qemu_get_be16s(f, &fptag);
    qemu_get_be16s(f, &fpregs_format);

    /* NOTE: we cannot always restore the FPU state if the image comes
       from a host with a different 'USE_X86LDOUBLE' define. We guess
       whether we are in an MMX state to restore correctly in that case. */
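    /* MMX heuristic: with MMX in use all eight tag bits are valid (fptag
     * was saved inverted, so it reads 0xff) and the FPU top-of-stack field
     * is zero; in that case the 64-bit mantissa alone holds the MMX value. */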
    guess_mmx = ((fptag == 0xff) && (fpus & 0x3800) == 0);
    for(i = 0; i < 8; i++) {
        uint64_t mant;
        uint16_t exp;

        switch(fpregs_format) {
        case 0:
            mant = qemu_get_be64(f);
            exp = qemu_get_be16(f);
#ifdef USE_X86LDOUBLE
            env->fpregs[i].d = cpu_set_fp80(mant, exp);
#else
            /* difficult case */
            if (guess_mmx)
                env->fpregs[i].mmx.MMX_Q(0) = mant;
            else
                env->fpregs[i].d = cpu_set_fp80(mant, exp);
#endif
            break;
        case 1:
            mant = qemu_get_be64(f);
#ifdef USE_X86LDOUBLE
            {
                union x86_longdouble *p;
                /* difficult case */
                p = (void *)&env->fpregs[i];
                if (guess_mmx) {
                    p->mant = mant;
                    p->exp = 0xffff;
                } else {
                    fp64_to_fp80(p, mant);
                }
            }
#else
            env->fpregs[i].mmx.MMX_Q(0) = mant;
#endif
            break;
        default:
            return -EINVAL;
        }
    }

    env->fpuc = fpuc;
    /* XXX: restore FPU round state */
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }

    for(i = 0; i < 6; i++)
        cpu_get_seg(f, &env->segs[i]);
    cpu_get_seg(f, &env->ldt);
    cpu_get_seg(f, &env->tr);
    cpu_get_seg(f, &env->gdt);
    cpu_get_seg(f, &env->idt);

    qemu_get_be32s(f, &env->sysenter_cs);
    if (version_id >= 7) {
        qemu_get_betls(f, &env->sysenter_esp);
        qemu_get_betls(f, &env->sysenter_eip);
    } else {
        /* old images stored the SYSENTER MSRs as fixed 32-bit values */
        env->sysenter_esp = qemu_get_be32(f);
        env->sysenter_eip = qemu_get_be32(f);
    }

    qemu_get_betls(f, &env->cr[0]);
    qemu_get_betls(f, &env->cr[2]);
    qemu_get_betls(f, &env->cr[3]);
    qemu_get_betls(f, &env->cr[4]);

    for(i = 0; i < 8; i++)
        qemu_get_betls(f, &env->dr[i]);
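    /* the debug registers were just reloaded: drop QEMU's old hardware
       breakpoints/watchpoints and re-insert them from DR0-DR7 */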
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
    for (i = 0; i < 4; i++)
        hw_breakpoint_insert(env, i);

    /* MMU */
    qemu_get_sbe32s(f, &a20_mask);
    env->a20_mask = a20_mask;

    qemu_get_be32s(f, &env->mxcsr);
    for(i = 0; i < CPU_NB_REGS; i++) {
        qemu_get_be64s(f, &env->xmm_regs[i].XMM_Q(0));
        qemu_get_be64s(f, &env->xmm_regs[i].XMM_Q(1));
    }

#ifdef TARGET_X86_64
    qemu_get_be64s(f, &env->efer);
    qemu_get_be64s(f, &env->star);
    qemu_get_be64s(f, &env->lstar);
    qemu_get_be64s(f, &env->cstar);
    qemu_get_be64s(f, &env->fmask);
    qemu_get_be64s(f, &env->kernelgsbase);
#endif
    if (version_id >= 4) {
        qemu_get_be32s(f, &env->smbase);
    }
    if (version_id >= 5) {
        qemu_get_be64s(f, &env->pat);
        qemu_get_be32s(f, &env->hflags2);
        if (version_id < 6)
            qemu_get_be32s(f, &env->halted); /* pre-v6 images stored 'halted' here */

        qemu_get_be64s(f, &env->vm_hsave);
        qemu_get_be64s(f, &env->vm_vmcb);
        qemu_get_be64s(f, &env->tsc_offset);
        qemu_get_be64s(f, &env->intercept);
        qemu_get_be16s(f, &env->intercept_cr_read);
        qemu_get_be16s(f, &env->intercept_cr_write);
        qemu_get_be16s(f, &env->intercept_dr_read);
        qemu_get_be16s(f, &env->intercept_dr_write);
        qemu_get_be32s(f, &env->intercept_exceptions);
        qemu_get_8s(f, &env->v_tpr);
    }

    if (version_id >= 8) {
        /* MTRRs */
        for(i = 0; i < 11; i++)
            qemu_get_be64s(f, &env->mtrr_fixed[i]);
        qemu_get_be64s(f, &env->mtrr_deftype);
        for(i = 0; i < 8; i++) {
            qemu_get_be64s(f, &env->mtrr_var[i].base);
            qemu_get_be64s(f, &env->mtrr_var[i].mask);
        }
    }

    if (version_id >= 9) {
        qemu_get_sbe32s(f, &pending_irq);
        memset(&env->interrupt_bitmap, 0, sizeof(env->interrupt_bitmap));
        if (pending_irq >= 0) {
            env->interrupt_bitmap[pending_irq / 64] |=
                (uint64_t)1 << (pending_irq % 64);
        }
        qemu_get_be32s(f, &env->mp_state);
        qemu_get_be64s(f, &env->tsc);
    }

    if (version_id >= 10) {
        qemu_get_be64s(f, &env->mcg_cap);
        if (env->mcg_cap) {
            qemu_get_be64s(f, &env->mcg_status);
            qemu_get_be64s(f, &env->mcg_ctl);
            for (i = 0; i < (env->mcg_cap & 0xff); i++) {
                qemu_get_be64s(f, &env->mce_banks[4*i]);
                qemu_get_be64s(f, &env->mce_banks[4*i + 1]);
                qemu_get_be64s(f, &env->mce_banks[4*i + 2]);
                qemu_get_be64s(f, &env->mce_banks[4*i + 3]);
            }
        }
    }

    /* XXX: ensure compatibility for halted bit ? */
    /* XXX: compute redundant hflags bits */
    env->hflags = hflags;
    tlb_flush(env, 1);
    cpu_synchronize_state(env, 1);
    return 0;
}