/*
 * QEMU target-i386/machine.c — x86 CPU state save/load for
 * migration and snapshots.
 * (Imported from qemu.git; commit: "Fix elf loader range checking",
 *  blob bb8b9dbd5ae39f0815e9ee14f9e076b2d785bb1b.)
 */
1 #include "hw/hw.h"
2 #include "hw/boards.h"
3 #include "hw/pc.h"
4 #include "hw/isa.h"
6 #include "exec-all.h"
7 #include "kvm.h"
9 static void cpu_put_seg(QEMUFile *f, SegmentCache *dt)
11 qemu_put_be32(f, dt->selector);
12 qemu_put_betl(f, dt->base);
13 qemu_put_be32(f, dt->limit);
14 qemu_put_be32(f, dt->flags);
17 static void cpu_get_seg(QEMUFile *f, SegmentCache *dt)
19 dt->selector = qemu_get_be32(f);
20 dt->base = qemu_get_betl(f);
21 dt->limit = qemu_get_be32(f);
22 dt->flags = qemu_get_be32(f);
25 void cpu_save(QEMUFile *f, void *opaque)
27 CPUState *env = opaque;
28 uint16_t fptag, fpus, fpuc, fpregs_format;
29 uint32_t hflags;
30 int32_t a20_mask;
31 int i;
33 cpu_synchronize_state(env, 0);
35 for(i = 0; i < CPU_NB_REGS; i++)
36 qemu_put_betls(f, &env->regs[i]);
37 qemu_put_betls(f, &env->eip);
38 qemu_put_betls(f, &env->eflags);
39 hflags = env->hflags; /* XXX: suppress most of the redundant hflags */
40 qemu_put_be32s(f, &hflags);
42 /* FPU */
43 fpuc = env->fpuc;
44 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
45 fptag = 0;
46 for(i = 0; i < 8; i++) {
47 fptag |= ((!env->fptags[i]) << i);
50 qemu_put_be16s(f, &fpuc);
51 qemu_put_be16s(f, &fpus);
52 qemu_put_be16s(f, &fptag);
54 #ifdef USE_X86LDOUBLE
55 fpregs_format = 0;
56 #else
57 fpregs_format = 1;
58 #endif
59 qemu_put_be16s(f, &fpregs_format);
61 for(i = 0; i < 8; i++) {
62 #ifdef USE_X86LDOUBLE
64 uint64_t mant;
65 uint16_t exp;
66 /* we save the real CPU data (in case of MMX usage only 'mant'
67 contains the MMX register */
68 cpu_get_fp80(&mant, &exp, env->fpregs[i].d);
69 qemu_put_be64(f, mant);
70 qemu_put_be16(f, exp);
72 #else
73 /* if we use doubles for float emulation, we save the doubles to
74 avoid losing information in case of MMX usage. It can give
75 problems if the image is restored on a CPU where long
76 doubles are used instead. */
77 qemu_put_be64(f, env->fpregs[i].mmx.MMX_Q(0));
78 #endif
81 for(i = 0; i < 6; i++)
82 cpu_put_seg(f, &env->segs[i]);
83 cpu_put_seg(f, &env->ldt);
84 cpu_put_seg(f, &env->tr);
85 cpu_put_seg(f, &env->gdt);
86 cpu_put_seg(f, &env->idt);
88 qemu_put_be32s(f, &env->sysenter_cs);
89 qemu_put_betls(f, &env->sysenter_esp);
90 qemu_put_betls(f, &env->sysenter_eip);
92 qemu_put_betls(f, &env->cr[0]);
93 qemu_put_betls(f, &env->cr[2]);
94 qemu_put_betls(f, &env->cr[3]);
95 qemu_put_betls(f, &env->cr[4]);
97 for(i = 0; i < 8; i++)
98 qemu_put_betls(f, &env->dr[i]);
100 /* MMU */
101 a20_mask = (int32_t) env->a20_mask;
102 qemu_put_sbe32s(f, &a20_mask);
104 /* XMM */
105 qemu_put_be32s(f, &env->mxcsr);
106 for(i = 0; i < CPU_NB_REGS; i++) {
107 qemu_put_be64s(f, &env->xmm_regs[i].XMM_Q(0));
108 qemu_put_be64s(f, &env->xmm_regs[i].XMM_Q(1));
111 #ifdef TARGET_X86_64
112 qemu_put_be64s(f, &env->efer);
113 qemu_put_be64s(f, &env->star);
114 qemu_put_be64s(f, &env->lstar);
115 qemu_put_be64s(f, &env->cstar);
116 qemu_put_be64s(f, &env->fmask);
117 qemu_put_be64s(f, &env->kernelgsbase);
118 #endif
119 qemu_put_be32s(f, &env->smbase);
121 qemu_put_be64s(f, &env->pat);
122 qemu_put_be32s(f, &env->hflags2);
124 qemu_put_be64s(f, &env->vm_hsave);
125 qemu_put_be64s(f, &env->vm_vmcb);
126 qemu_put_be64s(f, &env->tsc_offset);
127 qemu_put_be64s(f, &env->intercept);
128 qemu_put_be16s(f, &env->intercept_cr_read);
129 qemu_put_be16s(f, &env->intercept_cr_write);
130 qemu_put_be16s(f, &env->intercept_dr_read);
131 qemu_put_be16s(f, &env->intercept_dr_write);
132 qemu_put_be32s(f, &env->intercept_exceptions);
133 qemu_put_8s(f, &env->v_tpr);
135 /* MTRRs */
136 for(i = 0; i < 11; i++)
137 qemu_put_be64s(f, &env->mtrr_fixed[i]);
138 qemu_put_be64s(f, &env->mtrr_deftype);
139 for(i = 0; i < 8; i++) {
140 qemu_put_be64s(f, &env->mtrr_var[i].base);
141 qemu_put_be64s(f, &env->mtrr_var[i].mask);
144 for (i = 0; i < sizeof(env->interrupt_bitmap)/8; i++) {
145 qemu_put_be64s(f, &env->interrupt_bitmap[i]);
147 qemu_put_be64s(f, &env->tsc);
148 qemu_put_be32s(f, &env->mp_state);
#ifdef USE_X86LDOUBLE
/* XXX: add that in a FPU generic layer */
/* Overlay for the 80-bit x87 register image: 64-bit significand plus
   16-bit sign/exponent word. */
union x86_longdouble {
    uint64_t mant;
    uint16_t exp;
};

#define MANTD1(fp)	(fp & ((1LL << 52) - 1))
#define EXPBIAS1 1023
#define EXPD1(fp)	((fp >> 52) & 0x7FF)
#define SIGND1(fp)	((fp >> 32) & 0x80000000)

/* Expand an IEEE-754 binary64 bit pattern into the 80-bit extended
   format used by the x87 register file.  The 52-bit mantissa is shifted
   up and the explicit integer bit is forced to 1; the exponent is
   rebiased from 1023 to 16383 and the sign moved into bit 15.
   NOTE(review): the forced integer bit means zero and denormal inputs
   do not round-trip exactly — appears to be a known limitation of this
   loader path; confirm against cpu_set_fp80(). */
static void fp64_to_fp80(union x86_longdouble *p, uint64_t d)
{
    int e;
    /* mantissa: shift into the high 63 bits, set the integer bit */
    p->mant = (MANTD1(d) << 11) | (1LL << 63);
    /* exponent (rebiased) + sign in bit 15 */
    e = EXPD1(d) - EXPBIAS1 + 16383;
    e |= SIGND1(d) >> 16;
    p->exp = e;
}
#endif
175 int cpu_load(QEMUFile *f, void *opaque, int version_id)
177 CPUState *env = opaque;
178 int i, guess_mmx;
179 uint32_t hflags;
180 uint16_t fpus, fpuc, fptag, fpregs_format;
181 int32_t a20_mask;
183 if (version_id < 3 || version_id > CPU_SAVE_VERSION)
184 return -EINVAL;
185 for(i = 0; i < CPU_NB_REGS; i++)
186 qemu_get_betls(f, &env->regs[i]);
187 qemu_get_betls(f, &env->eip);
188 qemu_get_betls(f, &env->eflags);
189 qemu_get_be32s(f, &hflags);
191 qemu_get_be16s(f, &fpuc);
192 qemu_get_be16s(f, &fpus);
193 qemu_get_be16s(f, &fptag);
194 qemu_get_be16s(f, &fpregs_format);
196 /* NOTE: we cannot always restore the FPU state if the image come
197 from a host with a different 'USE_X86LDOUBLE' define. We guess
198 if we are in an MMX state to restore correctly in that case. */
199 guess_mmx = ((fptag == 0xff) && (fpus & 0x3800) == 0);
200 for(i = 0; i < 8; i++) {
201 uint64_t mant;
202 uint16_t exp;
204 switch(fpregs_format) {
205 case 0:
206 mant = qemu_get_be64(f);
207 exp = qemu_get_be16(f);
208 #ifdef USE_X86LDOUBLE
209 env->fpregs[i].d = cpu_set_fp80(mant, exp);
210 #else
211 /* difficult case */
212 if (guess_mmx)
213 env->fpregs[i].mmx.MMX_Q(0) = mant;
214 else
215 env->fpregs[i].d = cpu_set_fp80(mant, exp);
216 #endif
217 break;
218 case 1:
219 mant = qemu_get_be64(f);
220 #ifdef USE_X86LDOUBLE
222 union x86_longdouble *p;
223 /* difficult case */
224 p = (void *)&env->fpregs[i];
225 if (guess_mmx) {
226 p->mant = mant;
227 p->exp = 0xffff;
228 } else {
229 fp64_to_fp80(p, mant);
232 #else
233 env->fpregs[i].mmx.MMX_Q(0) = mant;
234 #endif
235 break;
236 default:
237 return -EINVAL;
241 env->fpuc = fpuc;
242 /* XXX: restore FPU round state */
243 env->fpstt = (fpus >> 11) & 7;
244 env->fpus = fpus & ~0x3800;
245 fptag ^= 0xff;
246 for(i = 0; i < 8; i++) {
247 env->fptags[i] = (fptag >> i) & 1;
250 for(i = 0; i < 6; i++)
251 cpu_get_seg(f, &env->segs[i]);
252 cpu_get_seg(f, &env->ldt);
253 cpu_get_seg(f, &env->tr);
254 cpu_get_seg(f, &env->gdt);
255 cpu_get_seg(f, &env->idt);
257 qemu_get_be32s(f, &env->sysenter_cs);
258 if (version_id >= 7) {
259 qemu_get_betls(f, &env->sysenter_esp);
260 qemu_get_betls(f, &env->sysenter_eip);
261 } else {
262 env->sysenter_esp = qemu_get_be32(f);
263 env->sysenter_eip = qemu_get_be32(f);
266 qemu_get_betls(f, &env->cr[0]);
267 qemu_get_betls(f, &env->cr[2]);
268 qemu_get_betls(f, &env->cr[3]);
269 qemu_get_betls(f, &env->cr[4]);
271 for(i = 0; i < 8; i++)
272 qemu_get_betls(f, &env->dr[i]);
273 cpu_breakpoint_remove_all(env, BP_CPU);
274 cpu_watchpoint_remove_all(env, BP_CPU);
275 for (i = 0; i < 4; i++)
276 hw_breakpoint_insert(env, i);
278 /* MMU */
279 qemu_get_sbe32s(f, &a20_mask);
280 env->a20_mask = a20_mask;
282 qemu_get_be32s(f, &env->mxcsr);
283 for(i = 0; i < CPU_NB_REGS; i++) {
284 qemu_get_be64s(f, &env->xmm_regs[i].XMM_Q(0));
285 qemu_get_be64s(f, &env->xmm_regs[i].XMM_Q(1));
288 #ifdef TARGET_X86_64
289 qemu_get_be64s(f, &env->efer);
290 qemu_get_be64s(f, &env->star);
291 qemu_get_be64s(f, &env->lstar);
292 qemu_get_be64s(f, &env->cstar);
293 qemu_get_be64s(f, &env->fmask);
294 qemu_get_be64s(f, &env->kernelgsbase);
295 #endif
296 if (version_id >= 4) {
297 qemu_get_be32s(f, &env->smbase);
299 if (version_id >= 5) {
300 qemu_get_be64s(f, &env->pat);
301 qemu_get_be32s(f, &env->hflags2);
302 if (version_id < 6)
303 qemu_get_be32s(f, &env->halted);
305 qemu_get_be64s(f, &env->vm_hsave);
306 qemu_get_be64s(f, &env->vm_vmcb);
307 qemu_get_be64s(f, &env->tsc_offset);
308 qemu_get_be64s(f, &env->intercept);
309 qemu_get_be16s(f, &env->intercept_cr_read);
310 qemu_get_be16s(f, &env->intercept_cr_write);
311 qemu_get_be16s(f, &env->intercept_dr_read);
312 qemu_get_be16s(f, &env->intercept_dr_write);
313 qemu_get_be32s(f, &env->intercept_exceptions);
314 qemu_get_8s(f, &env->v_tpr);
317 if (version_id >= 8) {
318 /* MTRRs */
319 for(i = 0; i < 11; i++)
320 qemu_get_be64s(f, &env->mtrr_fixed[i]);
321 qemu_get_be64s(f, &env->mtrr_deftype);
322 for(i = 0; i < 8; i++) {
323 qemu_get_be64s(f, &env->mtrr_var[i].base);
324 qemu_get_be64s(f, &env->mtrr_var[i].mask);
327 if (version_id >= 9) {
328 for (i = 0; i < sizeof(env->interrupt_bitmap)/8; i++) {
329 qemu_get_be64s(f, &env->interrupt_bitmap[i]);
331 qemu_get_be64s(f, &env->tsc);
332 qemu_get_be32s(f, &env->mp_state);
335 /* XXX: ensure compatiblity for halted bit ? */
336 /* XXX: compute redundant hflags bits */
337 env->hflags = hflags;
338 tlb_flush(env, 1);
339 cpu_synchronize_state(env, 1);
340 return 0;