/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu.h"
#include "monitor.h"
#endif

//#define DEBUG_MMU
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
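
    /* CS.base = 0xffff0000 together with EIP = 0xfff0 makes the first
       instruction fetch hit physical address 0xfffffff0, the x86 reset
       vector just below 4GB. */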
    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    env->pat = 0x0007040600070406ULL;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
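
/* Decode family and model from the CPUID version word (CPUID.1:EAX):
   family is bits 11..8, the base model is bits 7..4, widened by the
   extended model field in bits 19..16 as the high nibble. */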
static void cpu_x86_version(CPUState *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUState *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                           ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}
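
/* With CPU_DUMP_CODE, dump this many raw opcode bytes around EIP, of
   which at most DUMP_CODE_BYTES_BACKWARD precede the current EIP. */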
#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    cpu_synchronize_state(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
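    /* Dump raw code bytes around EIP; the byte at EIP itself is printed
       bracketed as <xx>. */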
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(env, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
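
/* a20_mask is ANDed into every physical address computed by the MMU
   code below; forcing bit 20 to zero reproduces the legacy A20 gate
   wrap-around. */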
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
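    /* (new_cr0 << (HF_MP_SHIFT - 1)) lines CR0.MP (bit 1) up with
       HF_MP_MASK, so MP, EM and TS are copied into hflags in one shift. */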
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }
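
    /* is_write1 encodes the access type: 0 = read, 1 = write,
       2 = instruction fetch; only bit 0 counts as a write for dirty
       tracking. With PAE or long mode, the walk below uses 64-bit
       entries and consumes 9 virtual-address bits per level (bits
       47..39, 38..30, 29..21 and 20..12 in long mode). */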
    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
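            /* ptep accumulates the effective USER/RW/NX rights by ANDing
               the entries of all levels; NX is XOR-inverted first so that
               "execute allowed" is a positive bit and the AND keeps the
               most restrictive setting. */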
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
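
/* Software page-table walk for the debugger (gdbstub, monitor): translate
   a virtual address without setting accessed/dirty bits or touching the
   TLB; returns -1 if the address is not mapped. */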
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
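
/* DR7 encodes a 2-bit type per hardware debug register: 00 = instruction
   breakpoint, 01 = data write, 10 = I/O access, 11 = data read/write.
   These are mapped onto QEMU's generic breakpoints and watchpoints. */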
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}

void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}

int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}

static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception_env(int exception_index, CPUState *env);

static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception_env(EXCP01_DB, env);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception_env(EXCP01_DB, env);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
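
/* The injection itself must run in the context of the target VCPU, so
   the parameters are bundled into a struct and dispatched with
   run_on_cpu(). Each MCE bank occupies four consecutive uint64_t slots
   in mce_banks: CTL, STATUS, ADDR and MISC. */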
typedef struct MCEInjectionParams {
    Monitor *mon;
    CPUState *env;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUState *cenv = params->env;
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cenv);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cenv->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cenv->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cenv->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, CPUState *cenv, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    MCEInjectionParams params = {
        .mon = mon,
        .env = cenv,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;
    CPUState *env;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cenv, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            if (cenv == env) {
                continue;
            }
            params.env = env;
            run_on_cpu(env, do_inject_x86_mce, &params);
        }
    }
}

#endif /* !CONFIG_USER_ONLY */
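
/* Enable MCE only for CPU models that advertise it (family >= 6 with
   both CPUID_MCE and CPUID_MCA set); all bank CTL registers default to
   all-ones, i.e. every error source enabled. */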
static void mce_init(CPUX86State *cenv)
{
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA)) ==
           (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}
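
/* Read a segment descriptor for the debugger from the GDT or LDT
   (selector bit 2 picks the table); returns 0 on failure. The 32-bit
   base is scattered across both descriptor words, and the G bit scales
   the 20-bit limit to 4K granularity. */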
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}

CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);

    qemu_init_vcpu(env);

    return env;
}
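
/* An INIT preserves a pending SIPI and the PAT MSR across the reset;
   afterwards every CPU except the bootstrap processor stays halted,
   waiting for a SIPI. */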
#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_reset(env);
    env->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(env->apic_state);
    env->halted = !cpu_is_bsp(env);
}

void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env->apic_state);
}
#else
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif