exec: Make cpu_memory_rw_debug use the CPUs AS
qemu/ar7.git: target-i386/helper.c
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "sysemu/kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#endif

//#define DEBUG_MMU

static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }
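
    /* cpuid_version has the CPUID.01H:EAX layout: the extended-model bits
       (EAX[19:16]) are folded into the model number as its high nibble,
       while the extended-family bits are ignored. */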
    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}

/***********************************************************/
/* x86 debug */
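
/* Note: this table must be kept in the same order as the CC_OP_*
   enumeration in cpu.h, since x86_cpu_dump_state() indexes it directly
   with env->cc_op. */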
static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};
static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20
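
/* The code dump below shows DUMP_CODE_BYTES_TOTAL bytes around EIP,
   starting at most DUMP_CODE_BYTES_BACKWARD bytes before it. */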
void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for (i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for (i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for (i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for (i = 0; i < 8; i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for (i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
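    /* A change to any paging-related CR4 bit (PGE, PAE, PSE, SMEP, SMAP)
       invalidates cached translations, so flush the whole TLB. */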
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx)
{
    CPUState *cs = ENV_GET_CPU(env);
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
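    /* is_write1 == 2 denotes an instruction fetch: it is folded to a plain
       read for the dirty-bit handling below, while the NX/SMEP checks look
       at is_write1 itself. */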
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }
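
    /* Paging is enabled from here on: PAE/long mode walks 64-bit entries
       (PML4E/PDPE/PDE/PTE), legacy mode walks 32-bit PDE/PTE. */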
    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(cs->as, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pml4e_addr, pml4e);
            }
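            /* Accumulate the protection bits with NX inverted, so that
               ANDing the levels together leaves the page executable only
               if no level sets NX. */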
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                goto do_fault_protect;
            }
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
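
/* Debug translation (gdbstub, monitor): walk the page tables read-only,
   without permission checks or accessed/dirty updates, and return -1 for
   unmapped addresses. */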
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    hwaddr paddr;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(cs->as, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(cs->as, pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde & ~0x003ff000; /* align to 4MB */
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = ldl_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
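
/* The helpers below map the x86 debug registers (DR0-DR7) onto QEMU's
   generic breakpoint/watchpoint machinery; I/O breakpoints (DR7 type 2)
   are not implemented. */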

void hw_breakpoint_insert(CPUX86State *env, int index)
{
    int type = 0, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    if (type != 0) {
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}

void hw_breakpoint_remove(CPUX86State *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
    case DR7_TYPE_DATA_RW:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    }
}

bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
    target_ulong dr6;
    int reg;
    bool hit_enabled = false;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < DR7_MAX_BP; reg++) {
        bool bp_match = false;
        bool wp_match = false;

        switch (hw_breakpoint_type(env->dr[7], reg)) {
        case DR7_TYPE_BP_INST:
            if (env->dr[reg] == env->eip) {
                bp_match = true;
            }
            break;
        case DR7_TYPE_DATA_WR:
        case DR7_TYPE_DATA_RW:
            if (env->cpu_watchpoint[reg] &&
                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
                wp_match = true;
            }
            break;
        case DR7_TYPE_IO_RW:
            break;
        }
        if (bp_match || wp_match) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = true;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }

    return hit_enabled;
}
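
/* Debug exception handler: raise #DB only when the hit corresponds to an
   architecturally enabled DR7 breakpoint; for a hardware watchpoint hit
   that turns out not to be enabled, resume execution instead. */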

void breakpoint_handler(CPUX86State *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                cpu_resume_from_signal(env, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
    }
}

typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;

static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
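    /* Each MCE bank occupies four consecutive slots in mce_banks:
       CTL, STATUS, ADDR and MISC. */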
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}

void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
1232 if (flags & MCE_INJECT_BROADCAST) {
1233 CPUState *other_cs;
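
        /* Broadcast: report a fixed valid, uncorrected error in bank 1
           of every other CPU. */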
        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}
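
/* Under KVM the TPR access is recorded and re-delivered through
   CPU_INTERRUPT_TPR; under TCG the guest state is first rolled back to
   the faulting instruction before the APIC is notified. */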
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = x86_env_get_cpu(env);

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(env, env->mem_io_pc);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */

int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;
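
    /* Bit 2 of the selector picks the descriptor table (LDT if set, GDT
       otherwise); masked to 8-byte alignment it gives the descriptor's
       byte offset. */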
1277 if (selector & 0x4)
1278 dt = &env->ldt;
1279 else
1280 dt = &env->gdt;
1281 index = selector & ~7;
1282 ptr = dt->base + index;
1283 if ((index + 7) > dt->limit
1284 || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
1285 || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
1286 return 0;
1288 *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1289 *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1290 if (e2 & DESC_G_MASK)
1291 *limit = (*limit << 12) | 0xfff;
1292 *flags = e2;
1294 return 1;

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;
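
    /* INIT resets the CPU core but preserves PAT and any pending SIPI,
       then reinitializes the local APIC state. */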
    cpu_reset(cs);
    cs->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif