/* target-i386/helper.c */
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "sysemu/kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#endif

//#define DEBUG_MMU
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}

/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}
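/*
 * Note that cpu_x86_version() returns only the base family field
 * (CPUID bits 11:8), while the model value folds in the extended-model
 * nibble, so "family == 6 && model >= 14" really is the 06H_EH cutoff
 * named in the comment above.
 */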
/***********************************************************/
/* x86 debug */

static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};
static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
 done:
    cpu_fprintf(f, "\n");
}
#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20

void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
        CPUState *cs = CPU(cpu);

#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(cs, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(cs, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
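/*
 * With the A20 gate disabled, bit 20 of every physical address is
 * forced to zero, so e.g. 0x100000 aliases to 0x000000, reproducing
 * the 8086-style 1MB wrap-around that real-mode software may rely on.
 */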
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(CPU(cpu), 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
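/*
 * The final hflags update above works because CR0.MP, CR0.EM and
 * CR0.TS occupy bits 1-3 while HF_MP/HF_EM/HF_TS occupy three
 * consecutive hflags bits starting at HF_MP_SHIFT, so a single shift
 * by HF_MP_SHIFT - 1 lines all three up at once.
 */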
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    X86CPU *cpu = x86_env_get_cpu(env);

    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(CPU(cpu), 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
    X86CPU *cpu = x86_env_get_cpu(env);

#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(CPU(cpu), 1);
    }
    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}
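/*
 * PGE, PAE, PSE, SMEP and SMAP all change how page table entries are
 * interpreted, hence the full TLB flush above when any of them
 * toggles.  OSFXSR and SMAP state are also mirrored into hflags so
 * that generated code can test them cheaply.
 */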
#if defined(CONFIG_USER_ONLY)

int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
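/*
 * 0xfffffff000LL keeps physical address bits 12-39, i.e. assumes a
 * 40-bit physical address space on 64-bit targets; the 32-bit value
 * keeps bits 12-35 (36 bits, the PAE minimum).
 */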
/* return value:
 * -1 = cannot handle fault
 * 0  = nothing more to do
 * 1  = generate PF fault
 */
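/*
 * The walk below handles three configurations: paging disabled
 * (physical address == virtual address), PAE paging (three levels, or
 * four with a PML4 in long mode, with NX support), and legacy two-level
 * 32-bit paging, optionally with 4MB PSE pages.
 */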
int x86_cpu_handle_mmu_fault(CPUState *cs, vaddr addr,
                             int is_write1, int mmu_idx)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=%" VADDR_PRIx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                cs->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(cs->as, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }
        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                goto do_fault_protect;
            }
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(cs->as, pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(cs->as, pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    cs->exception_index = EXCP0E_PAGE;
    return 1;
}
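/*
 * Debug-only address translation: unlike the fault handler above this
 * ignores access rights and leaves the accessed/dirty bits untouched,
 * since it only serves debug accesses such as the gdbstub and the
 * monitor.
 */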
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    hwaddr paddr;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(cs->as, pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(cs->as, pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(cs->as, pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = ldl_phys(cs->as, pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde & ~0x003ff000; /* align to 4MB */
            page_size = 4096 * 1024;
        } else {
            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = ldl_phys(cs->as, pte_addr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    int type = 0, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(cs, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    if (type != 0) {
        err = cpu_watchpoint_insert(cs, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}
void hw_breakpoint_remove(CPUX86State *env, int index)
{
    CPUState *cs;

    if (!env->cpu_breakpoint[index]) {
        return;
    }
    cs = CPU(x86_env_get_cpu(env));
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(cs, env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
    case DR7_TYPE_DATA_RW:
        cpu_watchpoint_remove_by_ref(cs, env->cpu_watchpoint[index]);
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    }
}
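/*
 * DR6 semantics: bits 0-3 record which debug register matched.  A match
 * is recorded even when the corresponding breakpoint is not enabled in
 * DR7, which is why dr6 is accumulated before the enable check below.
 */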
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
    target_ulong dr6;
    int reg;
    bool hit_enabled = false;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < DR7_MAX_BP; reg++) {
        bool bp_match = false;
        bool wp_match = false;

        switch (hw_breakpoint_type(env->dr[7], reg)) {
        case DR7_TYPE_BP_INST:
            if (env->dr[reg] == env->eip) {
                bp_match = true;
            }
            break;
        case DR7_TYPE_DATA_WR:
        case DR7_TYPE_DATA_RW:
            if (env->cpu_watchpoint[reg] &&
                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
                wp_match = true;
            }
            break;
        case DR7_TYPE_IO_RW:
            break;
        }
        if (bp_match || wp_match) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = true;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }

    return hit_enabled;
}
void breakpoint_handler(CPUX86State *env)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    CPUBreakpoint *bp;

    if (cs->watchpoint_hit) {
        if (cs->watchpoint_hit->flags & BP_CPU) {
            cs->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                cpu_resume_from_signal(cs, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
        }
    }
}
typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;
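/*
 * Each MCE bank occupies four consecutive uint64_t slots in
 * cenv->mce_banks -- MCi_CTL, MCi_STATUS, MCi_ADDR and MCi_MISC --
 * hence the "4 * params->bank" indexing and the banks[0]..banks[3]
 * accesses below.
 */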
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(cs, CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(cs, cs->mem_io_pc);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */
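/*
 * Decode a segment descriptor for the debugger: e1/e2 are the low and
 * high 32-bit words of the GDT or LDT entry, and base/limit/flags are
 * unpacked the way the CPU would, including scaling the limit by 4K
 * when the granularity bit (DESC_G_MASK) is set.
 */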
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}
#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}

void do_cpu_sipi(X86CPU *cpu)
{
}
#endif