/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "sysemu/kvm.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "monitor/monitor.h"
#endif

//#define DEBUG_MMU
static void cpu_x86_version(CPUX86State *env, int *family, int *model)
{
    int cpuver = env->cpuid_version;

    if (family == NULL || model == NULL) {
        return;
    }

    *family = (cpuver >> 8) & 0x0f;
    *model = ((cpuver >> 12) & 0xf0) + ((cpuver >> 4) & 0x0f);
}
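
/*
 * Worked example (illustrative value, not from the source): for
 * cpuid_version 0x000306a9 the decode above yields family 6 and
 * model 0x3a, because the extended-model field (bits 19-16) is
 * folded into the high nibble of the model number.
 */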
/* Broadcast MCA signal for processor version 06H_EH and above */
int cpu_x86_support_mca_broadcast(CPUX86State *env)
{
    int family = 0;
    int model = 0;

    cpu_x86_version(env, &family, &model);
    if ((family == 6 && model >= 14) || family > 6) {
        return 1;
    }

    return 0;
}
/***********************************************************/
/* x86 debug */
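/* Entry order must mirror the CC_OP_* enumeration in cpu.h: the state
   dump below indexes this table directly with env->cc_op. */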
static const char *cc_op_str[CC_OP_NB] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",

    "BMILGB",
    "BMILGW",
    "BMILGL",
    "BMILGQ",

    "ADCX",
    "ADOX",
    "ADCOX",

    "CLR",
};
static void
cpu_x86_dump_seg_cache(CPUX86State *env, FILE *f, fprintf_function cpu_fprintf,
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags & 0x00ffff00);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags & 0x00ffff00);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f,
                        (sc->flags & DESC_B_MASK || env->hflags & HF_LMA_MASK)
                        ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, "%s",
                    sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                 [(sc->flags & DESC_TYPE_MASK)
                                  >> DESC_TYPE_SHIFT]);
    }
 done:
    cpu_fprintf(f, "\n");
}
#define DUMP_CODE_BYTES_TOTAL    50
#define DUMP_CODE_BYTES_BACKWARD 20
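
/* Backs the generic cpu_dump_state() hook, and thus the monitor's
   "info registers" command. */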
void x86_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = cpu_compute_eflags(env);
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    cs->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++) {
            cpu_fprintf(f, "DR%d=" TARGET_FMT_lx " ", i, env->dr[i]);
        }
        cpu_fprintf(f, "\nDR6=" TARGET_FMT_lx " DR7=" TARGET_FMT_lx "\n",
                    env->dr[6], env->dr[7]);
    }
    if (flags & CPU_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    cpu_fprintf(f, "EFER=%016" PRIx64 "\n", env->efer);
    if (flags & CPU_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
            CPU_LDoubleU u;
            u.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, u.l.lower, u.l.upper);
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
    if (flags & CPU_DUMP_CODE) {
        target_ulong base = env->segs[R_CS].base + env->eip;
        target_ulong offs = MIN(env->eip, DUMP_CODE_BYTES_BACKWARD);
        uint8_t code;
        char codestr[3];

        cpu_fprintf(f, "Code=");
        for (i = 0; i < DUMP_CODE_BYTES_TOTAL; i++) {
            if (cpu_memory_rw_debug(cs, base - offs + i, &code, 1, 0) == 0) {
                snprintf(codestr, sizeof(codestr), "%02x", code);
            } else {
                snprintf(codestr, sizeof(codestr), "??");
            }
            cpu_fprintf(f, "%s%s%s%s", i > 0 ? " " : "",
                        i == offs ? "<" : "", codestr, i == offs ? ">" : "");
        }
        cpu_fprintf(f, "\n");
    }
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
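
/*
 * env->a20_mask is ANDed into every physical address computed below.
 * With A20 disabled the mask clears bit 20, reproducing the 8086-style
 * wrap-around: e.g. real-mode address 0xFFFF0 + 0x10 = 0x100000 is
 * mapped back to 0 (illustrative values, not from the source).
 */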
void x86_cpu_set_a20(X86CPU *cpu, int a20_state)
{
    CPUX86State *env = &cpu->env;

    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = ~(1 << 20) | (a20_state << 20);
    }
}
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
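    /* CR0.MP/EM/TS occupy bits 1-3, and the HF_MP/HF_EM/HF_TS hflags are
       contiguous in the same order, so a single shift by (HF_MP_SHIFT - 1)
       copies all three at once. */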
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 ^ env->cr[4]) &
        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
        new_cr4 &= ~CR4_OSFXSR_MASK;
    }
    env->hflags &= ~HF_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK) {
        env->hflags |= HF_OSFXSR_MASK;
    }

    if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
        new_cr4 &= ~CR4_SMAP_MASK;
    }
    env->hflags &= ~HF_SMAP_MASK;
    if (new_cr4 & CR4_SMAP_MASK) {
        env->hflags |= HF_SMAP_MASK;
    }

    env->cr[4] = new_cr4;
}
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

#else
/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
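
/* The masks keep bits 39-12 (a 40-bit physical address space) on x86_64
   builds and bits 35-12 (36-bit, the PAE minimum) otherwise; bits 11-0
   are the in-page offset and never come from a page-table entry. */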
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, is_write, is_user;
    hwaddr paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
#ifdef TARGET_X86_64
        if (!(env->hflags & HF_LMA_MASK)) {
            /* Without long mode we can only address 32bits in real mode */
            pte = (uint32_t)pte;
        }
#endif
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }
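
    /* Paging is enabled: walk the guest page tables.  In long mode this
       is the 4-level PML4E -> PDPE -> PDE -> PTE walk (9 index bits per
       level); 32-bit PAE uses the lower three levels, and non-PAE takes
       the classic two-level walk further below. */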
    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                goto do_fault_protect;
            }
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
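        /* Legacy non-PAE mode: two-level walk with 32-bit entries, ten
           index bits per level, and optional 4 MB pages via CR4.PSE. */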
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (pde & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            switch (mmu_idx) {
            case MMU_USER_IDX:
                if (!(ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if (is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            case MMU_KERNEL_IDX:
                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                /* fall through */
            case MMU_KSMAP_IDX:
                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
                    (ptep & PG_USER_MASK)) {
                    goto do_fault_protect;
                }
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK)) {
                    goto do_fault_protect;
                }
                break;

            default: /* cannot happen */
                break;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
    return 0;
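
    /* Page-fault error code layout (Intel SDM): bit 0 P (protection
       violation vs. not-present), bit 1 W/R, bit 2 U/S, bit 3 RSVD,
       bit 4 I/D (instruction fetch); is_write1 == 2 marks a fetch. */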
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (((env->efer & MSR_EFER_NXE) &&
          (env->cr[4] & CR4_PAE_MASK)) ||
         (env->cr[4] & CR4_SMEP_MASK)))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    hwaddr paddr;
    uint32_t page_offset;
    int page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr & env->a20_mask;
        page_size = 4096;
    } else if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                         (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                    (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) +
                        (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        pte &= ~(PG_NX_MASK | PG_HI_USER_MASK);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde & ~0x003ff000; /* align to 4MB */
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;
            page_size = 4096;
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
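
/* hw_breakpoint_type() below extracts the 2-bit R/Wn field from DR7
   (bits 16+4*index): 00 = instruction breakpoint, 01 = data write,
   10 = I/O read/write, 11 = data read/write; hw_breakpoint_len()
   decodes the adjacent LENn field. */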
void hw_breakpoint_insert(CPUX86State *env, int index)
{
    int type = 0, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
        type = BP_CPU | BP_MEM_WRITE;
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    case DR7_TYPE_DATA_RW:
        type = BP_CPU | BP_MEM_ACCESS;
        break;
    }

    if (type != 0) {
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
    }

    if (err) {
        env->cpu_breakpoint[index] = NULL;
    }
}
void hw_breakpoint_remove(CPUX86State *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case DR7_TYPE_BP_INST:
        if (hw_breakpoint_enabled(env->dr[7], index)) {
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        }
        break;
    case DR7_TYPE_DATA_WR:
    case DR7_TYPE_DATA_RW:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case DR7_TYPE_IO_RW:
        /* No support for I/O watchpoints yet */
        break;
    }
}
bool check_hw_breakpoints(CPUX86State *env, bool force_dr6_update)
{
    target_ulong dr6;
    int reg;
    bool hit_enabled = false;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < DR7_MAX_BP; reg++) {
        bool bp_match = false;
        bool wp_match = false;

        switch (hw_breakpoint_type(env->dr[7], reg)) {
        case DR7_TYPE_BP_INST:
            if (env->dr[reg] == env->eip) {
                bp_match = true;
            }
            break;
        case DR7_TYPE_DATA_WR:
        case DR7_TYPE_DATA_RW:
            if (env->cpu_watchpoint[reg] &&
                env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT) {
                wp_match = true;
            }
            break;
        case DR7_TYPE_IO_RW:
            break;
        }
        if (bp_match || wp_match) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg)) {
                hit_enabled = true;
            }
        }
    }

    if (hit_enabled || force_dr6_update) {
        env->dr[6] = dr6;
    }

    return hit_enabled;
}
void breakpoint_handler(CPUX86State *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, false)) {
                raise_exception(env, EXCP01_DB);
            } else {
                cpu_resume_from_signal(env, NULL);
            }
        }
    } else {
        QTAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, true);
                    raise_exception(env, EXCP01_DB);
                }
                break;
            }
    }
}
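
/* Each MCE bank occupies four consecutive uint64_t slots in
   env->mce_banks: [0] = MCi_CTL, [1] = MCi_STATUS, [2] = MCi_ADDR,
   [3] = MCi_MISC; do_inject_x86_mce() below indexes them that way. */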
typedef struct MCEInjectionParams {
    Monitor *mon;
    X86CPU *cpu;
    int bank;
    uint64_t status;
    uint64_t mcg_status;
    uint64_t addr;
    uint64_t misc;
    int flags;
} MCEInjectionParams;
static void do_inject_x86_mce(void *data)
{
    MCEInjectionParams *params = data;
    CPUX86State *cenv = &params->cpu->env;
    CPUState *cpu = CPU(params->cpu);
    uint64_t *banks = cenv->mce_banks + 4 * params->bank;

    cpu_synchronize_state(cpu);

    /*
     * If there is an MCE exception being processed, ignore this SRAO MCE
     * unless unconditional injection was requested.
     */
    if (!(params->flags & MCE_INJECT_UNCOND_AO)
        && !(params->status & MCI_STATUS_AR)
        && (cenv->mcg_status & MCG_STATUS_MCIP)) {
        return;
    }

    if (params->status & MCI_STATUS_UC) {
        /*
         * if MSR_MCG_CTL is not all 1s, the uncorrected error
         * reporting is disabled
         */
        if ((cenv->mcg_cap & MCG_CTL_P) && cenv->mcg_ctl != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled\n",
                           cpu->cpu_index);
            return;
        }

        /*
         * if MSR_MCi_CTL is not all 1s, the uncorrected error
         * reporting is disabled for the bank
         */
        if (banks[0] != ~(uint64_t)0) {
            monitor_printf(params->mon,
                           "CPU %d: Uncorrected error reporting disabled for"
                           " bank %d\n",
                           cpu->cpu_index, params->bank);
            return;
        }

        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            monitor_printf(params->mon,
                           "CPU %d: Previous MCE still in progress, raising"
                           " triple fault\n",
                           cpu->cpu_index);
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        cenv->mcg_status = params->mcg_status;
        banks[1] = params->status;
        cpu_interrupt(cpu, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL) {
            params->status |= MCI_STATUS_OVER;
        }
        banks[2] = params->addr;
        banks[3] = params->misc;
        banks[1] = params->status;
    } else {
        banks[1] |= MCI_STATUS_OVER;
    }
}
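
/* Entry point for MCE injection, typically reached from the monitor's
   "mce" command.  With MCE_INJECT_BROADCAST the primary error goes to
   the target CPU and every other CPU receives a fixed SRAO-style record
   in bank 1, mirroring how hardware broadcasts the event. */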
void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                        uint64_t status, uint64_t mcg_status, uint64_t addr,
                        uint64_t misc, int flags)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *cenv = &cpu->env;
    MCEInjectionParams params = {
        .mon = mon,
        .cpu = cpu,
        .bank = bank,
        .status = status,
        .mcg_status = mcg_status,
        .addr = addr,
        .misc = misc,
        .flags = flags,
    };
    unsigned bank_num = cenv->mcg_cap & 0xff;

    if (!cenv->mcg_cap) {
        monitor_printf(mon, "MCE injection not supported\n");
        return;
    }
    if (bank >= bank_num) {
        monitor_printf(mon, "Invalid MCE bank number\n");
        return;
    }
    if (!(status & MCI_STATUS_VAL)) {
        monitor_printf(mon, "Invalid MCE status code\n");
        return;
    }
    if ((flags & MCE_INJECT_BROADCAST)
        && !cpu_x86_support_mca_broadcast(cenv)) {
        monitor_printf(mon, "Guest CPU does not support MCA broadcast\n");
        return;
    }

    run_on_cpu(cs, do_inject_x86_mce, &params);
    if (flags & MCE_INJECT_BROADCAST) {
        CPUState *other_cs;

        params.bank = 1;
        params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
        params.mcg_status = MCG_STATUS_MCIP | MCG_STATUS_RIPV;
        params.addr = 0;
        params.misc = 0;
        CPU_FOREACH(other_cs) {
            if (other_cs == cs) {
                continue;
            }
            params.cpu = X86_CPU(other_cs);
            run_on_cpu(other_cs, do_inject_x86_mce, &params);
        }
    }
}
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access)
{
    X86CPU *cpu = x86_env_get_cpu(env);

    if (kvm_enabled()) {
        env->tpr_access_type = access;

        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_TPR);
    } else {
        cpu_restore_state(env, env->mem_io_pc);

        apic_handle_tpr_access_report(cpu->apic_state, env->eip, access);
    }
}
#endif /* !CONFIG_USER_ONLY */
int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(cs, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(cs, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}
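
/* Worked example (illustrative descriptor, not from the source): the
   flat 32-bit code descriptor e1 = 0x0000ffff, e2 = 0x00cf9a00 decodes
   to base 0 and raw limit 0xfffff; DESC_G_MASK is set, so the limit
   scales in 4 KB units to 0xffffffff. */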
#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(X86CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    int sipi = cs->interrupt_request & CPU_INTERRUPT_SIPI;
    uint64_t pat = env->pat;

    cpu_reset(cs);
    cs->interrupt_request = sipi;
    env->pat = pat;
    apic_init_reset(cpu->apic_state);
}

void do_cpu_sipi(X86CPU *cpu)
{
    apic_sipi(cpu->apic_state);
}
#else
void do_cpu_init(X86CPU *cpu)
{
}
void do_cpu_sipi(X86CPU *cpu)
{
}
#endif