Regenerate bios for mtrr support
[qemu-kvm/markmc.git] / target-i386 / helper2.c
blob a058826b1c15a116a3a89c60211c216760bd6336
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "svm.h"
#include "qemu-common.h"

#include "qemu-kvm.h"
//#define DEBUG_MMU

static int cpu_x86_register (CPUX86State *env, const char *cpu_model);
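
/* Map a single CPUID feature-flag name (e.g. "sse2" or "nx") onto the
   bit it occupies in the corresponding CPUID feature word.  The four
   bitmaps mirror CPUID leaf 1 (EDX and ECX) and leaf 0x80000001
   (EDX and ECX), in that order. */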
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    const char *ext3_feature_name[] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    for ( i = 0 ; i < 32 ; i++ )
        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
            *features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            return;
        }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}

extern const char *cpu_vendor_string;
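
/* Allocate and initialize a new x86 CPU state: register it with the
   generic exec layer, apply the requested "-cpu" model, and reset it.
   Under kqemu or KVM the new VCPU is also announced to the accelerator. */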
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
#ifdef USE_KVM
    if (kvm_enabled())
        kvm_init_new_ap(env->cpu_index, env);
#endif
    return env;
}

typedef struct x86_def_t {
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;
} x86_def_t;

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
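
/* Built-in CPU models.  Each entry fixes the CPUID vendor string,
   family/model/stepping, the maximum standard and extended CPUID
   levels, and the four feature words exposed to the guest. */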
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
    },
};
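
/* Parse a "-cpu" model string of the form
 *     name[,+feature][,-feature][,feature=value]...
 * e.g. "qemu64,+vmx,-nx,model=2": start from the named entry in
 * x86_defs, then apply the '+'/'-' feature toggles and any explicit
 * family/model/stepping overrides.  Returns 0 on success, -1 on an
 * unknown model name or a malformed feature string. */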
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    def = NULL;
    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def)
        goto error;
    memcpy(x86_cpu_def, def, sizeof(*def));

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    x86_cpu_def = 0;
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    x86_cpu_def = 0;
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    x86_cpu_def = 0;
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                x86_cpu_def = 0;
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            x86_cpu_def = 0;
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}

void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
}

static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
    }
    env->cpuid_level = def->level;
    env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
        int c, len, i;
        if (cpu_vendor_string != NULL)
            model_id = cpu_vendor_string;
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
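
/* Bring the CPU back to the architectural power-on state: real mode,
   CS:IP = F000:FFF0 with the CS base at 0xffff0000 (so the first fetch
   is at 0xfffffff0, just below 4 GB), EDX holding the CPUID signature,
   and the FPU control word and MXCSR at their documented reset values. */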
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags |= HF_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0; i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}

void cpu_x86_close(CPUX86State *env)
{
    free(env);
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
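
/* The A20 gate: when disabled, physical address bit 20 is masked to 0
   on every access, reproducing the 8086 1 MB wrap-around.  Toggling it
   invalidates all cached translations and unlinks any running TBs. */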
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = (~0x100000) | (a20_state << 20);
    }
}
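
/* CR0 writes can switch paging and protection on or off and, together
   with EFER.LME, enter or leave long mode; any such transition requires
   a full TLB flush and an update of the cached hflags. */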
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
#if defined(USE_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000L
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000L
# else
# define PHYS_ADDR_MASK 0xffffff000L
# endif
#endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
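/* Software page-table walk.  Depending on CR0.PG, CR4.PAE and EFER.LMA
   this walks 0, 2, 3 or 4 levels (no paging, 32-bit, PAE, long mode),
   accumulates the combined U/S, R/W and NX permissions along the way,
   sets the accessed/dirty bits, and installs the resulting translation
   in the QEMU TLB. */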
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE)) {
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    /* the VMM will handle this */
    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE))
        return 2;
    return 1;
}
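
/* Translate a guest virtual address to a guest physical address by
   walking the page tables without side effects (no accessed/dirty
   updates, no faults injected); used e.g. by the gdbstub and the
   monitor.  Returns -1 if the address is not mapped. */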
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */