[qemu-kvm/fedora.git] / target-i386 / helper.c
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "svm.h"
#include "qemu-common.h"
//#define DEBUG_MMU

static int cpu_x86_register (CPUX86State *env, const char *cpu_model);

static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    const char *ext3_feature_name[] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    for ( i = 0 ; i < 32 ; i++ )
        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
            *features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            return;
        }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
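
/*
 * Usage sketch (illustrative only, not a caller that exists in this file):
 * each recognized flag name sets exactly one bit in the matching feature
 * word, so parsing "mmx" would do the following:
 *
 *     uint32_t f = 0, ext = 0, ext2 = 0, ext3 = 0;
 *     add_flagname_to_bitmaps("mmx", &f, &ext, &ext2, &ext3);
 *     // f now has bit 23 set, i.e. CPUID_MMX in CPUID.01H:EDX
 */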
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}
typedef struct x86_def_t {
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;
} x86_def_t;
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
    },
};
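
/*
 * Note (commentary only): this table backs the "-cpu" command-line option;
 * e.g. "-cpu pentium3" selects the entry above with family 6, model 7,
 * stepping 3, while "-cpu athlon" additionally reports the AuthenticAMD
 * vendor string through the vendor1..vendor3 words.
 */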
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    def = NULL;
    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def)
        goto error;
    memcpy(x86_cpu_def, def, sizeof(*def));

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    x86_cpu_def = 0;
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    x86_cpu_def = 0;
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    x86_cpu_def = 0;
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                x86_cpu_def = 0;
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            x86_cpu_def = 0;
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
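
/*
 * Illustrative example (not code from this file): a model string such as
 *
 *     "qemu32,+3dnow,-mmx,family=15"
 *
 * copies the "qemu32" definition, ORs the 3dnow bit into ext2_features,
 * schedules the mmx bit for removal, and overrides the reported family;
 * any token not matching +flag, -flag or key=value is rejected above.
 */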
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
}
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
    }
    env->cpuid_level = def->level;
    env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
        int c, len, i;
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
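
/*
 * Worked example (commentary only): the version word packs
 * family/model/stepping as (family << 8) | (model << 4) | stepping, so the
 * "qemu32" definition (6, 3, 3) yields cpuid_version = 0x633, which guests
 * read back in CPUID.01H:EAX.
 */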
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}
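
/*
 * Commentary only: with CS.selector 0xf000, CS.base 0xffff0000 and
 * EIP 0xfff0, the first instruction fetch after reset is at physical
 * address 0xfffffff0, the architectural x86 reset vector.
 */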
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = (~0x100000) | (a20_state << 20);
    }
}
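
/*
 * Commentary only: with A20 disabled the mask clears address bit 20, so a
 * guest access to 0x100000 wraps to 0x000000, reproducing the real-mode
 * address wrap-around of legacy PCs.
 */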
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
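
/*
 * Commentary only: the transition above follows the architectural rule that
 * setting CR0.PG while EFER.LME=1 activates EFER.LMA, and requires CR4.PAE
 * to be set first; real hardware raises an exception in that case, whereas
 * this code simply ignores the write (see the XXX note).
 */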
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
#if defined(USE_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000L
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000L
# else
# define PHYS_ADDR_MASK 0xffffff000L
# endif
#endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
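
/*
 * Worked example (commentary only): in long mode the walk below slices a
 * 48-bit virtual address into four 9-bit indices plus a 12-bit page offset:
 * bits 47-39 index the PML4, 38-30 the PDPT, 29-21 the page directory and
 * 20-12 the page table. For addr 0x00007f1234567000 the PML4 index is
 * (addr >> 39) & 0x1ff = 0xfe.
 */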
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page directory entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
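
/*
 * Commentary only: the error code assembled in the do_fault paths above
 * follows the #PF format: P in bit 0 (set for protection faults), W/R in
 * bit 1, U/S in bit 2, RSVD in bit 3 and I/D in bit 4 for NX-blocked
 * instruction fetches (is_write1 == 2 marks a code fetch).
 */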
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page directory entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */