linux-user: Add generic env variable handling
[qemu/mini2440/sniper_sniper_test.git] / target-i386 / helper.c
blobdb9f3977d765ca38be2c923968cb10ece2be14ec
1 /*
2 * i386 helpers (without register variable usage)
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
26 #include <assert.h>
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "qemu-common.h"
31 #include "kvm.h"
33 //#define DEBUG_MMU
/* If @flagname matches an entry of the 32-element @table, set the
 * corresponding bit in *@bits and return 1; return 0 when not found.
 * NULL table slots are reserved/unnamed bits and never match. */
static int lookup_feature_bit(const char *flagname, const char **table,
                              uint32_t *bits)
{
    int i;

    for (i = 0; i < 32; i++) {
        if (table[i] && !strcmp(flagname, table[i])) {
            *bits |= 1u << i;
            return 1;
        }
    }
    return 0;
}

/* Set the CPUID feature bit named @flagname in the first of the four
 * feature words that defines it (standard EDX, standard ECX, extended
 * EDX, extended ECX — searched in that order, so e.g. "fpu" lands in
 * *features even though it also appears in the extended-EDX table).
 * Unknown names are reported on stderr and otherwise ignored. */
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    if (lookup_feature_bit(flagname, feature_name, features))
        return;
    if (lookup_feature_bit(flagname, ext_feature_name, ext_features))
        return;
    if (lookup_feature_bit(flagname, ext2_feature_name, ext2_features))
        return;
    if (lookup_feature_bit(flagname, ext3_feature_name, ext3_features))
        return;
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
/* Static description of one CPU model selectable with -cpu NAME. */
typedef struct x86_def_t {
    const char *name;                   /* model name matched against -cpu */
    uint32_t level;                     /* highest standard CPUID leaf */
    uint32_t vendor1, vendor2, vendor3; /* 12-char vendor id, 4 bytes per word
                                           (little-endian packed; 0 = default) */
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;                    /* highest extended (0x8000xxxx) CPUID leaf */
    char model_id[48];                  /* CPUID brand string, NUL-padded to 48 bytes */
} x86_def_t;
/* Cumulative CPUID.1:EDX feature sets for the historical models below;
 * each generation extends the previous one. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
/* Baseline for the generic qemu32/qemu64 models. */
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
/* Table of built-in CPU models; searched by name in cpu_x86_find_by_name()
 * and listed by x86_cpu_list(). */
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        /* mask keeps only the EDX bits that are shared with the extended leaf */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
               CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
               CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
               CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU     T7700  @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
               CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
               CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
               CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
            /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
             * CPUID_HT | CPUID_TM | CPUID_PBE */
            /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
            /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
             * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
    },
};
/* Parse a -cpu string of the form
 *     name[,+feature][,-feature][,feature=value]...
 * into *x86_cpu_def.  The base model "name" is looked up in x86_defs[] and
 * copied; "+feat"/"-feat" then set/clear CPUID bits, and "family=", "model=",
 * "stepping=", "vendor=" (exactly 12 chars) and "model_id=" override fields.
 * Returns 0 on success, -1 on any parse error (after printing to stderr). */
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    /* strtok modifies its argument, so work on a private copy */
    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    def = NULL;
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def)
        goto error;
    memcpy(x86_cpu_def, def, sizeof(*def));

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            /* collect bits to OR in after all options are parsed */
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            /* collect bits to mask out after all options are parsed */
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;   /* split "key=value" in place */
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                /* pack 12 chars into three little-endian 32-bit words */
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    /* apply +feat additions first, then -feat removals (so -x wins over +x) */
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
363 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
365 unsigned int i;
367 for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
368 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
/* Resolve @cpu_model and load the resulting CPUID description into @env.
 * Returns 0 on success, -1 if the model string could not be parsed. */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        /* models without an explicit vendor default to GenuineIntel */
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_level = def->level;
    /* encode family/model/stepping into CPUID.1:EAX; families above 0xf
     * use base family 0xf plus the extended-family field (bits 20-27) */
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    /* low model nibble in bits 4-7, high nibble in the extended-model field */
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        /* pack the brand string, NUL-padded to 48 bytes, into the twelve
         * 32-bit words returned by CPUID leaves 0x80000002..4.
         * NOTE(review): the |= relies on env->cpuid_model starting zeroed —
         * presumably guaranteed by the caller; confirm. */
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
/* NOTE: must be called outside the CPU execute loop */
/* Put the CPU into its architectural power-on/reset state.  Everything up
 * to the `breakpoints` member of CPUX86State is zeroed; fields that reset
 * to non-zero values are then set explicitly. */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;     /* A20 gate enabled */
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 + EIP 0xfff0 puts execution at the reset
       vector 0xfffffff0; the other segments start at 0 */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init: all stack slots tagged empty */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}
/* Release a CPU state structure. */
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
487 /***********************************************************/
488 /* x86 debug */
/* Printable names for the lazy condition-code operations, indexed by
 * env->cc_op (bounds-checked against CC_OP_NB before use); the order
 * must match the CC_OP_* enumeration.  After the two special entries,
 * each operation comes in byte/word/long/quad variants. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
/* Dump the full architectural CPU state to @f via @cpu_fprintf: general
 * registers and flags, segment/system registers, then optionally the lazy
 * condition codes (X86_DUMP_CCOP) and FPU/SSE state (X86_DUMP_FPU).
 * 64-bit formats are used when the CPU is in long/64-bit mode. */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit code segment: dump the full RAX..R15 register file */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* long mode: segment/descriptor bases are 64-bit */
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* unknown cc_op values are printed numerically in brackets */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        /* FTW byte: bit i set means stack slot i is valid (fptags inverted) */
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            /* print the 80-bit register as mantissa + sign/exponent words
             * NOTE(review): type-punning through a union; layout assumes
             * x87 long double representation — confirm on non-x86 hosts */
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* 16 XMM registers are architecturally visible only in 64-bit mode */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
759 /***********************************************************/
760 /* x86 mmu */
761 /* XXX: add PGE support */
/* Set the A20 gate state (0 = masked, non-zero = enabled).  A change
 * invalidates all cached translations, so running code is unlinked and
 * the whole TLB is flushed. */
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    /* bit 20 of a20_mask holds the current gate state */
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = (~0x100000) | (a20_state << 20);
    }
}
/* Install a new CR0 value, handling long-mode entry/exit and keeping the
 * derived hflags (PE/ADDSEG/MP/EM/TS) in sync.  The TLB is flushed when
 * any of the paging-related bits (PG/WP/PE) change. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* ET is hardwired to 1 */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
822 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
823 the PDPT */
/* Install a new page-directory base (CR3).  When paging is enabled this
 * flushes the TLB; the second tlb_flush argument is 0 here, presumably
 * to keep global entries (cf. the "XXX: add PGE support" note above) —
 * confirm against tlb_flush's contract. */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
835 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
837 #if defined(DEBUG_MMU)
838 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
839 #endif
840 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
841 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
842 tlb_flush(env, 1);
844 /* SSE handling */
845 if (!(env->cpuid_features & CPUID_SSE))
846 new_cr4 &= ~CR4_OSFXSR_MASK;
847 if (new_cr4 & CR4_OSFXSR_MASK)
848 env->hflags |= HF_OSFXSR_MASK;
849 else
850 env->hflags &= ~HF_OSFXSR_MASK;
852 env->cr[4] = new_cr4;
855 #if defined(CONFIG_USER_ONLY)
/* User-mode-only variant: there is no page-table walk — every access
 * fault is turned into a guest page fault (return 1 = raise #PF, see
 * the return-value key on the softmmu variant below). */
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;   /* fault always from user mode */
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
/* User-mode emulation has no MMU: virtual addresses map 1:1 to
 * "physical" addresses. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
874 #else
/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
/* Mask of valid physical-address bits in a page-table entry
 * (low 12 bits excluded; they hold flags). */
#if defined(USE_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL    /* kqemu: 32-bit physical addresses */
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL /* 40-bit physical address space */
# else
# define PHYS_ADDR_MASK 0xffffff000LL  /* 36-bit (PAE) physical address space */
# endif
#endif
888 /* return value:
889 -1 = cannot handle fault
890 0 = nothing more to do
891 1 = generate PF fault
892 2 = soft MMU activation required for this block
894 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
895 int is_write1, int mmu_idx, int is_softmmu)
897 uint64_t ptep, pte;
898 target_ulong pde_addr, pte_addr;
899 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
900 target_phys_addr_t paddr;
901 uint32_t page_offset;
902 target_ulong vaddr, virt_addr;
904 is_user = mmu_idx == MMU_USER_IDX;
905 #if defined(DEBUG_MMU)
906 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
907 addr, is_write1, is_user, env->eip);
908 #endif
909 is_write = is_write1 & 1;
911 if (!(env->cr[0] & CR0_PG_MASK)) {
912 pte = addr;
913 virt_addr = addr & TARGET_PAGE_MASK;
914 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
915 page_size = 4096;
916 goto do_mapping;
919 if (env->cr[4] & CR4_PAE_MASK) {
920 uint64_t pde, pdpe;
921 target_ulong pdpe_addr;
923 #ifdef TARGET_X86_64
924 if (env->hflags & HF_LMA_MASK) {
925 uint64_t pml4e_addr, pml4e;
926 int32_t sext;
928 /* test virtual address sign extension */
929 sext = (int64_t)addr >> 47;
930 if (sext != 0 && sext != -1) {
931 env->error_code = 0;
932 env->exception_index = EXCP0D_GPF;
933 return 1;
936 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
937 env->a20_mask;
938 pml4e = ldq_phys(pml4e_addr);
939 if (!(pml4e & PG_PRESENT_MASK)) {
940 error_code = 0;
941 goto do_fault;
943 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
944 error_code = PG_ERROR_RSVD_MASK;
945 goto do_fault;
947 if (!(pml4e & PG_ACCESSED_MASK)) {
948 pml4e |= PG_ACCESSED_MASK;
949 stl_phys_notdirty(pml4e_addr, pml4e);
951 ptep = pml4e ^ PG_NX_MASK;
952 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
953 env->a20_mask;
954 pdpe = ldq_phys(pdpe_addr);
955 if (!(pdpe & PG_PRESENT_MASK)) {
956 error_code = 0;
957 goto do_fault;
959 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
960 error_code = PG_ERROR_RSVD_MASK;
961 goto do_fault;
963 ptep &= pdpe ^ PG_NX_MASK;
964 if (!(pdpe & PG_ACCESSED_MASK)) {
965 pdpe |= PG_ACCESSED_MASK;
966 stl_phys_notdirty(pdpe_addr, pdpe);
968 } else
969 #endif
971 /* XXX: load them when cr3 is loaded ? */
972 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
973 env->a20_mask;
974 pdpe = ldq_phys(pdpe_addr);
975 if (!(pdpe & PG_PRESENT_MASK)) {
976 error_code = 0;
977 goto do_fault;
979 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
982 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
983 env->a20_mask;
984 pde = ldq_phys(pde_addr);
985 if (!(pde & PG_PRESENT_MASK)) {
986 error_code = 0;
987 goto do_fault;
989 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
990 error_code = PG_ERROR_RSVD_MASK;
991 goto do_fault;
993 ptep &= pde ^ PG_NX_MASK;
994 if (pde & PG_PSE_MASK) {
995 /* 2 MB page */
996 page_size = 2048 * 1024;
997 ptep ^= PG_NX_MASK;
998 if ((ptep & PG_NX_MASK) && is_write1 == 2)
999 goto do_fault_protect;
1000 if (is_user) {
1001 if (!(ptep & PG_USER_MASK))
1002 goto do_fault_protect;
1003 if (is_write && !(ptep & PG_RW_MASK))
1004 goto do_fault_protect;
1005 } else {
1006 if ((env->cr[0] & CR0_WP_MASK) &&
1007 is_write && !(ptep & PG_RW_MASK))
1008 goto do_fault_protect;
1010 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1011 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1012 pde |= PG_ACCESSED_MASK;
1013 if (is_dirty)
1014 pde |= PG_DIRTY_MASK;
1015 stl_phys_notdirty(pde_addr, pde);
1017 /* align to page_size */
1018 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
1019 virt_addr = addr & ~(page_size - 1);
1020 } else {
1021 /* 4 KB page */
1022 if (!(pde & PG_ACCESSED_MASK)) {
1023 pde |= PG_ACCESSED_MASK;
1024 stl_phys_notdirty(pde_addr, pde);
1026 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
1027 env->a20_mask;
1028 pte = ldq_phys(pte_addr);
1029 if (!(pte & PG_PRESENT_MASK)) {
1030 error_code = 0;
1031 goto do_fault;
1033 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
1034 error_code = PG_ERROR_RSVD_MASK;
1035 goto do_fault;
1037 /* combine pde and pte nx, user and rw protections */
1038 ptep &= pte ^ PG_NX_MASK;
1039 ptep ^= PG_NX_MASK;
1040 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1041 goto do_fault_protect;
1042 if (is_user) {
1043 if (!(ptep & PG_USER_MASK))
1044 goto do_fault_protect;
1045 if (is_write && !(ptep & PG_RW_MASK))
1046 goto do_fault_protect;
1047 } else {
1048 if ((env->cr[0] & CR0_WP_MASK) &&
1049 is_write && !(ptep & PG_RW_MASK))
1050 goto do_fault_protect;
1052 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1053 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1054 pte |= PG_ACCESSED_MASK;
1055 if (is_dirty)
1056 pte |= PG_DIRTY_MASK;
1057 stl_phys_notdirty(pte_addr, pte);
1059 page_size = 4096;
1060 virt_addr = addr & ~0xfff;
1061 pte = pte & (PHYS_ADDR_MASK | 0xfff);
1063 } else {
1064 uint32_t pde;
1066 /* page directory entry */
1067 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
1068 env->a20_mask;
1069 pde = ldl_phys(pde_addr);
1070 if (!(pde & PG_PRESENT_MASK)) {
1071 error_code = 0;
1072 goto do_fault;
1074 /* if PSE bit is set, then we use a 4MB page */
1075 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1076 page_size = 4096 * 1024;
1077 if (is_user) {
1078 if (!(pde & PG_USER_MASK))
1079 goto do_fault_protect;
1080 if (is_write && !(pde & PG_RW_MASK))
1081 goto do_fault_protect;
1082 } else {
1083 if ((env->cr[0] & CR0_WP_MASK) &&
1084 is_write && !(pde & PG_RW_MASK))
1085 goto do_fault_protect;
1087 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1088 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1089 pde |= PG_ACCESSED_MASK;
1090 if (is_dirty)
1091 pde |= PG_DIRTY_MASK;
1092 stl_phys_notdirty(pde_addr, pde);
1095 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1096 ptep = pte;
1097 virt_addr = addr & ~(page_size - 1);
1098 } else {
1099 if (!(pde & PG_ACCESSED_MASK)) {
1100 pde |= PG_ACCESSED_MASK;
1101 stl_phys_notdirty(pde_addr, pde);
1104 /* page directory entry */
1105 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1106 env->a20_mask;
1107 pte = ldl_phys(pte_addr);
1108 if (!(pte & PG_PRESENT_MASK)) {
1109 error_code = 0;
1110 goto do_fault;
1112 /* combine pde and pte user and rw protections */
1113 ptep = pte & pde;
1114 if (is_user) {
1115 if (!(ptep & PG_USER_MASK))
1116 goto do_fault_protect;
1117 if (is_write && !(ptep & PG_RW_MASK))
1118 goto do_fault_protect;
1119 } else {
1120 if ((env->cr[0] & CR0_WP_MASK) &&
1121 is_write && !(ptep & PG_RW_MASK))
1122 goto do_fault_protect;
1124 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1125 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1126 pte |= PG_ACCESSED_MASK;
1127 if (is_dirty)
1128 pte |= PG_DIRTY_MASK;
1129 stl_phys_notdirty(pte_addr, pte);
1131 page_size = 4096;
1132 virt_addr = addr & ~0xfff;
1135 /* the page can be put in the TLB */
1136 prot = PAGE_READ;
1137 if (!(ptep & PG_NX_MASK))
1138 prot |= PAGE_EXEC;
1139 if (pte & PG_DIRTY_MASK) {
1140 /* only set write access if already dirty... otherwise wait
1141 for dirty access */
1142 if (is_user) {
1143 if (ptep & PG_RW_MASK)
1144 prot |= PAGE_WRITE;
1145 } else {
1146 if (!(env->cr[0] & CR0_WP_MASK) ||
1147 (ptep & PG_RW_MASK))
1148 prot |= PAGE_WRITE;
1151 do_mapping:
1152 pte = pte & env->a20_mask;
1154 /* Even if 4MB pages, we map only one 4KB page in the cache to
1155 avoid filling it too fast */
1156 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1157 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1158 vaddr = virt_addr + page_offset;
1160 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1161 return ret;
1162 do_fault_protect:
1163 error_code = PG_ERROR_P_MASK;
1164 do_fault:
1165 error_code |= (is_write << PG_ERROR_W_BIT);
1166 if (is_user)
1167 error_code |= PG_ERROR_U_MASK;
1168 if (is_write1 == 2 &&
1169 (env->efer & MSR_EFER_NXE) &&
1170 (env->cr[4] & CR4_PAE_MASK))
1171 error_code |= PG_ERROR_I_D_MASK;
1172 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
1173 /* cr2 is not modified in case of exceptions */
1174 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
1175 addr);
1176 } else {
1177 env->cr[2] = addr;
1179 env->error_code = error_code;
1180 env->exception_index = EXCP0E_PAGE;
1181 return 1;
/*
 * Translate a guest virtual address into a guest physical address for
 * debugger accesses.  Performs a read-only page-table walk: unlike the
 * MMU fault path it never raises a fault and never sets accessed/dirty
 * bits.  Returns the physical address, or -1 if the page is not present
 * (or the address is non-canonical in long mode).
 */
1184 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1186 target_ulong pde_addr, pte_addr;
1187 uint64_t pte;
1188 target_phys_addr_t paddr;
1189 uint32_t page_offset;
1190 int page_size;
/* PAE paging: 64-bit entries, three walk levels (four with LMA). */
1192 if (env->cr[4] & CR4_PAE_MASK) {
1193 target_ulong pdpe_addr;
1194 uint64_t pde, pdpe;
1196 #ifdef TARGET_X86_64
1197 if (env->hflags & HF_LMA_MASK) {
1198 uint64_t pml4e_addr, pml4e;
1199 int32_t sext;
/* Long mode requires bits 63..47 to be a sign extension of bit 47
   (canonical address); reject anything else. */
1201 /* test virtual address sign extension */
1202 sext = (int64_t)addr >> 47;
1203 if (sext != 0 && sext != -1)
1204 return -1;
/* Level 4: PML4 entry selected by address bits 47..39. */
1206 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
1207 env->a20_mask;
1208 pml4e = ldq_phys(pml4e_addr);
1209 if (!(pml4e & PG_PRESENT_MASK))
1210 return -1;
/* Level 3: page-directory-pointer entry, address bits 38..30. */
1212 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
1213 env->a20_mask;
1214 pdpe = ldq_phys(pdpe_addr);
1215 if (!(pdpe & PG_PRESENT_MASK))
1216 return -1;
1217 } else
1218 #endif
/* 32-bit PAE: the 4-entry PDPT lives at CR3 bits 31..5; index by
   address bits 31..30. */
1220 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1221 env->a20_mask;
1222 pdpe = ldq_phys(pdpe_addr);
1223 if (!(pdpe & PG_PRESENT_MASK))
1224 return -1;
/* Level 2: page-directory entry, address bits 29..21. */
1227 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
1228 env->a20_mask;
1229 pde = ldq_phys(pde_addr);
1230 if (!(pde & PG_PRESENT_MASK)) {
1231 return -1;
1233 if (pde & PG_PSE_MASK) {
1234 /* 2 MB page */
1235 page_size = 2048 * 1024;
1236 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1237 } else {
1238 /* 4 KB page */
1239 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
1240 env->a20_mask;
1241 page_size = 4096;
1242 pte = ldq_phys(pte_addr);
1244 if (!(pte & PG_PRESENT_MASK))
1245 return -1;
1246 } else {
/* Legacy (non-PAE) paging: 32-bit entries, two walk levels. */
1247 uint32_t pde;
/* Paging disabled: physical address equals the virtual address. */
1249 if (!(env->cr[0] & CR0_PG_MASK)) {
1250 pte = addr;
1251 page_size = 4096;
1252 } else {
1253 /* page directory entry */
1254 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
1255 pde = ldl_phys(pde_addr);
1256 if (!(pde & PG_PRESENT_MASK))
1257 return -1;
/* 4 MB page when CR4.PSE is enabled and the PDE requests it. */
1258 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1259 pte = pde & ~0x003ff000; /* align to 4MB */
1260 page_size = 4096 * 1024;
1261 } else {
1262 /* page table entry */
1263 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
1264 pte = ldl_phys(pte_addr);
1265 if (!(pte & PG_PRESENT_MASK))
1266 return -1;
1267 page_size = 4096;
/* Apply the A20 mask to the final frame address as well. */
1270 pte = pte & env->a20_mask;
/* Combine the page frame with the in-page offset of the request. */
1273 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1274 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1275 return paddr;
/*
 * Install the debug-register breakpoint/watchpoint described by
 * DR7/DR[index] into QEMU's generic breakpoint machinery.  On failure
 * the cached slot pointer is cleared so later removal is a no-op.
 */
1278 void hw_breakpoint_insert(CPUState *env, int index)
1280 int type, err = 0;
1282 switch (hw_breakpoint_type(env->dr[7], index)) {
1283 case 0:
/* DR7 type 0: instruction-execution breakpoint at DR[index]. */
1284 if (hw_breakpoint_enabled(env->dr[7], index))
1285 err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
1286 &env->cpu_breakpoint[index]);
1287 break;
1288 case 1:
/* DR7 type 1: data-write watchpoint. */
1289 type = BP_CPU | BP_MEM_WRITE;
1290 goto insert_wp;
1291 case 2:
1292 /* No support for I/O watchpoints yet */
1293 break;
1294 case 3:
/* DR7 type 3: data read/write watchpoint. */
1295 type = BP_CPU | BP_MEM_ACCESS;
1296 insert_wp:
1297 err = cpu_watchpoint_insert(env, env->dr[index],
1298 hw_breakpoint_len(env->dr[7], index),
1299 type, &env->cpu_watchpoint[index]);
1300 break;
/* NOTE(review): clearing cpu_breakpoint here also after a failed
   watchpoint insert suggests cpu_breakpoint[] and cpu_watchpoint[]
   alias the same storage — confirm against CPUX86State in cpu.h. */
1302 if (err)
1303 env->cpu_breakpoint[index] = NULL;
/*
 * Remove the breakpoint/watchpoint previously installed for debug
 * register 'index'.  No-op when nothing is cached in the slot.
 */
1306 void hw_breakpoint_remove(CPUState *env, int index)
1308 if (!env->cpu_breakpoint[index])
1309 return;
1310 switch (hw_breakpoint_type(env->dr[7], index)) {
1311 case 0:
/* Execution breakpoint — only registered while enabled in DR7. */
1312 if (hw_breakpoint_enabled(env->dr[7], index))
1313 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
1314 break;
1315 case 1:
1316 case 3:
/* Data watchpoints (write-only or read/write). */
1317 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
1318 break;
1319 case 2:
1320 /* No support for I/O watchpoints yet */
1321 break;
/*
 * Recompute the DR6 B0-B3 hit bits for all four debug registers.
 * Returns non-zero when an *enabled* breakpoint/watchpoint was hit.
 * DR6 is written back only on a hit or when force_dr6_update is set.
 */
1325 int check_hw_breakpoints(CPUState *env, int force_dr6_update)
1327 target_ulong dr6;
1328 int reg, type;
1329 int hit_enabled = 0;
/* Start from DR6 with the four per-register hit bits cleared. */
1331 dr6 = env->dr[6] & ~0xf;
1332 for (reg = 0; reg < 4; reg++) {
1333 type = hw_breakpoint_type(env->dr[7], reg);
/* Type 0 (execution) matches on EIP; odd types (data watchpoints)
   match when the generic watchpoint layer flagged a hit. */
1334 if ((type == 0 && env->dr[reg] == env->eip) ||
1335 ((type & 1) && env->cpu_watchpoint[reg] &&
1336 (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
1337 dr6 |= 1 << reg;
1338 if (hw_breakpoint_enabled(env->dr[7], reg))
1339 hit_enabled = 1;
1342 if (hit_enabled || force_dr6_update)
1343 env->dr[6] = dr6;
1344 return hit_enabled;
/* Debug exception handler that was active before ours was installed;
   chained at the end of breakpoint_handler. */
1347 static CPUDebugExcpHandler *prev_debug_excp_handler;
1349 void raise_exception(int exception_index);
/*
 * Debug exception callback: decides whether a breakpoint/watchpoint
 * hit belongs to the guest (BP_CPU -> raise #DB) or to the host-side
 * debugger, then chains to the previous handler.
 */
1351 static void breakpoint_handler(CPUState *env)
1353 CPUBreakpoint *bp;
1355 if (env->watchpoint_hit) {
1356 if (env->watchpoint_hit->flags & BP_CPU) {
1357 env->watchpoint_hit = NULL;
/* Guest watchpoint: raise #DB only if an enabled one really hit;
   otherwise resume the instruction transparently. */
1358 if (check_hw_breakpoints(env, 0))
1359 raise_exception(EXCP01_DB);
1360 else
1361 cpu_resume_from_signal(env, NULL);
1363 } else {
1364 TAILQ_FOREACH(bp, &env->breakpoints, entry)
1365 if (bp->pc == env->eip) {
1366 if (bp->flags & BP_CPU) {
/* Guest breakpoint at EIP: force DR6 update and raise #DB. */
1367 check_hw_breakpoints(env, 1);
1368 raise_exception(EXCP01_DB);
1370 break;
/* Non-BP_CPU hits fall through to the previous (e.g. gdbstub) handler. */
1373 if (prev_debug_excp_handler)
1374 prev_debug_excp_handler(env);
1376 #endif /* !CONFIG_USER_ONLY */
/*
 * Execute the CPUID instruction on the *host* CPU (compiled in only
 * with CONFIG_KVM; otherwise the outputs are left untouched).  Any of
 * eax/ebx/ecx/edx may be NULL when the caller does not need that
 * register.
 */
1378 static void host_cpuid(uint32_t function, uint32_t *eax, uint32_t *ebx,
1379 uint32_t *ecx, uint32_t *edx)
1381 #if defined(CONFIG_KVM)
1382 uint32_t vec[4];
1384 #ifdef __x86_64__
1385 asm volatile("cpuid"
1386 : "=a"(vec[0]), "=b"(vec[1]),
1387 "=c"(vec[2]), "=d"(vec[3])
1388 : "0"(function) : "cc");
1389 #else
/* On 32-bit, save/restore all registers and store results through
   memory — presumably because EBX is reserved for PIC code and cannot
   appear in the asm outputs; confirm against build flags. */
1390 asm volatile("pusha \n\t"
1391 "cpuid \n\t"
1392 "mov %%eax, 0(%1) \n\t"
1393 "mov %%ebx, 4(%1) \n\t"
1394 "mov %%ecx, 8(%1) \n\t"
1395 "mov %%edx, 12(%1) \n\t"
1396 "popa"
1397 : : "a"(function), "S"(vec)
1398 : "memory", "cc");
1399 #endif
/* Copy back only the registers the caller asked for. */
1401 if (eax)
1402 *eax = vec[0];
1403 if (ebx)
1404 *ebx = vec[1];
1405 if (ecx)
1406 *ecx = vec[2];
1407 if (edx)
1408 *edx = vec[3];
1409 #endif
/*
 * Emulated CPUID: fill eax/ebx/ecx/edx for the requested leaf from the
 * per-CPU feature state.  For leaf 4, *ecx is also an input (cache
 * level selector).  Unknown leaves return all zeroes.
 */
1412 void cpu_x86_cpuid(CPUX86State *env, uint32_t index,
1413 uint32_t *eax, uint32_t *ebx,
1414 uint32_t *ecx, uint32_t *edx)
1416 /* test if maximum index reached */
1417 if (index & 0x80000000) {
/* Extended leaf beyond xlevel: fall back to the highest basic leaf,
   mirroring real-hardware behaviour. */
1418 if (index > env->cpuid_xlevel)
1419 index = env->cpuid_level;
1420 } else {
1421 if (index > env->cpuid_level)
1422 index = env->cpuid_level;
1425 switch(index) {
1426 case 0:
/* Leaf 0: maximum basic leaf and the 12-byte vendor string. */
1427 *eax = env->cpuid_level;
1428 *ebx = env->cpuid_vendor1;
1429 *edx = env->cpuid_vendor2;
1430 *ecx = env->cpuid_vendor3;
1432 /* sysenter isn't supported on compatibility mode on AMD. and syscall
1433 * isn't supported in compatibility mode on Intel. so advertise the
1434 * actual cpu, and say goodbye to migration between different vendors
1435 * if you use compatibility mode. */
1436 if (kvm_enabled())
1437 host_cpuid(0, NULL, ebx, ecx, edx);
1438 break;
1439 case 1:
/* Leaf 1: version info, APIC id, and the main feature flag words. */
1440 *eax = env->cpuid_version;
1441 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1442 *ecx = env->cpuid_ext_features;
1443 *edx = env->cpuid_features;
1445 /* "Hypervisor present" bit required for Microsoft SVVP */
1446 if (kvm_enabled())
1447 *ecx |= (1 << 31);
1448 break;
1449 case 2:
1450 /* cache info: needed for Pentium Pro compatibility */
1451 *eax = 1;
1452 *ebx = 0;
1453 *ecx = 0;
1454 *edx = 0x2c307d;
1455 break;
1456 case 4:
1457 /* cache info: needed for Core compatibility */
/* *ecx on entry selects which cache level to describe. */
1458 switch (*ecx) {
1459 case 0: /* L1 dcache info */
1460 *eax = 0x0000121;
1461 *ebx = 0x1c0003f;
1462 *ecx = 0x000003f;
1463 *edx = 0x0000001;
1464 break;
1465 case 1: /* L1 icache info */
1466 *eax = 0x0000122;
1467 *ebx = 0x1c0003f;
1468 *ecx = 0x000003f;
1469 *edx = 0x0000001;
1470 break;
1471 case 2: /* L2 cache info */
1472 *eax = 0x0000143;
1473 *ebx = 0x3c0003f;
1474 *ecx = 0x0000fff;
1475 *edx = 0x0000001;
1476 break;
1477 default: /* end of info */
1478 *eax = 0;
1479 *ebx = 0;
1480 *ecx = 0;
1481 *edx = 0;
1482 break;
1485 break;
1486 case 5:
1487 /* mwait info: needed for Core compatibility */
1488 *eax = 0; /* Smallest monitor-line size in bytes */
1489 *ebx = 0; /* Largest monitor-line size in bytes */
1490 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
1491 *edx = 0;
1492 break;
1493 case 6:
1494 /* Thermal and Power Leaf */
1495 *eax = 0;
1496 *ebx = 0;
1497 *ecx = 0;
1498 *edx = 0;
1499 break;
1500 case 9:
1501 /* Direct Cache Access Information Leaf */
1502 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
1503 *ebx = 0;
1504 *ecx = 0;
1505 *edx = 0;
1506 break;
1507 case 0xA:
1508 /* Architectural Performance Monitoring Leaf */
1509 *eax = 0;
1510 *ebx = 0;
1511 *ecx = 0;
1512 *edx = 0;
1513 break;
1514 case 0x80000000:
/* Extended leaf 0: maximum extended leaf plus the vendor string. */
1515 *eax = env->cpuid_xlevel;
1516 *ebx = env->cpuid_vendor1;
1517 *edx = env->cpuid_vendor2;
1518 *ecx = env->cpuid_vendor3;
1519 break;
1520 case 0x80000001:
/* Extended leaf 1: AMD-style feature flags. */
1521 *eax = env->cpuid_features;
1522 *ebx = 0;
1523 *ecx = env->cpuid_ext3_features;
1524 *edx = env->cpuid_ext2_features;
/* Under KVM, mask out features the host or KVM cannot provide so the
   guest never enables something that would trap or misbehave. */
1526 if (kvm_enabled()) {
1527 uint32_t h_eax, h_edx;
1529 host_cpuid(0x80000001, &h_eax, NULL, NULL, &h_edx);
1531 /* disable CPU features that the host does not support */
1533 /* long mode */
1534 if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
1535 *edx &= ~0x20000000;
1536 /* syscall */
1537 if ((h_edx & 0x00000800) == 0)
1538 *edx &= ~0x00000800;
1539 /* nx */
1540 if ((h_edx & 0x00100000) == 0)
1541 *edx &= ~0x00100000;
1543 /* disable CPU features that KVM cannot support */
1545 /* svm */
1546 *ecx &= ~4UL;
1547 /* 3dnow */
1548 *edx &= ~0xc0000000;
1550 break;
1551 case 0x80000002:
1552 case 0x80000003:
1553 case 0x80000004:
/* Extended leaves 2-4: 48-byte processor brand string, 16 bytes each. */
1554 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1555 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1556 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1557 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1558 break;
1559 case 0x80000005:
1560 /* cache info (L1 cache) */
1561 *eax = 0x01ff01ff;
1562 *ebx = 0x01ff01ff;
1563 *ecx = 0x40020140;
1564 *edx = 0x40020140;
1565 break;
1566 case 0x80000006:
1567 /* cache info (L2 cache) */
1568 *eax = 0;
1569 *ebx = 0x42004200;
1570 *ecx = 0x02008140;
1571 *edx = 0;
1572 break;
1573 case 0x80000008:
1574 /* virtual & phys address size in low 2 bytes. */
1575 /* XXX: This value must match the one used in the MMU code. */
1576 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
1577 /* 64 bit processor */
1578 #if defined(USE_KQEMU)
1579 *eax = 0x00003020; /* 48 bits virtual, 32 bits physical */
1580 #else
1581 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1582 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
1583 #endif
1584 } else {
1585 #if defined(USE_KQEMU)
1586 *eax = 0x00000020; /* 32 bits physical */
1587 #else
1588 if (env->cpuid_features & CPUID_PSE36)
1589 *eax = 0x00000024; /* 36 bits physical */
1590 else
1591 *eax = 0x00000020; /* 32 bits physical */
1592 #endif
1594 *ebx = 0;
1595 *ecx = 0;
1596 *edx = 0;
1597 break;
1598 case 0x8000000A:
/* Extended leaf 0xA: SVM (AMD virtualization) capabilities. */
1599 *eax = 0x00000001; /* SVM Revision */
1600 *ebx = 0x00000010; /* nr of ASIDs */
1601 *ecx = 0;
1602 *edx = 0; /* optional features */
1603 break;
1604 default:
1605 /* reserved values: zero */
1606 *eax = 0;
1607 *ebx = 0;
1608 *ecx = 0;
1609 *edx = 0;
1610 break;
/*
 * Allocate and initialize a new x86 CPU state for the given model
 * string.  Returns NULL on allocation or model-registration failure.
 * One-time global init (flag optimizer, debug handler) is guarded by
 * a static flag so it runs only for the first CPU.
 */
1614 CPUX86State *cpu_x86_init(const char *cpu_model)
1616 CPUX86State *env;
1617 static int inited;
1619 env = qemu_mallocz(sizeof(CPUX86State));
1620 if (!env)
1621 return NULL;
1622 cpu_exec_init(env);
1623 env->cpu_model_str = cpu_model;
1625 /* init various static tables */
1626 if (!inited) {
1627 inited = 1;
1628 optimize_flags_init();
1629 #ifndef CONFIG_USER_ONLY
/* Chain our debug exception handler in front of any existing one. */
1630 prev_debug_excp_handler =
1631 cpu_set_debug_excp_handler(breakpoint_handler);
1632 #endif
/* Unknown model string: release the half-built CPU and bail out. */
1634 if (cpu_x86_register(env, cpu_model) < 0) {
1635 cpu_x86_close(env);
1636 return NULL;
1638 cpu_reset(env);
1639 #ifdef USE_KQEMU
1640 kqemu_init(env);
1641 #endif
1642 if (kvm_enabled())
1643 kvm_init_vcpu(env);
1644 return env;