Merge branch 'qemu-cvs'
[qemu-kvm/fedora.git] / target-i386 / helper.c
blob121651d12576b5c47967f0d9cb772d9db1e60165
1 /*
2 * i386 helpers (without register variable usage)
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
26 #include <assert.h>
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "qemu-common.h"
31 #include "kvm.h"
33 #include "qemu-kvm.h"
35 //#define DEBUG_MMU
/* Set the bit for a named CPUID feature flag in one of the four feature
 * word bitmaps (standard EDX, standard ECX, extended EDX, extended ECX).
 * The tables are searched in that order and the first match wins; an
 * unknown name is reported on stderr and otherwise ignored. */
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i, t;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    /* One table-driven pass replaces four copy-pasted search loops;
     * search order (and hence first-match behaviour) is unchanged. */
    const char **tables[4] = {
        feature_name, ext_feature_name, ext2_feature_name, ext3_feature_name,
    };
    uint32_t *dests[4] = {
        features, ext_features, ext2_features, ext3_features,
    };

    for (t = 0; t < 4; t++) {
        for (i = 0; i < 32; i++) {
            if (tables[t][i] && !strcmp(flagname, tables[t][i])) {
                /* 1u, not 1: left-shifting signed 1 by 31 (e.g. for
                 * "pbe") is undefined behaviour. */
                *dests[t] |= 1u << i;
                return;
            }
        }
    }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
/* Optional vendor/brand string override provided elsewhere (qemu-kvm). */
extern const char *cpu_vendor_string;

/* One built-in CPU model: every value needed to populate the CPUID
 * state of a CPUX86State (consumed by cpu_x86_register()). */
typedef struct x86_def_t {
    const char *name;       /* model name as given to -cpu */
    uint32_t level;         /* highest basic CPUID leaf */
    uint32_t vendor1, vendor2, vendor3;  /* 12-char vendor id, 4 chars per word */
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;        /* highest extended CPUID leaf */
    char model_id[48];      /* processor brand string, NUL-padded */
} x86_def_t;
/* Cumulative feature-bit baselines for the classic CPU models below:
 * each generation extends the previous one. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
/* Feature set of a generic PPro-class CPU; basis for qemu32/qemu64. */
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
119 static x86_def_t x86_defs[] = {
120 #ifdef TARGET_X86_64
122 .name = "qemu64",
123 .level = 2,
124 .vendor1 = CPUID_VENDOR_AMD_1,
125 .vendor2 = CPUID_VENDOR_AMD_2,
126 .vendor3 = CPUID_VENDOR_AMD_3,
127 .family = 6,
128 .model = 2,
129 .stepping = 3,
130 .features = PPRO_FEATURES |
131 /* these features are needed for Win64 and aren't fully implemented */
132 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
133 /* this feature is needed for Solaris and isn't fully implemented */
134 CPUID_PSE36,
135 .ext_features = CPUID_EXT_SSE3,
136 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
137 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
138 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
139 .ext3_features = CPUID_EXT3_SVM,
140 .xlevel = 0x8000000A,
141 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
144 .name = "core2duo",
145 .level = 10,
146 .family = 6,
147 .model = 15,
148 .stepping = 11,
149 /* The original CPU also implements these features:
150 CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
151 CPUID_TM, CPUID_PBE */
152 .features = PPRO_FEATURES |
153 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
154 CPUID_PSE36,
155 /* The original CPU also implements these ext features:
156 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
157 CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
158 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
159 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
160 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
161 .xlevel = 0x80000008,
162 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
164 #endif
166 .name = "qemu32",
167 .level = 2,
168 .family = 6,
169 .model = 3,
170 .stepping = 3,
171 .features = PPRO_FEATURES,
172 .ext_features = CPUID_EXT_SSE3,
173 .xlevel = 0,
174 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
177 .name = "coreduo",
178 .level = 10,
179 .family = 6,
180 .model = 14,
181 .stepping = 8,
182 /* The original CPU also implements these features:
183 CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
184 CPUID_TM, CPUID_PBE */
185 .features = PPRO_FEATURES | CPUID_VME |
186 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
187 /* The original CPU also implements these ext features:
188 CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
189 CPUID_EXT_PDCM */
190 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
191 .ext2_features = CPUID_EXT2_NX,
192 .xlevel = 0x80000008,
193 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
196 .name = "486",
197 .level = 0,
198 .family = 4,
199 .model = 0,
200 .stepping = 0,
201 .features = I486_FEATURES,
202 .xlevel = 0,
205 .name = "pentium",
206 .level = 1,
207 .family = 5,
208 .model = 4,
209 .stepping = 3,
210 .features = PENTIUM_FEATURES,
211 .xlevel = 0,
214 .name = "pentium2",
215 .level = 2,
216 .family = 6,
217 .model = 5,
218 .stepping = 2,
219 .features = PENTIUM2_FEATURES,
220 .xlevel = 0,
223 .name = "pentium3",
224 .level = 2,
225 .family = 6,
226 .model = 7,
227 .stepping = 3,
228 .features = PENTIUM3_FEATURES,
229 .xlevel = 0,
232 .name = "athlon",
233 .level = 2,
234 .vendor1 = 0x68747541, /* "Auth" */
235 .vendor2 = 0x69746e65, /* "enti" */
236 .vendor3 = 0x444d4163, /* "cAMD" */
237 .family = 6,
238 .model = 2,
239 .stepping = 3,
240 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
241 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
242 .xlevel = 0x80000008,
243 /* XXX: put another string ? */
244 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
247 .name = "n270",
248 /* original is on level 10 */
249 .level = 5,
250 .family = 6,
251 .model = 28,
252 .stepping = 2,
253 .features = PPRO_FEATURES |
254 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
255 /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
256 * CPUID_HT | CPUID_TM | CPUID_PBE */
257 /* Some CPUs got no CPUID_SEP */
258 .ext_features = CPUID_EXT_MONITOR |
259 CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
260 /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
261 * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
262 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
263 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
264 .xlevel = 0x8000000A,
265 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
269 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
271 unsigned int i;
272 x86_def_t *def;
274 char *s = strdup(cpu_model);
275 char *featurestr, *name = strtok(s, ",");
276 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
277 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
278 int family = -1, model = -1, stepping = -1;
280 def = NULL;
281 for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
282 if (strcmp(name, x86_defs[i].name) == 0) {
283 def = &x86_defs[i];
284 break;
287 if (!def)
288 goto error;
289 memcpy(x86_cpu_def, def, sizeof(*def));
291 featurestr = strtok(NULL, ",");
293 while (featurestr) {
294 char *val;
295 if (featurestr[0] == '+') {
296 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
297 } else if (featurestr[0] == '-') {
298 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
299 } else if ((val = strchr(featurestr, '='))) {
300 *val = 0; val++;
301 if (!strcmp(featurestr, "family")) {
302 char *err;
303 family = strtol(val, &err, 10);
304 if (!*val || *err || family < 0) {
305 fprintf(stderr, "bad numerical value %s\n", val);
306 goto error;
308 x86_cpu_def->family = family;
309 } else if (!strcmp(featurestr, "model")) {
310 char *err;
311 model = strtol(val, &err, 10);
312 if (!*val || *err || model < 0 || model > 0xff) {
313 fprintf(stderr, "bad numerical value %s\n", val);
314 goto error;
316 x86_cpu_def->model = model;
317 } else if (!strcmp(featurestr, "stepping")) {
318 char *err;
319 stepping = strtol(val, &err, 10);
320 if (!*val || *err || stepping < 0 || stepping > 0xf) {
321 fprintf(stderr, "bad numerical value %s\n", val);
322 goto error;
324 x86_cpu_def->stepping = stepping;
325 } else if (!strcmp(featurestr, "vendor")) {
326 if (strlen(val) != 12) {
327 fprintf(stderr, "vendor string must be 12 chars long\n");
328 goto error;
330 x86_cpu_def->vendor1 = 0;
331 x86_cpu_def->vendor2 = 0;
332 x86_cpu_def->vendor3 = 0;
333 for(i = 0; i < 4; i++) {
334 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
335 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
336 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
338 } else if (!strcmp(featurestr, "model_id")) {
339 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
340 val);
341 } else {
342 fprintf(stderr, "unrecognized feature %s\n", featurestr);
343 goto error;
345 } else {
346 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
347 goto error;
349 featurestr = strtok(NULL, ",");
351 x86_cpu_def->features |= plus_features;
352 x86_cpu_def->ext_features |= plus_ext_features;
353 x86_cpu_def->ext2_features |= plus_ext2_features;
354 x86_cpu_def->ext3_features |= plus_ext3_features;
355 x86_cpu_def->features &= ~minus_features;
356 x86_cpu_def->ext_features &= ~minus_ext_features;
357 x86_cpu_def->ext2_features &= ~minus_ext2_features;
358 x86_cpu_def->ext3_features &= ~minus_ext3_features;
359 free(s);
360 return 0;
362 error:
363 free(s);
364 return -1;
367 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
369 unsigned int i;
371 for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
372 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
375 static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
377 x86_def_t def1, *def = &def1;
379 if (cpu_x86_find_by_name(def, cpu_model) < 0)
380 return -1;
381 if (def->vendor1) {
382 env->cpuid_vendor1 = def->vendor1;
383 env->cpuid_vendor2 = def->vendor2;
384 env->cpuid_vendor3 = def->vendor3;
385 } else {
386 env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
387 env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
388 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
390 env->cpuid_level = def->level;
391 if (def->family > 0x0f)
392 env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
393 else
394 env->cpuid_version = def->family << 8;
395 env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
396 env->cpuid_version |= def->stepping;
397 env->cpuid_features = def->features;
398 env->pat = 0x0007040600070406ULL;
399 env->cpuid_ext_features = def->ext_features;
400 env->cpuid_ext2_features = def->ext2_features;
401 env->cpuid_xlevel = def->xlevel;
402 env->cpuid_ext3_features = def->ext3_features;
404 const char *model_id = def->model_id;
405 int c, len, i;
407 if (cpu_vendor_string != NULL)
408 model_id = cpu_vendor_string;
409 if (!model_id)
410 model_id = "";
411 len = strlen(model_id);
412 for(i = 0; i < 48; i++) {
413 if (i >= len)
414 c = '\0';
415 else
416 c = (uint8_t)model_id[i];
417 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
420 return 0;
423 /* NOTE: must be called outside the CPU execute loop */
424 void cpu_reset(CPUX86State *env)
426 int i;
428 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
429 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
430 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
433 memset(env, 0, offsetof(CPUX86State, breakpoints));
435 tlb_flush(env, 1);
437 env->old_exception = -1;
439 /* init to reset state */
441 #ifdef CONFIG_SOFTMMU
442 env->hflags |= HF_SOFTMMU_MASK;
443 #endif
444 env->hflags2 |= HF2_GIF_MASK;
446 cpu_x86_update_cr0(env, 0x60000010);
447 env->a20_mask = ~0x0;
448 env->smbase = 0x30000;
450 env->idt.limit = 0xffff;
451 env->gdt.limit = 0xffff;
452 env->ldt.limit = 0xffff;
453 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
454 env->tr.limit = 0xffff;
455 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
457 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
458 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
459 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
460 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
461 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
462 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
463 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
464 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
465 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
466 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
467 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
468 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
470 env->eip = 0xfff0;
471 env->regs[R_EDX] = env->cpuid_version;
473 env->eflags = 0x2;
475 /* FPU init */
476 for(i = 0;i < 8; i++)
477 env->fptags[i] = 1;
478 env->fpuc = 0x37f;
480 env->mxcsr = 0x1f80;
482 memset(env->dr, 0, sizeof(env->dr));
483 env->dr[6] = DR6_FIXED_1;
484 env->dr[7] = DR7_FIXED_1;
485 cpu_breakpoint_remove_all(env, BP_CPU);
486 cpu_watchpoint_remove_all(env, BP_CPU);
489 void cpu_x86_close(CPUX86State *env)
491 qemu_free(env);
494 /***********************************************************/
495 /* x86 debug */
497 static const char *cc_op_str[] = {
498 "DYNAMIC",
499 "EFLAGS",
501 "MULB",
502 "MULW",
503 "MULL",
504 "MULQ",
506 "ADDB",
507 "ADDW",
508 "ADDL",
509 "ADDQ",
511 "ADCB",
512 "ADCW",
513 "ADCL",
514 "ADCQ",
516 "SUBB",
517 "SUBW",
518 "SUBL",
519 "SUBQ",
521 "SBBB",
522 "SBBW",
523 "SBBL",
524 "SBBQ",
526 "LOGICB",
527 "LOGICW",
528 "LOGICL",
529 "LOGICQ",
531 "INCB",
532 "INCW",
533 "INCL",
534 "INCQ",
536 "DECB",
537 "DECW",
538 "DECL",
539 "DECQ",
541 "SHLB",
542 "SHLW",
543 "SHLL",
544 "SHLQ",
546 "SARB",
547 "SARW",
548 "SARL",
549 "SARQ",
552 void cpu_dump_state(CPUState *env, FILE *f,
553 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
554 int flags)
556 int eflags, i, nb;
557 char cc_op_name[32];
558 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
560 eflags = env->eflags;
561 #ifdef TARGET_X86_64
562 if (env->hflags & HF_CS64_MASK) {
563 cpu_fprintf(f,
564 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
565 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
566 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
567 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
568 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
569 env->regs[R_EAX],
570 env->regs[R_EBX],
571 env->regs[R_ECX],
572 env->regs[R_EDX],
573 env->regs[R_ESI],
574 env->regs[R_EDI],
575 env->regs[R_EBP],
576 env->regs[R_ESP],
577 env->regs[8],
578 env->regs[9],
579 env->regs[10],
580 env->regs[11],
581 env->regs[12],
582 env->regs[13],
583 env->regs[14],
584 env->regs[15],
585 env->eip, eflags,
586 eflags & DF_MASK ? 'D' : '-',
587 eflags & CC_O ? 'O' : '-',
588 eflags & CC_S ? 'S' : '-',
589 eflags & CC_Z ? 'Z' : '-',
590 eflags & CC_A ? 'A' : '-',
591 eflags & CC_P ? 'P' : '-',
592 eflags & CC_C ? 'C' : '-',
593 env->hflags & HF_CPL_MASK,
594 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
595 (int)(env->a20_mask >> 20) & 1,
596 (env->hflags >> HF_SMM_SHIFT) & 1,
597 env->halted);
598 } else
599 #endif
601 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
602 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
603 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
604 (uint32_t)env->regs[R_EAX],
605 (uint32_t)env->regs[R_EBX],
606 (uint32_t)env->regs[R_ECX],
607 (uint32_t)env->regs[R_EDX],
608 (uint32_t)env->regs[R_ESI],
609 (uint32_t)env->regs[R_EDI],
610 (uint32_t)env->regs[R_EBP],
611 (uint32_t)env->regs[R_ESP],
612 (uint32_t)env->eip, eflags,
613 eflags & DF_MASK ? 'D' : '-',
614 eflags & CC_O ? 'O' : '-',
615 eflags & CC_S ? 'S' : '-',
616 eflags & CC_Z ? 'Z' : '-',
617 eflags & CC_A ? 'A' : '-',
618 eflags & CC_P ? 'P' : '-',
619 eflags & CC_C ? 'C' : '-',
620 env->hflags & HF_CPL_MASK,
621 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
622 (int)(env->a20_mask >> 20) & 1,
623 (env->hflags >> HF_SMM_SHIFT) & 1,
624 env->halted);
627 #ifdef TARGET_X86_64
628 if (env->hflags & HF_LMA_MASK) {
629 for(i = 0; i < 6; i++) {
630 SegmentCache *sc = &env->segs[i];
631 cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
632 seg_name[i],
633 sc->selector,
634 sc->base,
635 sc->limit,
636 sc->flags);
638 cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
639 env->ldt.selector,
640 env->ldt.base,
641 env->ldt.limit,
642 env->ldt.flags);
643 cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
644 env->tr.selector,
645 env->tr.base,
646 env->tr.limit,
647 env->tr.flags);
648 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
649 env->gdt.base, env->gdt.limit);
650 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
651 env->idt.base, env->idt.limit);
652 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
653 (uint32_t)env->cr[0],
654 env->cr[2],
655 env->cr[3],
656 (uint32_t)env->cr[4]);
657 for(i = 0; i < 4; i++)
658 cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
659 cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
660 env->dr[6], env->dr[7]);
661 } else
662 #endif
664 for(i = 0; i < 6; i++) {
665 SegmentCache *sc = &env->segs[i];
666 cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
667 seg_name[i],
668 sc->selector,
669 (uint32_t)sc->base,
670 sc->limit,
671 sc->flags);
673 cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
674 env->ldt.selector,
675 (uint32_t)env->ldt.base,
676 env->ldt.limit,
677 env->ldt.flags);
678 cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
679 env->tr.selector,
680 (uint32_t)env->tr.base,
681 env->tr.limit,
682 env->tr.flags);
683 cpu_fprintf(f, "GDT= %08x %08x\n",
684 (uint32_t)env->gdt.base, env->gdt.limit);
685 cpu_fprintf(f, "IDT= %08x %08x\n",
686 (uint32_t)env->idt.base, env->idt.limit);
687 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
688 (uint32_t)env->cr[0],
689 (uint32_t)env->cr[2],
690 (uint32_t)env->cr[3],
691 (uint32_t)env->cr[4]);
692 for(i = 0; i < 4; i++)
693 cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
694 cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
696 if (flags & X86_DUMP_CCOP) {
697 if ((unsigned)env->cc_op < CC_OP_NB)
698 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
699 else
700 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
701 #ifdef TARGET_X86_64
702 if (env->hflags & HF_CS64_MASK) {
703 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
704 env->cc_src, env->cc_dst,
705 cc_op_name);
706 } else
707 #endif
709 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
710 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
711 cc_op_name);
714 if (flags & X86_DUMP_FPU) {
715 int fptag;
716 fptag = 0;
717 for(i = 0; i < 8; i++) {
718 fptag |= ((!env->fptags[i]) << i);
720 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
721 env->fpuc,
722 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
723 env->fpstt,
724 fptag,
725 env->mxcsr);
726 for(i=0;i<8;i++) {
727 #if defined(USE_X86LDOUBLE)
728 union {
729 long double d;
730 struct {
731 uint64_t lower;
732 uint16_t upper;
733 } l;
734 } tmp;
735 tmp.d = env->fpregs[i].d;
736 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
737 i, tmp.l.lower, tmp.l.upper);
738 #else
739 cpu_fprintf(f, "FPR%d=%016" PRIx64,
740 i, env->fpregs[i].mmx.q);
741 #endif
742 if ((i & 1) == 1)
743 cpu_fprintf(f, "\n");
744 else
745 cpu_fprintf(f, " ");
747 if (env->hflags & HF_CS64_MASK)
748 nb = 16;
749 else
750 nb = 8;
751 for(i=0;i<nb;i++) {
752 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
754 env->xmm_regs[i].XMM_L(3),
755 env->xmm_regs[i].XMM_L(2),
756 env->xmm_regs[i].XMM_L(1),
757 env->xmm_regs[i].XMM_L(0));
758 if ((i & 1) == 1)
759 cpu_fprintf(f, "\n");
760 else
761 cpu_fprintf(f, " ");
766 /***********************************************************/
767 /* x86 mmu */
768 /* XXX: add PGE support */
770 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
772 a20_state = (a20_state != 0);
773 if (a20_state != ((env->a20_mask >> 20) & 1)) {
774 #if defined(DEBUG_MMU)
775 printf("A20 update: a20=%d\n", a20_state);
776 #endif
777 /* if the cpu is currently executing code, we must unlink it and
778 all the potentially executing TB */
779 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
781 /* when a20 is changed, all the MMU mappings are invalid, so
782 we must flush everything */
783 tlb_flush(env, 1);
784 env->a20_mask = (~0x100000) | (a20_state << 20);
788 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
790 int pe_state;
792 #if defined(DEBUG_MMU)
793 printf("CR0 update: CR0=0x%08x\n", new_cr0);
794 #endif
795 if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
796 (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
797 tlb_flush(env, 1);
800 #ifdef TARGET_X86_64
801 if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
802 (env->efer & MSR_EFER_LME)) {
803 /* enter in long mode */
804 /* XXX: generate an exception */
805 if (!(env->cr[4] & CR4_PAE_MASK))
806 return;
807 env->efer |= MSR_EFER_LMA;
808 env->hflags |= HF_LMA_MASK;
809 } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
810 (env->efer & MSR_EFER_LMA)) {
811 /* exit long mode */
812 env->efer &= ~MSR_EFER_LMA;
813 env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
814 env->eip &= 0xffffffff;
816 #endif
817 env->cr[0] = new_cr0 | CR0_ET_MASK;
819 /* update PE flag in hidden flags */
820 pe_state = (env->cr[0] & CR0_PE_MASK);
821 env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
822 /* ensure that ADDSEG is always set in real mode */
823 env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
824 /* update FPU flags */
825 env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
826 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
829 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
830 the PDPT */
831 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
833 env->cr[3] = new_cr3;
834 if (env->cr[0] & CR0_PG_MASK) {
835 #if defined(DEBUG_MMU)
836 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
837 #endif
838 tlb_flush(env, 0);
842 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
844 #if defined(DEBUG_MMU)
845 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
846 #endif
847 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
848 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
849 tlb_flush(env, 1);
851 /* SSE handling */
852 if (!(env->cpuid_features & CPUID_SSE))
853 new_cr4 &= ~CR4_OSFXSR_MASK;
854 if (new_cr4 & CR4_OSFXSR_MASK)
855 env->hflags |= HF_OSFXSR_MASK;
856 else
857 env->hflags &= ~HF_OSFXSR_MASK;
859 env->cr[4] = new_cr4;
862 #if defined(CONFIG_USER_ONLY)
864 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
865 int is_write, int mmu_idx, int is_softmmu)
867 /* user mode only emulation */
868 is_write &= 1;
869 env->cr[2] = addr;
870 env->error_code = (is_write << PG_ERROR_W_BIT);
871 env->error_code |= PG_ERROR_U_MASK;
872 env->exception_index = EXCP0E_PAGE;
873 return 1;
876 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
878 return addr;
881 #else
/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
/* Mask selecting the physical-frame bits of a PTE (strips the low flag
 * bits and bits above the supported physical address width). */
#if defined(USE_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
#endif
895 /* return value:
896 -1 = cannot handle fault
897 0 = nothing more to do
898 1 = generate PF fault
899 2 = soft MMU activation required for this block
901 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
902 int is_write1, int mmu_idx, int is_softmmu)
904 uint64_t ptep, pte;
905 target_ulong pde_addr, pte_addr;
906 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
907 target_phys_addr_t paddr;
908 uint32_t page_offset;
909 target_ulong vaddr, virt_addr;
911 is_user = mmu_idx == MMU_USER_IDX;
912 #if defined(DEBUG_MMU)
913 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
914 addr, is_write1, is_user, env->eip);
915 #endif
916 is_write = is_write1 & 1;
918 if (!(env->cr[0] & CR0_PG_MASK)) {
919 pte = addr;
920 virt_addr = addr & TARGET_PAGE_MASK;
921 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
922 page_size = 4096;
923 goto do_mapping;
926 if (env->cr[4] & CR4_PAE_MASK) {
927 uint64_t pde, pdpe;
928 target_ulong pdpe_addr;
930 #ifdef TARGET_X86_64
931 if (env->hflags & HF_LMA_MASK) {
932 uint64_t pml4e_addr, pml4e;
933 int32_t sext;
935 /* test virtual address sign extension */
936 sext = (int64_t)addr >> 47;
937 if (sext != 0 && sext != -1) {
938 env->error_code = 0;
939 env->exception_index = EXCP0D_GPF;
940 return 1;
943 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
944 env->a20_mask;
945 pml4e = ldq_phys(pml4e_addr);
946 if (!(pml4e & PG_PRESENT_MASK)) {
947 error_code = 0;
948 goto do_fault;
950 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
951 error_code = PG_ERROR_RSVD_MASK;
952 goto do_fault;
954 if (!(pml4e & PG_ACCESSED_MASK)) {
955 pml4e |= PG_ACCESSED_MASK;
956 stl_phys_notdirty(pml4e_addr, pml4e);
958 ptep = pml4e ^ PG_NX_MASK;
959 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
960 env->a20_mask;
961 pdpe = ldq_phys(pdpe_addr);
962 if (!(pdpe & PG_PRESENT_MASK)) {
963 error_code = 0;
964 goto do_fault;
966 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
967 error_code = PG_ERROR_RSVD_MASK;
968 goto do_fault;
970 ptep &= pdpe ^ PG_NX_MASK;
971 if (!(pdpe & PG_ACCESSED_MASK)) {
972 pdpe |= PG_ACCESSED_MASK;
973 stl_phys_notdirty(pdpe_addr, pdpe);
975 } else
976 #endif
978 /* XXX: load them when cr3 is loaded ? */
979 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
980 env->a20_mask;
981 pdpe = ldq_phys(pdpe_addr);
982 if (!(pdpe & PG_PRESENT_MASK)) {
983 error_code = 0;
984 goto do_fault;
986 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
989 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
990 env->a20_mask;
991 pde = ldq_phys(pde_addr);
992 if (!(pde & PG_PRESENT_MASK)) {
993 error_code = 0;
994 goto do_fault;
996 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
997 error_code = PG_ERROR_RSVD_MASK;
998 goto do_fault;
1000 ptep &= pde ^ PG_NX_MASK;
1001 if (pde & PG_PSE_MASK) {
1002 /* 2 MB page */
1003 page_size = 2048 * 1024;
1004 ptep ^= PG_NX_MASK;
1005 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1006 goto do_fault_protect;
1007 if (is_user) {
1008 if (!(ptep & PG_USER_MASK))
1009 goto do_fault_protect;
1010 if (is_write && !(ptep & PG_RW_MASK))
1011 goto do_fault_protect;
1012 } else {
1013 if ((env->cr[0] & CR0_WP_MASK) &&
1014 is_write && !(ptep & PG_RW_MASK))
1015 goto do_fault_protect;
1017 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1018 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1019 pde |= PG_ACCESSED_MASK;
1020 if (is_dirty)
1021 pde |= PG_DIRTY_MASK;
1022 stl_phys_notdirty(pde_addr, pde);
1024 /* align to page_size */
1025 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
1026 virt_addr = addr & ~(page_size - 1);
1027 } else {
1028 /* 4 KB page */
1029 if (!(pde & PG_ACCESSED_MASK)) {
1030 pde |= PG_ACCESSED_MASK;
1031 stl_phys_notdirty(pde_addr, pde);
1033 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
1034 env->a20_mask;
1035 pte = ldq_phys(pte_addr);
1036 if (!(pte & PG_PRESENT_MASK)) {
1037 error_code = 0;
1038 goto do_fault;
1040 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
1041 error_code = PG_ERROR_RSVD_MASK;
1042 goto do_fault;
1044 /* combine pde and pte nx, user and rw protections */
1045 ptep &= pte ^ PG_NX_MASK;
1046 ptep ^= PG_NX_MASK;
1047 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1048 goto do_fault_protect;
1049 if (is_user) {
1050 if (!(ptep & PG_USER_MASK))
1051 goto do_fault_protect;
1052 if (is_write && !(ptep & PG_RW_MASK))
1053 goto do_fault_protect;
1054 } else {
1055 if ((env->cr[0] & CR0_WP_MASK) &&
1056 is_write && !(ptep & PG_RW_MASK))
1057 goto do_fault_protect;
1059 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1060 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1061 pte |= PG_ACCESSED_MASK;
1062 if (is_dirty)
1063 pte |= PG_DIRTY_MASK;
1064 stl_phys_notdirty(pte_addr, pte);
1066 page_size = 4096;
1067 virt_addr = addr & ~0xfff;
1068 pte = pte & (PHYS_ADDR_MASK | 0xfff);
1070 } else {
1071 uint32_t pde;
1073 /* page directory entry */
1074 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
1075 env->a20_mask;
1076 pde = ldl_phys(pde_addr);
1077 if (!(pde & PG_PRESENT_MASK)) {
1078 error_code = 0;
1079 goto do_fault;
1081 /* if PSE bit is set, then we use a 4MB page */
1082 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1083 page_size = 4096 * 1024;
1084 if (is_user) {
1085 if (!(pde & PG_USER_MASK))
1086 goto do_fault_protect;
1087 if (is_write && !(pde & PG_RW_MASK))
1088 goto do_fault_protect;
1089 } else {
1090 if ((env->cr[0] & CR0_WP_MASK) &&
1091 is_write && !(pde & PG_RW_MASK))
1092 goto do_fault_protect;
1094 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1095 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1096 pde |= PG_ACCESSED_MASK;
1097 if (is_dirty)
1098 pde |= PG_DIRTY_MASK;
1099 stl_phys_notdirty(pde_addr, pde);
1102 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1103 ptep = pte;
1104 virt_addr = addr & ~(page_size - 1);
1105 } else {
1106 if (!(pde & PG_ACCESSED_MASK)) {
1107 pde |= PG_ACCESSED_MASK;
1108 stl_phys_notdirty(pde_addr, pde);
1111 /* page directory entry */
1112 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1113 env->a20_mask;
1114 pte = ldl_phys(pte_addr);
1115 if (!(pte & PG_PRESENT_MASK)) {
1116 error_code = 0;
1117 goto do_fault;
1119 /* combine pde and pte user and rw protections */
1120 ptep = pte & pde;
1121 if (is_user) {
1122 if (!(ptep & PG_USER_MASK))
1123 goto do_fault_protect;
1124 if (is_write && !(ptep & PG_RW_MASK))
1125 goto do_fault_protect;
1126 } else {
1127 if ((env->cr[0] & CR0_WP_MASK) &&
1128 is_write && !(ptep & PG_RW_MASK))
1129 goto do_fault_protect;
1131 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1132 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1133 pte |= PG_ACCESSED_MASK;
1134 if (is_dirty)
1135 pte |= PG_DIRTY_MASK;
1136 stl_phys_notdirty(pte_addr, pte);
1138 page_size = 4096;
1139 virt_addr = addr & ~0xfff;
1142 /* the page can be put in the TLB */
1143 prot = PAGE_READ;
1144 if (!(ptep & PG_NX_MASK))
1145 prot |= PAGE_EXEC;
1146 if (pte & PG_DIRTY_MASK) {
1147 /* only set write access if already dirty... otherwise wait
1148 for dirty access */
1149 if (is_user) {
1150 if (ptep & PG_RW_MASK)
1151 prot |= PAGE_WRITE;
1152 } else {
1153 if (!(env->cr[0] & CR0_WP_MASK) ||
1154 (ptep & PG_RW_MASK))
1155 prot |= PAGE_WRITE;
1158 do_mapping:
1159 pte = pte & env->a20_mask;
1161 /* Even if 4MB pages, we map only one 4KB page in the cache to
1162 avoid filling it too fast */
1163 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1164 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1165 vaddr = virt_addr + page_offset;
1167 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1168 return ret;
1169 do_fault_protect:
1170 error_code = PG_ERROR_P_MASK;
1171 do_fault:
1172 error_code |= (is_write << PG_ERROR_W_BIT);
1173 if (is_user)
1174 error_code |= PG_ERROR_U_MASK;
1175 if (is_write1 == 2 &&
1176 (env->efer & MSR_EFER_NXE) &&
1177 (env->cr[4] & CR4_PAE_MASK))
1178 error_code |= PG_ERROR_I_D_MASK;
1179 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
1180 /* cr2 is not modified in case of exceptions */
1181 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
1182 addr);
1183 } else {
1184 env->cr[2] = addr;
1186 env->error_code = error_code;
1187 env->exception_index = EXCP0E_PAGE;
1188 return 1;
/* Translate a guest-virtual address to a guest-physical address for
 * debugger/monitor use.  Unlike the MMU fault path, this walk:
 *   - never sets accessed/dirty bits in the page tables,
 *   - ignores all permission checks (user/supervisor, read/write, NX),
 *   - returns -1 instead of injecting a #PF when a mapping is absent.
 * Handles: long-mode 4-level walks, 32-bit PAE 3-level walks, legacy
 * 2-level walks, 4MB/2MB large pages, and the paging-disabled identity
 * mapping.  The A20 mask is applied to every table address and to the
 * final physical address.
 */
1191 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1193 target_ulong pde_addr, pte_addr;
1194 uint64_t pte;
1195 target_phys_addr_t paddr;
1196 uint32_t page_offset;
1197 int page_size;
/* PAE enabled: entries are 64-bit; walk starts from a PDPT (or PML4 in
 * long mode). */
1199 if (env->cr[4] & CR4_PAE_MASK) {
1200 target_ulong pdpe_addr;
1201 uint64_t pde, pdpe;
1203 #ifdef TARGET_X86_64
1204 if (env->hflags & HF_LMA_MASK) {
1205 uint64_t pml4e_addr, pml4e;
1206 int32_t sext;
1208 /* test virtual address sign extension */
1209 sext = (int64_t)addr >> 47;
1210 if (sext != 0 && sext != -1)
1211 return -1;
/* Long mode: CR3 -> PML4E (bits 47:39) -> PDPE (bits 38:30). */
1213 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
1214 env->a20_mask;
1215 pml4e = ldq_phys(pml4e_addr);
1216 if (!(pml4e & PG_PRESENT_MASK))
1217 return -1;
1219 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
1220 env->a20_mask;
1221 pdpe = ldq_phys(pdpe_addr);
1222 if (!(pdpe & PG_PRESENT_MASK))
1223 return -1;
1224 } else
1225 #endif
/* 32-bit PAE: 4-entry PDPT at CR3 (32-byte aligned); bits 31:30 of
 * the address select the entry. */
1227 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1228 env->a20_mask;
1229 pdpe = ldq_phys(pdpe_addr);
1230 if (!(pdpe & PG_PRESENT_MASK))
1231 return -1;
/* PDPE -> PDE (bits 29:21 in 32-bit PAE, 29:21 in long mode too). */
1234 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
1235 env->a20_mask;
1236 pde = ldq_phys(pde_addr);
1237 if (!(pde & PG_PRESENT_MASK)) {
1238 return -1;
1240 if (pde & PG_PSE_MASK) {
1241 /* 2 MB page */
1242 page_size = 2048 * 1024;
1243 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1244 } else {
1245 /* 4 KB page */
1246 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
1247 env->a20_mask;
1248 page_size = 4096;
1249 pte = ldq_phys(pte_addr);
1251 if (!(pte & PG_PRESENT_MASK))
1252 return -1;
1253 } else {
1254 uint32_t pde;
/* Non-PAE: either paging is off (identity map) or a legacy 2-level
 * walk with 32-bit entries. */
1256 if (!(env->cr[0] & CR0_PG_MASK)) {
1257 pte = addr;
1258 page_size = 4096;
1259 } else {
1260 /* page directory entry */
1261 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
1262 pde = ldl_phys(pde_addr);
1263 if (!(pde & PG_PRESENT_MASK))
1264 return -1;
/* PSE large page requires both the PDE PS bit and CR4.PSE. */
1265 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1266 pte = pde & ~0x003ff000; /* align to 4MB */
1267 page_size = 4096 * 1024;
1268 } else {
1269 /* page directory entry */
1270 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
1271 pte = ldl_phys(pte_addr);
1272 if (!(pte & PG_PRESENT_MASK))
1273 return -1;
1274 page_size = 4096;
/* A20 gating applies to the resulting frame address as well. */
1277 pte = pte & env->a20_mask;
/* For large pages, keep the offset of the 4KB sub-page inside the big
 * page (zero for 4KB pages), then add the low 12 bits implicitly via
 * TARGET_PAGE_MASK arithmetic below. */
1280 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1281 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1282 return paddr;
/* Materialise debug-register slot `index` (DR0..DR3) as a QEMU CPU
 * breakpoint or watchpoint, according to its R/W type bits in DR7:
 *   0 = instruction breakpoint, 1 = data write, 2 = I/O (unsupported),
 *   3 = data read/write.  On insertion failure the cached breakpoint
 *   pointer for the slot is cleared so removal stays safe.
 */
1285 void hw_breakpoint_insert(CPUState *env, int index)
1287 int type, err = 0;
1289 switch (hw_breakpoint_type(env->dr[7], index)) {
1290 case 0:
/* Instruction breakpoint: only inserted if enabled in DR7. */
1291 if (hw_breakpoint_enabled(env->dr[7], index))
1292 err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
1293 &env->cpu_breakpoint[index]);
1294 break;
1295 case 1:
1296 type = BP_CPU | BP_MEM_WRITE;
1297 goto insert_wp;
1298 case 2:
1299 /* No support for I/O watchpoints yet */
1300 break;
1301 case 3:
1302 type = BP_CPU | BP_MEM_ACCESS;
1303 insert_wp:
/* Data watchpoints are inserted unconditionally; the length comes
 * from the LEN bits in DR7 for this slot. */
1304 err = cpu_watchpoint_insert(env, env->dr[index],
1305 hw_breakpoint_len(env->dr[7], index),
1306 type, &env->cpu_watchpoint[index]);
1307 break;
1309 if (err)
1310 env->cpu_breakpoint[index] = NULL;
/* Tear down the QEMU breakpoint/watchpoint previously installed for
 * debug-register slot `index` by hw_breakpoint_insert().  A NULL cached
 * pointer means nothing was installed (or insertion failed), so there
 * is nothing to remove.
 */
1313 void hw_breakpoint_remove(CPUState *env, int index)
1315 if (!env->cpu_breakpoint[index])
1316 return;
1317 switch (hw_breakpoint_type(env->dr[7], index)) {
1318 case 0:
/* Instruction breakpoint: mirrors the enabled check used at insert. */
1319 if (hw_breakpoint_enabled(env->dr[7], index))
1320 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
1321 break;
1322 case 1:
1323 case 3:
/* Data write / data access watchpoints share the same removal path. */
1324 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
1325 break;
1326 case 2:
1327 /* No support for I/O watchpoints yet */
1328 break;
/* Recompute the DR6 hit bits (B0..B3) after a potential debug event.
 * A slot hits when it is an instruction breakpoint matching EIP, or a
 * data watchpoint whose QEMU watchpoint carries BP_WATCHPOINT_HIT.
 * DR6 is written back only when an *enabled* slot hit, or when the
 * caller forces the update (force_dr6_update).  Returns nonzero iff an
 * enabled breakpoint/watchpoint triggered (i.e. #DB should be raised).
 */
1332 int check_hw_breakpoints(CPUState *env, int force_dr6_update)
1334 target_ulong dr6;
1335 int reg, type;
1336 int hit_enabled = 0;
/* Start from DR6 with the four B0..B3 status bits cleared. */
1338 dr6 = env->dr[6] & ~0xf;
1339 for (reg = 0; reg < 4; reg++) {
1340 type = hw_breakpoint_type(env->dr[7], reg);
/* type & 1 selects the data-watchpoint types (1 = write, 3 = access);
 * type == 0 is an execution breakpoint compared against EIP. */
1341 if ((type == 0 && env->dr[reg] == env->eip) ||
1342 ((type & 1) && env->cpu_watchpoint[reg] &&
1343 (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
1344 dr6 |= 1 << reg;
1345 if (hw_breakpoint_enabled(env->dr[7], reg))
1346 hit_enabled = 1;
1349 if (hit_enabled || force_dr6_update)
1350 env->dr[6] = dr6;
1351 return hit_enabled;
/* Chained previous debug-exception handler, saved when this file
 * installs breakpoint_handler via cpu_set_debug_excp_handler(). */
1354 static CPUDebugExcpHandler *prev_debug_excp_handler;
1356 void raise_exception(int exception_index);
/* Debug-exception hook: decides whether a QEMU-level breakpoint or
 * watchpoint hit corresponds to a guest hardware breakpoint (BP_CPU)
 * and, if so, updates DR6 and injects #DB into the guest; otherwise
 * resumes execution or defers to the previously installed handler
 * (e.g. the gdbstub's).
 */
1358 static void breakpoint_handler(CPUState *env)
1360 CPUBreakpoint *bp;
1362 if (env->watchpoint_hit) {
1363 if (env->watchpoint_hit->flags & BP_CPU) {
/* Consume the hit before re-checking DR6/DR7 state. */
1364 env->watchpoint_hit = NULL;
1365 if (check_hw_breakpoints(env, 0))
1366 raise_exception(EXCP01_DB);
1367 else
/* Not an enabled guest breakpoint: silently resume. */
1368 cpu_resume_from_signal(env, NULL);
1370 } else {
/* Instruction breakpoint path: find the QEMU breakpoint at EIP. */
1371 TAILQ_FOREACH(bp, &env->breakpoints, entry)
1372 if (bp->pc == env->eip) {
1373 if (bp->flags & BP_CPU) {
/* Force DR6 update even for disabled slots, then inject #DB. */
1374 check_hw_breakpoints(env, 1);
1375 raise_exception(EXCP01_DB);
1377 break;
/* Chain to the handler that was installed before ours (if any). */
1380 if (prev_debug_excp_handler)
1381 prev_debug_excp_handler(env);
1383 #endif /* !CONFIG_USER_ONLY */
/* Execute the CPUID instruction on the *host* CPU for leaf `function`
 * and store the results through any non-NULL output pointers.  Used to
 * blend host capabilities into the guest's CPUID when KVM is enabled.
 * NOTE(review): when neither CONFIG_KVM nor USE_KVM is defined the body
 * compiles to nothing and the outputs are left untouched — callers only
 * invoke this under kvm_enabled(), so that is presumably harmless, but
 * confirm no caller reads the outputs unconditionally.
 */
1385 static void host_cpuid(uint32_t function, uint32_t *eax, uint32_t *ebx,
1386 uint32_t *ecx, uint32_t *edx)
1388 #if defined(CONFIG_KVM) || defined(USE_KVM)
1389 uint32_t vec[4];
1391 #ifdef __x86_64__
/* 64-bit: let the compiler allocate the four output registers; "0"
 * ties the input leaf to eax. */
1392 asm volatile("cpuid"
1393 : "=a"(vec[0]), "=b"(vec[1]),
1394 "=c"(vec[2]), "=d"(vec[3])
1395 : "0"(function) : "cc");
1396 #else
/* 32-bit: pusha/popa preserves all GPRs (notably ebx, which may be the
 * PIC register), and the results are stored through %esi ("S"(vec)). */
1397 asm volatile("pusha \n\t"
1398 "cpuid \n\t"
1399 "mov %%eax, 0(%1) \n\t"
1400 "mov %%ebx, 4(%1) \n\t"
1401 "mov %%ecx, 8(%1) \n\t"
1402 "mov %%edx, 12(%1) \n\t"
1403 "popa"
1404 : : "a"(function), "S"(vec)
1405 : "memory", "cc");
1406 #endif
/* Only write back the outputs the caller asked for. */
1408 if (eax)
1409 *eax = vec[0];
1410 if (ebx)
1411 *ebx = vec[1];
1412 if (ecx)
1413 *ecx = vec[2];
1414 if (edx)
1415 *edx = vec[3];
1416 #endif
/* Emulate the CPUID instruction: fill eax/ebx/ecx/edx for the requested
 * leaf `index` from the CPU model's cached feature words.  Out-of-range
 * indices are clamped (see the note below).  When KVM is enabled,
 * several leaves are blended with host CPUID so the guest only sees
 * features the host can actually virtualise.
 */
1419 void cpu_x86_cpuid(CPUX86State *env, uint32_t index,
1420 uint32_t *eax, uint32_t *ebx,
1421 uint32_t *ecx, uint32_t *edx)
1423 /* test if maximum index reached */
/* NOTE(review): an extended index above cpuid_xlevel falls back to
 * cpuid_level (the *basic* maximum), i.e. it is served as the highest
 * basic leaf — this mirrors Intel's documented out-of-range behavior,
 * but confirm it is intentional and not a typo for cpuid_xlevel. */
1424 if (index & 0x80000000) {
1425 if (index > env->cpuid_xlevel)
1426 index = env->cpuid_level;
1427 } else {
1428 if (index > env->cpuid_level)
1429 index = env->cpuid_level;
1432 switch(index) {
1433 case 0:
/* Leaf 0: maximum basic leaf + vendor string (GenuineIntel order:
 * ebx, edx, ecx). */
1434 *eax = env->cpuid_level;
1435 *ebx = env->cpuid_vendor1;
1436 *edx = env->cpuid_vendor2;
1437 *ecx = env->cpuid_vendor3;
1439 /* sysenter isn't supported in compatibility mode on AMD. and syscall
1440 * isn't supported in compatibility mode on Intel. so advertise the
1441 * actual cpu, and say goodbye to migration between different vendors
1442 * if you use compatibility mode. */
1443 if (kvm_enabled())
1444 host_cpuid(0, NULL, ebx, ecx, edx);
1445 break;
1446 case 1:
/* Leaf 1: version/feature information. */
1447 *eax = env->cpuid_version;
1448 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1449 *ecx = env->cpuid_ext_features;
1450 *edx = env->cpuid_features;
1452 /* "Hypervisor present" bit required for Microsoft SVVP */
1453 if (kvm_enabled())
1454 *ecx |= (1 << 31);
1455 break;
1456 case 2:
1457 /* cache info: needed for Pentium Pro compatibility */
1458 *eax = 1;
1459 *ebx = 0;
1460 *ecx = 0;
1461 *edx = 0x2c307d;
1462 break;
1463 case 4:
1464 /* cache info: needed for Core compatibility */
/* On input, *ecx selects the cache level to describe. */
1465 switch (*ecx) {
1466 case 0: /* L1 dcache info */
1467 *eax = 0x0000121;
1468 *ebx = 0x1c0003f;
1469 *ecx = 0x000003f;
1470 *edx = 0x0000001;
1471 break;
1472 case 1: /* L1 icache info */
1473 *eax = 0x0000122;
1474 *ebx = 0x1c0003f;
1475 *ecx = 0x000003f;
1476 *edx = 0x0000001;
1477 break;
1478 case 2: /* L2 cache info */
1479 *eax = 0x0000143;
1480 *ebx = 0x3c0003f;
1481 *ecx = 0x0000fff;
1482 *edx = 0x0000001;
1483 break;
1484 default: /* end of info */
1485 *eax = 0;
1486 *ebx = 0;
1487 *ecx = 0;
1488 *edx = 0;
1489 break;
1492 break;
1493 case 5:
1494 /* mwait info: needed for Core compatibility */
1495 *eax = 0; /* Smallest monitor-line size in bytes */
1496 *ebx = 0; /* Largest monitor-line size in bytes */
1497 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
1498 *edx = 0;
1499 break;
1500 case 6:
1501 /* Thermal and Power Leaf */
1502 *eax = 0;
1503 *ebx = 0;
1504 *ecx = 0;
1505 *edx = 0;
1506 break;
1507 case 9:
1508 /* Direct Cache Access Information Leaf */
1509 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
1510 *ebx = 0;
1511 *ecx = 0;
1512 *edx = 0;
1513 break;
1514 case 0xA:
1515 /* Architectural Performance Monitoring Leaf */
/* All zero: no PMU version advertised to the guest. */
1516 *eax = 0;
1517 *ebx = 0;
1518 *ecx = 0;
1519 *edx = 0;
1520 break;
1521 case 0x80000000:
/* Extended leaf 0: maximum extended leaf + vendor string. */
1522 *eax = env->cpuid_xlevel;
1523 *ebx = env->cpuid_vendor1;
1524 *edx = env->cpuid_vendor2;
1525 *ecx = env->cpuid_vendor3;
1526 break;
1527 case 0x80000001:
/* Extended features (long mode, NX, syscall, SVM, 3DNow!, ...). */
1528 *eax = env->cpuid_features;
1529 *ebx = 0;
1530 *ecx = env->cpuid_ext3_features;
1531 *edx = env->cpuid_ext2_features;
1533 if (kvm_enabled()) {
1534 uint32_t h_eax, h_edx;
1536 host_cpuid(0x80000001, &h_eax, NULL, NULL, &h_edx);
1538 /* disable CPU features that the host does not support */
1540 /* long mode */
1541 if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
1542 *edx &= ~0x20000000;
1543 /* syscall */
1544 if ((h_edx & 0x00000800) == 0)
1545 *edx &= ~0x00000800;
1546 /* nx */
1547 if ((h_edx & 0x00100000) == 0)
1548 *edx &= ~0x00100000;
1550 /* disable CPU features that KVM cannot support */
1552 /* svm */
1553 if (!kvm_nested)
1554 *ecx &= ~4UL;
1555 /* 3dnow */
1556 *edx &= ~0xc0000000;
1558 break;
1559 case 0x80000002:
1560 case 0x80000003:
1561 case 0x80000004:
/* Processor brand string: 3 leaves x 16 bytes. */
1562 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1563 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1564 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1565 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1566 break;
1567 case 0x80000005:
1568 /* cache info (L1 cache) */
1569 *eax = 0x01ff01ff;
1570 *ebx = 0x01ff01ff;
1571 *ecx = 0x40020140;
1572 *edx = 0x40020140;
1573 break;
1574 case 0x80000006:
1575 /* cache info (L2 cache) */
1576 *eax = 0;
1577 *ebx = 0x42004200;
1578 *ecx = 0x02008140;
1579 *edx = 0;
1580 break;
1581 case 0x80000008:
1582 /* virtual & phys address size in low 2 bytes. */
1583 /* XXX: This value must match the one used in the MMU code. */
1584 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
1585 /* 64 bit processor */
1586 #if defined(USE_KQEMU)
1587 *eax = 0x00003020; /* 48 bits virtual, 32 bits physical */
1588 #else
1589 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1590 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
1591 #endif
1592 } else {
1593 #if defined(USE_KQEMU)
1594 *eax = 0x00000020; /* 32 bits physical */
1595 #else
1596 if (env->cpuid_features & CPUID_PSE36)
1597 *eax = 0x00000024; /* 36 bits physical */
1598 else
1599 *eax = 0x00000020; /* 32 bits physical */
1600 #endif
1602 *ebx = 0;
1603 *ecx = 0;
1604 *edx = 0;
1605 break;
1606 case 0x8000000A:
/* SVM leaf: minimal fixed values. */
1607 *eax = 0x00000001; /* SVM Revision */
1608 *ebx = 0x00000010; /* nr of ASIDs */
1609 *ecx = 0;
1610 *edx = 0; /* optional features */
1611 break;
1612 default:
1613 /* reserved values: zero */
1614 *eax = 0;
1615 *ebx = 0;
1616 *ecx = 0;
1617 *edx = 0;
1618 break;
/* Allocate and initialise a new x86 CPU state for the given cpu model
 * string.  Performs one-time global initialisation (flag-optimisation
 * tables and, in system mode, installation of the debug exception
 * handler) guarded by the static `inited` flag.  Returns NULL if
 * allocation or cpu model registration fails; otherwise returns the
 * reset, ready-to-run CPU state (with KQEMU/KVM vcpu setup applied
 * when those accelerators are compiled in/enabled).
 */
1622 CPUX86State *cpu_x86_init(const char *cpu_model)
1624 CPUX86State *env;
1625 static int inited;
1627 env = qemu_mallocz(sizeof(CPUX86State));
1628 if (!env)
1629 return NULL;
1630 cpu_exec_init(env);
1631 env->cpu_model_str = cpu_model;
1633 /* init various static tables */
1634 if (!inited) {
1635 inited = 1;
1636 optimize_flags_init();
1637 #ifndef CONFIG_USER_ONLY
/* Save the previous debug handler so breakpoint_handler can chain. */
1638 prev_debug_excp_handler =
1639 cpu_set_debug_excp_handler(breakpoint_handler);
1640 #endif
/* Unknown/invalid cpu model: release the half-built state. */
1642 if (cpu_x86_register(env, cpu_model) < 0) {
1643 cpu_x86_close(env);
1644 return NULL;
1646 cpu_reset(env);
1647 #ifdef USE_KQEMU
1648 kqemu_init(env);
1649 #endif
1650 if (kvm_enabled())
1651 kvm_init_vcpu(env);
1652 return env;