/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"

//#define DEBUG_MMU
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
58 "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mttr", "pge", "mca", "cmov",
59 "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
60 "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
62 static const char *ext3_feature_name[] = {
63 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
64 "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
65 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
66 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
69 for ( i = 0 ; i < 32 ; i++ )
70 if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
71 *features |= 1 << i;
72 return;
74 for ( i = 0 ; i < 32 ; i++ )
75 if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
76 *ext_features |= 1 << i;
77 return;
79 for ( i = 0 ; i < 32 ; i++ )
80 if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
81 *ext2_features |= 1 << i;
82 return;
84 for ( i = 0 ; i < 32 ; i++ )
85 if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
86 *ext3_features |= 1 << i;
87 return;
89 fprintf(stderr, "CPU feature %s not found\n", flagname);
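
/* Usage sketch (added for illustration): given "-cpu qemu64,+ssse3,-nx",
 * cpu_x86_find_by_name() below splits the string on ',' and calls
 *     add_flagname_to_bitmaps("ssse3", &plus_features, ...);
 *     add_flagname_to_bitmaps("nx", &minus_features, ...);
 * so "ssse3" sets bit 9 of the ext_features bitmap and "nx" sets bit 20 of
 * the ext2_features bitmap, per the lookup tables above. */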
typedef struct x86_def_t {
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;
    char model_id[48];
} x86_def_t;
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
           CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
           CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
           CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
           CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
           CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
           CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
            /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
             * CPUID_HT | CPUID_TM | CPUID_PBE */
        /* Some CPUs have no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
            /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
             * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
};
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    def = NULL;
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def)
        goto error;
    memcpy(x86_cpu_def, def, sizeof(*def));
    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
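
/* Example (illustrative, not from the original source): a model string that
 * exercises every branch above could be
 *     "pentium3,+ssse3,-sep,family=6,model=9,stepping=1,vendor=GenuineIntel"
 * Note that strtok() splits only on ',', so a model_id value may contain
 * spaces but never commas. */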
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
}
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_level = def->level;
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
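    /* Worked example (added note): family 6, model 15, stepping 11 yields
     * cpuid_version 0x6fb -- family in bits 11:8, the low model nibble in
     * bits 7:4, the high model nibble in the extended-model field (bits
     * 19:16) and the stepping in bits 3:0. */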
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
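
    /* Added note: with the CS base of 0xffff0000 loaded above, the initial
     * EIP of 0xfff0 below makes the first instruction fetch hit the usual
     * x86 reset vector at physical address 0xfffffff0. */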
    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = (~0x100000) | (a20_state << 20);
    }
}
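
/* Added note: with A20 disabled the expression above yields a mask of
 * 0xffefffff (bit 20 clear), so physical addresses wrap at 1MB exactly as
 * on a real PC; with A20 enabled the mask is all ones and addresses pass
 * through unchanged. */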
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else
/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
#if defined(USE_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
#endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
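
/* Added overview: under PAE with long mode active, the walk below visits
 * four levels, each indexed by a 9-bit slice of the linear address:
 *     PML4E <- addr[47:39], PDPE <- addr[38:30],
 *     PDE   <- addr[29:21], PTE  <- addr[20:12]
 * A PDE with PG_PSE_MASK set ends the walk early with a 2MB page (4MB for
 * non-PAE 32-bit paging). */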
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
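
/* Added note: hw_breakpoint_type() used below extracts the per-register
 * 2-bit R/W field from DR7: 0 = instruction execution, 1 = data write,
 * 2 = I/O read/write (unsupported here), 3 = data read or write. */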
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}
void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}
static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception(int exception_index);

static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
#endif /* !CONFIG_USER_ONLY */
static void host_cpuid(uint32_t function, uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function) : "cc");
#else
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%1) \n\t"
                 "mov %%ebx, 4(%1) \n\t"
                 "mov %%ecx, 8(%1) \n\t"
                 "mov %%edx, 12(%1) \n\t"
                 "popa"
                 : : "a"(function), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
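
/* Usage sketch (illustrative): reading the host vendor string --
 *     uint32_t b, c, d;
 *     host_cpuid(0, NULL, &b, &c, &d);
 * leaves "GenuineIntel"/"AuthenticAMD" in the register order EBX, EDX, ECX,
 * which matches the cpuid_vendor1/2/3 layout used below. */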
void cpu_x86_cpuid(CPUX86State *env, uint32_t index,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported in compatibility mode on AMD, and syscall
         * isn't supported in compatibility mode on Intel, so advertise the
         * actual cpu, and say goodbye to migration between different vendors
         * if you use compatibility mode. */
        if (kvm_enabled())
            host_cpuid(0, NULL, ebx, ecx, edx);
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
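        /* Added note: EBX bits 15:8 hold the CLFLUSH line size in units of
         * 8 bytes, so the value 8 advertises a 64-byte cache line. */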
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;

        /* "Hypervisor present" bit required for Microsoft SVVP */
        if (kvm_enabled())
            *ecx |= (1 << 31);
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
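        /* Added note (assumed reading): these are CPUID leaf-2 cache
         * descriptor bytes; in Intel's tables 0x2c is a 32KB 8-way L1
         * dcache, 0x30 a 32KB 8-way L1 icache, and 0x7d a 2MB 8-way L2. */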
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        switch (*ecx) {
            case 0: /* L1 dcache info */
                *eax = 0x0000121;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 1: /* L1 icache info */
                *eax = 0x0000122;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 2: /* L2 cache info */
                *eax = 0x0000143;
                *ebx = 0x3c0003f;
                *ecx = 0x0000fff;
                *edx = 0x0000001;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_features;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            uint32_t h_eax, h_edx;

            host_cpuid(0x80000001, &h_eax, NULL, NULL, &h_edx);

            /* disable CPU features that the host does not support */

            /* long mode */
            if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
                *edx &= ~0x20000000;
            /* syscall */
            if ((h_edx & 0x00000800) == 0)
                *edx &= ~0x00000800;
            /* nx */
            if ((h_edx & 0x00100000) == 0)
                *edx &= ~0x00100000;

            /* disable CPU features that KVM cannot support */

            /* svm */
            *ecx &= ~4UL;
            /* 3dnow */
            *edx &= ~0xc0000000;
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(USE_KQEMU)
            *eax = 0x00003020; /* 48 bits virtual, 32 bits physical */
#else
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(USE_KQEMU)
            *eax = 0x00000020; /* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    if (kvm_enabled())
        kvm_init_vcpu(env);
    return env;
}
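
/* Usage sketch (illustrative): board code typically brings up a vCPU with
 *     CPUX86State *env = cpu_x86_init("qemu64,+ssse3");
 * and receives NULL if the model string is unknown or malformed. */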