Device-assignment: free device if hotplug fails
[qemu-kvm/fedora.git] / target-i386 / helper.c
blob4124b72658da3b359bc4f6af01b396e252328ed6
1 /*
2 * i386 helpers (without register variable usage)
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
26 #include <assert.h>
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "svm.h"
31 #include "qemu-common.h"
32 #include "kvm.h"
33 #include "helper.h"
35 #include "qemu-kvm.h"
37 //#define DEBUG_MMU
/* Scan one 32-entry CPUID feature-name table; on a match, set the
 * corresponding bit in *bits and return 1, otherwise return 0.
 * NULL table slots are reserved/unnamed bits and never match. */
static int lookup_feature_name(const char **table, const char *flagname,
                               uint32_t *bits)
{
    int i;

    for (i = 0; i < 32; i++) {
        if (table[i] && strcmp(flagname, table[i]) == 0) {
            *bits |= 1u << i;
            return 1;
        }
    }
    return 0;
}

/* Set the CPUID feature bit named @flagname in whichever of the four
 * feature words (base, ext/ECX, ext2/0x80000001 EDX, ext3/0x80000001 ECX)
 * defines it.  Unknown names are reported on stderr and ignored. */
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
        NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor",
        "ds_cpl", "vmx", NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        /* was misspelled "mttr": the real flag name is "mtrr" */
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp",
        NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm",
        "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */,
        "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    /* The tables are searched in order; the first table defining the
     * name wins (base names shadow identically-named ext2 bits). */
    if (lookup_feature_name(feature_name, flagname, features) ||
        lookup_feature_name(ext_feature_name, flagname, ext_features) ||
        lookup_feature_name(ext2_feature_name, flagname, ext2_features) ||
        lookup_feature_name(ext3_feature_name, flagname, ext3_features))
        return;
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
/* Optional vendor-string override supplied elsewhere (e.g. by the KVM
 * front end); consumed by cpu_x86_register(). */
extern const char *cpu_vendor_string;

/* Static description of one CPU model: everything needed to seed the
 * CPUID state of a CPUX86State. */
typedef struct x86_def_t {
    const char *name;                /* model name as given to -cpu */
    uint32_t level;                  /* maximum basic CPUID leaf */
    uint32_t vendor1, vendor2, vendor3; /* 12-byte vendor id, EBX/EDX/ECX order */
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;                 /* maximum extended CPUID leaf */
    char model_id[48];               /* brand string, leaves 0x80000002..4 */
} x86_def_t;
/* Cumulative CPUID feature sets for the historical Intel models; each
 * generation extends the previous one.  PPRO_FEATURES is the baseline
 * used by the qemu32/qemu64 synthetic models. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
121 static x86_def_t x86_defs[] = {
122 #ifdef TARGET_X86_64
124 .name = "qemu64",
125 .level = 2,
126 .vendor1 = CPUID_VENDOR_AMD_1,
127 .vendor2 = CPUID_VENDOR_AMD_2,
128 .vendor3 = CPUID_VENDOR_AMD_3,
129 .family = 6,
130 .model = 2,
131 .stepping = 3,
132 .features = PPRO_FEATURES |
133 /* these features are needed for Win64 and aren't fully implemented */
134 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
135 /* this feature is needed for Solaris and isn't fully implemented */
136 CPUID_PSE36,
137 .ext_features = CPUID_EXT_SSE3,
138 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
139 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
140 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
141 .ext3_features = CPUID_EXT3_SVM,
142 .xlevel = 0x8000000A,
143 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
146 .name = "core2duo",
147 .level = 10,
148 .family = 6,
149 .model = 15,
150 .stepping = 11,
151 /* The original CPU also implements these features:
152 CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
153 CPUID_TM, CPUID_PBE */
154 .features = PPRO_FEATURES |
155 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
156 CPUID_PSE36,
157 /* The original CPU also implements these ext features:
158 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
159 CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
160 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
161 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
162 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
163 .xlevel = 0x80000008,
164 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
166 #endif
168 .name = "qemu32",
169 .level = 2,
170 .family = 6,
171 .model = 3,
172 .stepping = 3,
173 .features = PPRO_FEATURES,
174 .ext_features = CPUID_EXT_SSE3,
175 .xlevel = 0,
176 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
179 .name = "coreduo",
180 .level = 10,
181 .family = 6,
182 .model = 14,
183 .stepping = 8,
184 /* The original CPU also implements these features:
185 CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
186 CPUID_TM, CPUID_PBE */
187 .features = PPRO_FEATURES | CPUID_VME |
188 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
189 /* The original CPU also implements these ext features:
190 CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
191 CPUID_EXT_PDCM */
192 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
193 .ext2_features = CPUID_EXT2_NX,
194 .xlevel = 0x80000008,
195 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
198 .name = "486",
199 .level = 0,
200 .family = 4,
201 .model = 0,
202 .stepping = 0,
203 .features = I486_FEATURES,
204 .xlevel = 0,
207 .name = "pentium",
208 .level = 1,
209 .family = 5,
210 .model = 4,
211 .stepping = 3,
212 .features = PENTIUM_FEATURES,
213 .xlevel = 0,
216 .name = "pentium2",
217 .level = 2,
218 .family = 6,
219 .model = 5,
220 .stepping = 2,
221 .features = PENTIUM2_FEATURES,
222 .xlevel = 0,
225 .name = "pentium3",
226 .level = 2,
227 .family = 6,
228 .model = 7,
229 .stepping = 3,
230 .features = PENTIUM3_FEATURES,
231 .xlevel = 0,
234 .name = "athlon",
235 .level = 2,
236 .vendor1 = 0x68747541, /* "Auth" */
237 .vendor2 = 0x69746e65, /* "enti" */
238 .vendor3 = 0x444d4163, /* "cAMD" */
239 .family = 6,
240 .model = 2,
241 .stepping = 3,
242 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
243 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
244 .xlevel = 0x80000008,
245 /* XXX: put another string ? */
246 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
249 .name = "n270",
250 /* original is on level 10 */
251 .level = 5,
252 .family = 6,
253 .model = 28,
254 .stepping = 2,
255 .features = PPRO_FEATURES |
256 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
257 /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
258 * CPUID_HT | CPUID_TM | CPUID_PBE */
259 /* Some CPUs got no CPUID_SEP */
260 .ext_features = CPUID_EXT_MONITOR |
261 CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
262 /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
263 * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
264 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
265 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
266 .xlevel = 0x8000000A,
267 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
271 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
273 unsigned int i;
274 x86_def_t *def;
276 char *s = strdup(cpu_model);
277 char *featurestr, *name = strtok(s, ",");
278 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
279 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
280 int family = -1, model = -1, stepping = -1;
282 def = NULL;
283 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
284 if (strcmp(name, x86_defs[i].name) == 0) {
285 def = &x86_defs[i];
286 break;
289 if (!def)
290 goto error;
291 memcpy(x86_cpu_def, def, sizeof(*def));
293 featurestr = strtok(NULL, ",");
295 while (featurestr) {
296 char *val;
297 if (featurestr[0] == '+') {
298 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
299 } else if (featurestr[0] == '-') {
300 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
301 } else if ((val = strchr(featurestr, '='))) {
302 *val = 0; val++;
303 if (!strcmp(featurestr, "family")) {
304 char *err;
305 family = strtol(val, &err, 10);
306 if (!*val || *err || family < 0) {
307 fprintf(stderr, "bad numerical value %s\n", val);
308 goto error;
310 x86_cpu_def->family = family;
311 } else if (!strcmp(featurestr, "model")) {
312 char *err;
313 model = strtol(val, &err, 10);
314 if (!*val || *err || model < 0 || model > 0xff) {
315 fprintf(stderr, "bad numerical value %s\n", val);
316 goto error;
318 x86_cpu_def->model = model;
319 } else if (!strcmp(featurestr, "stepping")) {
320 char *err;
321 stepping = strtol(val, &err, 10);
322 if (!*val || *err || stepping < 0 || stepping > 0xf) {
323 fprintf(stderr, "bad numerical value %s\n", val);
324 goto error;
326 x86_cpu_def->stepping = stepping;
327 } else if (!strcmp(featurestr, "vendor")) {
328 if (strlen(val) != 12) {
329 fprintf(stderr, "vendor string must be 12 chars long\n");
330 goto error;
332 x86_cpu_def->vendor1 = 0;
333 x86_cpu_def->vendor2 = 0;
334 x86_cpu_def->vendor3 = 0;
335 for(i = 0; i < 4; i++) {
336 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
337 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
338 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
340 } else if (!strcmp(featurestr, "model_id")) {
341 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
342 val);
343 } else {
344 fprintf(stderr, "unrecognized feature %s\n", featurestr);
345 goto error;
347 } else {
348 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
349 goto error;
351 featurestr = strtok(NULL, ",");
353 x86_cpu_def->features |= plus_features;
354 x86_cpu_def->ext_features |= plus_ext_features;
355 x86_cpu_def->ext2_features |= plus_ext2_features;
356 x86_cpu_def->ext3_features |= plus_ext3_features;
357 x86_cpu_def->features &= ~minus_features;
358 x86_cpu_def->ext_features &= ~minus_ext_features;
359 x86_cpu_def->ext2_features &= ~minus_ext2_features;
360 x86_cpu_def->ext3_features &= ~minus_ext3_features;
361 free(s);
362 return 0;
364 error:
365 free(s);
366 return -1;
369 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
371 unsigned int i;
373 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
374 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
377 static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
379 x86_def_t def1, *def = &def1;
381 if (cpu_x86_find_by_name(def, cpu_model) < 0)
382 return -1;
383 if (def->vendor1) {
384 env->cpuid_vendor1 = def->vendor1;
385 env->cpuid_vendor2 = def->vendor2;
386 env->cpuid_vendor3 = def->vendor3;
387 } else {
388 env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
389 env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
390 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
392 env->cpuid_level = def->level;
393 if (def->family > 0x0f)
394 env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
395 else
396 env->cpuid_version = def->family << 8;
397 env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
398 env->cpuid_version |= def->stepping;
399 env->cpuid_features = def->features;
400 env->pat = 0x0007040600070406ULL;
401 env->cpuid_ext_features = def->ext_features;
402 env->cpuid_ext2_features = def->ext2_features;
403 env->cpuid_xlevel = def->xlevel;
404 env->cpuid_ext3_features = def->ext3_features;
406 const char *model_id = def->model_id;
407 int c, len, i;
409 if (cpu_vendor_string != NULL)
410 model_id = cpu_vendor_string;
411 if (!model_id)
412 model_id = "";
413 len = strlen(model_id);
414 for(i = 0; i < 48; i++) {
415 if (i >= len)
416 c = '\0';
417 else
418 c = (uint8_t)model_id[i];
419 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
422 return 0;
425 /* NOTE: must be called outside the CPU execute loop */
426 void cpu_reset(CPUX86State *env)
428 int i;
430 memset(env, 0, offsetof(CPUX86State, breakpoints));
432 tlb_flush(env, 1);
434 env->old_exception = -1;
436 /* init to reset state */
438 #ifdef CONFIG_SOFTMMU
439 env->hflags |= HF_SOFTMMU_MASK;
440 #endif
441 env->hflags2 |= HF2_GIF_MASK;
443 cpu_x86_update_cr0(env, 0x60000010);
444 env->a20_mask = ~0x0;
445 env->smbase = 0x30000;
447 env->idt.limit = 0xffff;
448 env->gdt.limit = 0xffff;
449 env->ldt.limit = 0xffff;
450 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
451 env->tr.limit = 0xffff;
452 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
454 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
455 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
456 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
457 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
458 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
459 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
460 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
461 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
462 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
463 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
464 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
465 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
467 env->eip = 0xfff0;
468 env->regs[R_EDX] = env->cpuid_version;
470 env->eflags = 0x2;
472 /* FPU init */
473 for(i = 0;i < 8; i++)
474 env->fptags[i] = 1;
475 env->fpuc = 0x37f;
477 env->mxcsr = 0x1f80;
479 memset(env->dr, 0, sizeof(env->dr));
480 env->dr[6] = DR6_FIXED_1;
481 env->dr[7] = DR7_FIXED_1;
482 cpu_breakpoint_remove_all(env, BP_CPU);
483 cpu_watchpoint_remove_all(env, BP_CPU);
486 void cpu_x86_close(CPUX86State *env)
488 qemu_free(env);
491 /***********************************************************/
492 /* x86 debug */
/* Human-readable names for the lazy condition-code ops (CC_OP_*),
 * indexed by CC_OP value; used only by cpu_dump_state().  The order
 * must match the CC_OP enum: DYNAMIC, EFLAGS, then B/W/L/Q variants
 * of each arithmetic group. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB", "MULW", "MULL", "MULQ",

    "ADDB", "ADDW", "ADDL", "ADDQ",

    "ADCB", "ADCW", "ADCL", "ADCQ",

    "SUBB", "SUBW", "SUBL", "SUBQ",

    "SBBB", "SBBW", "SBBL", "SBBQ",

    "LOGICB", "LOGICW", "LOGICL", "LOGICQ",

    "INCB", "INCW", "INCL", "INCQ",

    "DECB", "DECW", "DECL", "DECQ",

    "SHLB", "SHLW", "SHLL", "SHLQ",

    "SARB", "SARW", "SARL", "SARQ",
};
549 void cpu_dump_state(CPUState *env, FILE *f,
550 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
551 int flags)
553 int eflags, i, nb;
554 char cc_op_name[32];
555 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
557 eflags = env->eflags;
558 #ifdef TARGET_X86_64
559 if (env->hflags & HF_CS64_MASK) {
560 cpu_fprintf(f,
561 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
562 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
563 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
564 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
565 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
566 env->regs[R_EAX],
567 env->regs[R_EBX],
568 env->regs[R_ECX],
569 env->regs[R_EDX],
570 env->regs[R_ESI],
571 env->regs[R_EDI],
572 env->regs[R_EBP],
573 env->regs[R_ESP],
574 env->regs[8],
575 env->regs[9],
576 env->regs[10],
577 env->regs[11],
578 env->regs[12],
579 env->regs[13],
580 env->regs[14],
581 env->regs[15],
582 env->eip, eflags,
583 eflags & DF_MASK ? 'D' : '-',
584 eflags & CC_O ? 'O' : '-',
585 eflags & CC_S ? 'S' : '-',
586 eflags & CC_Z ? 'Z' : '-',
587 eflags & CC_A ? 'A' : '-',
588 eflags & CC_P ? 'P' : '-',
589 eflags & CC_C ? 'C' : '-',
590 env->hflags & HF_CPL_MASK,
591 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
592 (int)(env->a20_mask >> 20) & 1,
593 (env->hflags >> HF_SMM_SHIFT) & 1,
594 env->halted);
595 } else
596 #endif
598 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
599 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
600 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
601 (uint32_t)env->regs[R_EAX],
602 (uint32_t)env->regs[R_EBX],
603 (uint32_t)env->regs[R_ECX],
604 (uint32_t)env->regs[R_EDX],
605 (uint32_t)env->regs[R_ESI],
606 (uint32_t)env->regs[R_EDI],
607 (uint32_t)env->regs[R_EBP],
608 (uint32_t)env->regs[R_ESP],
609 (uint32_t)env->eip, eflags,
610 eflags & DF_MASK ? 'D' : '-',
611 eflags & CC_O ? 'O' : '-',
612 eflags & CC_S ? 'S' : '-',
613 eflags & CC_Z ? 'Z' : '-',
614 eflags & CC_A ? 'A' : '-',
615 eflags & CC_P ? 'P' : '-',
616 eflags & CC_C ? 'C' : '-',
617 env->hflags & HF_CPL_MASK,
618 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
619 (int)(env->a20_mask >> 20) & 1,
620 (env->hflags >> HF_SMM_SHIFT) & 1,
621 env->halted);
624 #ifdef TARGET_X86_64
625 if (env->hflags & HF_LMA_MASK) {
626 for(i = 0; i < 6; i++) {
627 SegmentCache *sc = &env->segs[i];
628 cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
629 seg_name[i],
630 sc->selector,
631 sc->base,
632 sc->limit,
633 sc->flags);
635 cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
636 env->ldt.selector,
637 env->ldt.base,
638 env->ldt.limit,
639 env->ldt.flags);
640 cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
641 env->tr.selector,
642 env->tr.base,
643 env->tr.limit,
644 env->tr.flags);
645 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
646 env->gdt.base, env->gdt.limit);
647 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
648 env->idt.base, env->idt.limit);
649 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
650 (uint32_t)env->cr[0],
651 env->cr[2],
652 env->cr[3],
653 (uint32_t)env->cr[4]);
654 for(i = 0; i < 4; i++)
655 cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
656 cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
657 env->dr[6], env->cr[7]);
658 } else
659 #endif
661 for(i = 0; i < 6; i++) {
662 SegmentCache *sc = &env->segs[i];
663 cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
664 seg_name[i],
665 sc->selector,
666 (uint32_t)sc->base,
667 sc->limit,
668 sc->flags);
670 cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
671 env->ldt.selector,
672 (uint32_t)env->ldt.base,
673 env->ldt.limit,
674 env->ldt.flags);
675 cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
676 env->tr.selector,
677 (uint32_t)env->tr.base,
678 env->tr.limit,
679 env->tr.flags);
680 cpu_fprintf(f, "GDT= %08x %08x\n",
681 (uint32_t)env->gdt.base, env->gdt.limit);
682 cpu_fprintf(f, "IDT= %08x %08x\n",
683 (uint32_t)env->idt.base, env->idt.limit);
684 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
685 (uint32_t)env->cr[0],
686 (uint32_t)env->cr[2],
687 (uint32_t)env->cr[3],
688 (uint32_t)env->cr[4]);
689 for(i = 0; i < 4; i++)
690 cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
691 cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->cr[7]);
693 if (flags & X86_DUMP_CCOP) {
694 if ((unsigned)env->cc_op < CC_OP_NB)
695 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
696 else
697 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
698 #ifdef TARGET_X86_64
699 if (env->hflags & HF_CS64_MASK) {
700 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
701 env->cc_src, env->cc_dst,
702 cc_op_name);
703 } else
704 #endif
706 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
707 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
708 cc_op_name);
711 if (flags & X86_DUMP_FPU) {
712 int fptag;
713 fptag = 0;
714 for(i = 0; i < 8; i++) {
715 fptag |= ((!env->fptags[i]) << i);
717 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
718 env->fpuc,
719 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
720 env->fpstt,
721 fptag,
722 env->mxcsr);
723 for(i=0;i<8;i++) {
724 #if defined(USE_X86LDOUBLE)
725 union {
726 long double d;
727 struct {
728 uint64_t lower;
729 uint16_t upper;
730 } l;
731 } tmp;
732 tmp.d = env->fpregs[i].d;
733 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
734 i, tmp.l.lower, tmp.l.upper);
735 #else
736 cpu_fprintf(f, "FPR%d=%016" PRIx64,
737 i, env->fpregs[i].mmx.q);
738 #endif
739 if ((i & 1) == 1)
740 cpu_fprintf(f, "\n");
741 else
742 cpu_fprintf(f, " ");
744 if (env->hflags & HF_CS64_MASK)
745 nb = 16;
746 else
747 nb = 8;
748 for(i=0;i<nb;i++) {
749 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
751 env->xmm_regs[i].XMM_L(3),
752 env->xmm_regs[i].XMM_L(2),
753 env->xmm_regs[i].XMM_L(1),
754 env->xmm_regs[i].XMM_L(0));
755 if ((i & 1) == 1)
756 cpu_fprintf(f, "\n");
757 else
758 cpu_fprintf(f, " ");
763 /***********************************************************/
764 /* x86 mmu */
765 /* XXX: add PGE support */
767 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
769 a20_state = (a20_state != 0);
770 if (a20_state != ((env->a20_mask >> 20) & 1)) {
771 #if defined(DEBUG_MMU)
772 printf("A20 update: a20=%d\n", a20_state);
773 #endif
774 /* if the cpu is currently executing code, we must unlink it and
775 all the potentially executing TB */
776 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
778 /* when a20 is changed, all the MMU mappings are invalid, so
779 we must flush everything */
780 tlb_flush(env, 1);
781 env->a20_mask = (~0x100000) | (a20_state << 20);
785 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
787 int pe_state;
789 #if defined(DEBUG_MMU)
790 printf("CR0 update: CR0=0x%08x\n", new_cr0);
791 #endif
792 if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
793 (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
794 tlb_flush(env, 1);
797 #ifdef TARGET_X86_64
798 if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
799 (env->efer & MSR_EFER_LME)) {
800 /* enter in long mode */
801 /* XXX: generate an exception */
802 if (!(env->cr[4] & CR4_PAE_MASK))
803 return;
804 env->efer |= MSR_EFER_LMA;
805 env->hflags |= HF_LMA_MASK;
806 } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
807 (env->efer & MSR_EFER_LMA)) {
808 /* exit long mode */
809 env->efer &= ~MSR_EFER_LMA;
810 env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
811 env->eip &= 0xffffffff;
813 #endif
814 env->cr[0] = new_cr0 | CR0_ET_MASK;
816 /* update PE flag in hidden flags */
817 pe_state = (env->cr[0] & CR0_PE_MASK);
818 env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
819 /* ensure that ADDSEG is always set in real mode */
820 env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
821 /* update FPU flags */
822 env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
823 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
826 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
827 the PDPT */
828 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
830 env->cr[3] = new_cr3;
831 if (env->cr[0] & CR0_PG_MASK) {
832 #if defined(DEBUG_MMU)
833 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
834 #endif
835 tlb_flush(env, 0);
839 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
841 #if defined(DEBUG_MMU)
842 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
843 #endif
844 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
845 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
846 tlb_flush(env, 1);
848 /* SSE handling */
849 if (!(env->cpuid_features & CPUID_SSE))
850 new_cr4 &= ~CR4_OSFXSR_MASK;
851 if (new_cr4 & CR4_OSFXSR_MASK)
852 env->hflags |= HF_OSFXSR_MASK;
853 else
854 env->hflags &= ~HF_OSFXSR_MASK;
856 env->cr[4] = new_cr4;
859 /* XXX: also flush 4MB pages */
860 void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
862 tlb_flush_page(env, addr);
865 #if defined(CONFIG_USER_ONLY)
867 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
868 int is_write, int mmu_idx, int is_softmmu)
870 /* user mode only emulation */
871 is_write &= 1;
872 env->cr[2] = addr;
873 env->error_code = (is_write << PG_ERROR_W_BIT);
874 env->error_code |= PG_ERROR_U_MASK;
875 env->exception_index = EXCP0E_PAGE;
876 return 1;
879 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
881 return addr;
884 #else
/* Mask selecting the page-frame bits of a PTE/PDE, i.e. the supported
 * physical address width minus the low 12 offset bits.
 * XXX: This value should match the one returned by CPUID and in exec.c */
#if defined(USE_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
#endif
898 /* return value:
899 -1 = cannot handle fault
900 0 = nothing more to do
901 1 = generate PF fault
902 2 = soft MMU activation required for this block
904 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
905 int is_write1, int mmu_idx, int is_softmmu)
907 uint64_t ptep, pte;
908 target_ulong pde_addr, pte_addr;
909 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
910 target_phys_addr_t paddr;
911 uint32_t page_offset;
912 target_ulong vaddr, virt_addr;
914 is_user = mmu_idx == MMU_USER_IDX;
915 #if defined(DEBUG_MMU)
916 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
917 addr, is_write1, is_user, env->eip);
918 #endif
919 is_write = is_write1 & 1;
921 if (!(env->cr[0] & CR0_PG_MASK)) {
922 pte = addr;
923 virt_addr = addr & TARGET_PAGE_MASK;
924 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
925 page_size = 4096;
926 goto do_mapping;
929 if (env->cr[4] & CR4_PAE_MASK) {
930 uint64_t pde, pdpe;
931 target_ulong pdpe_addr;
933 #ifdef TARGET_X86_64
934 if (env->hflags & HF_LMA_MASK) {
935 uint64_t pml4e_addr, pml4e;
936 int32_t sext;
938 /* test virtual address sign extension */
939 sext = (int64_t)addr >> 47;
940 if (sext != 0 && sext != -1) {
941 env->error_code = 0;
942 env->exception_index = EXCP0D_GPF;
943 return 1;
946 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
947 env->a20_mask;
948 pml4e = ldq_phys(pml4e_addr);
949 if (!(pml4e & PG_PRESENT_MASK)) {
950 error_code = 0;
951 goto do_fault;
953 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
954 error_code = PG_ERROR_RSVD_MASK;
955 goto do_fault;
957 if (!(pml4e & PG_ACCESSED_MASK)) {
958 pml4e |= PG_ACCESSED_MASK;
959 stl_phys_notdirty(pml4e_addr, pml4e);
961 ptep = pml4e ^ PG_NX_MASK;
962 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
963 env->a20_mask;
964 pdpe = ldq_phys(pdpe_addr);
965 if (!(pdpe & PG_PRESENT_MASK)) {
966 error_code = 0;
967 goto do_fault;
969 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
970 error_code = PG_ERROR_RSVD_MASK;
971 goto do_fault;
973 ptep &= pdpe ^ PG_NX_MASK;
974 if (!(pdpe & PG_ACCESSED_MASK)) {
975 pdpe |= PG_ACCESSED_MASK;
976 stl_phys_notdirty(pdpe_addr, pdpe);
978 } else
979 #endif
981 /* XXX: load them when cr3 is loaded ? */
982 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
983 env->a20_mask;
984 pdpe = ldq_phys(pdpe_addr);
985 if (!(pdpe & PG_PRESENT_MASK)) {
986 error_code = 0;
987 goto do_fault;
989 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
992 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
993 env->a20_mask;
994 pde = ldq_phys(pde_addr);
995 if (!(pde & PG_PRESENT_MASK)) {
996 error_code = 0;
997 goto do_fault;
999 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
1000 error_code = PG_ERROR_RSVD_MASK;
1001 goto do_fault;
1003 ptep &= pde ^ PG_NX_MASK;
1004 if (pde & PG_PSE_MASK) {
1005 /* 2 MB page */
1006 page_size = 2048 * 1024;
1007 ptep ^= PG_NX_MASK;
1008 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1009 goto do_fault_protect;
1010 if (is_user) {
1011 if (!(ptep & PG_USER_MASK))
1012 goto do_fault_protect;
1013 if (is_write && !(ptep & PG_RW_MASK))
1014 goto do_fault_protect;
1015 } else {
1016 if ((env->cr[0] & CR0_WP_MASK) &&
1017 is_write && !(ptep & PG_RW_MASK))
1018 goto do_fault_protect;
1020 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1021 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1022 pde |= PG_ACCESSED_MASK;
1023 if (is_dirty)
1024 pde |= PG_DIRTY_MASK;
1025 stl_phys_notdirty(pde_addr, pde);
1027 /* align to page_size */
1028 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
1029 virt_addr = addr & ~(page_size - 1);
1030 } else {
1031 /* 4 KB page */
1032 if (!(pde & PG_ACCESSED_MASK)) {
1033 pde |= PG_ACCESSED_MASK;
1034 stl_phys_notdirty(pde_addr, pde);
1036 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
1037 env->a20_mask;
1038 pte = ldq_phys(pte_addr);
1039 if (!(pte & PG_PRESENT_MASK)) {
1040 error_code = 0;
1041 goto do_fault;
1043 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
1044 error_code = PG_ERROR_RSVD_MASK;
1045 goto do_fault;
1047 /* combine pde and pte nx, user and rw protections */
1048 ptep &= pte ^ PG_NX_MASK;
1049 ptep ^= PG_NX_MASK;
1050 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1051 goto do_fault_protect;
1052 if (is_user) {
1053 if (!(ptep & PG_USER_MASK))
1054 goto do_fault_protect;
1055 if (is_write && !(ptep & PG_RW_MASK))
1056 goto do_fault_protect;
1057 } else {
1058 if ((env->cr[0] & CR0_WP_MASK) &&
1059 is_write && !(ptep & PG_RW_MASK))
1060 goto do_fault_protect;
1062 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1063 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1064 pte |= PG_ACCESSED_MASK;
1065 if (is_dirty)
1066 pte |= PG_DIRTY_MASK;
1067 stl_phys_notdirty(pte_addr, pte);
1069 page_size = 4096;
1070 virt_addr = addr & ~0xfff;
1071 pte = pte & (PHYS_ADDR_MASK | 0xfff);
1073 } else {
1074 uint32_t pde;
1076 /* page directory entry */
1077 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
1078 env->a20_mask;
1079 pde = ldl_phys(pde_addr);
1080 if (!(pde & PG_PRESENT_MASK)) {
1081 error_code = 0;
1082 goto do_fault;
1084 /* if PSE bit is set, then we use a 4MB page */
1085 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1086 page_size = 4096 * 1024;
1087 if (is_user) {
1088 if (!(pde & PG_USER_MASK))
1089 goto do_fault_protect;
1090 if (is_write && !(pde & PG_RW_MASK))
1091 goto do_fault_protect;
1092 } else {
1093 if ((env->cr[0] & CR0_WP_MASK) &&
1094 is_write && !(pde & PG_RW_MASK))
1095 goto do_fault_protect;
1097 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1098 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1099 pde |= PG_ACCESSED_MASK;
1100 if (is_dirty)
1101 pde |= PG_DIRTY_MASK;
1102 stl_phys_notdirty(pde_addr, pde);
1105 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1106 ptep = pte;
1107 virt_addr = addr & ~(page_size - 1);
1108 } else {
1109 if (!(pde & PG_ACCESSED_MASK)) {
1110 pde |= PG_ACCESSED_MASK;
1111 stl_phys_notdirty(pde_addr, pde);
1114 /* page directory entry */
1115 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1116 env->a20_mask;
1117 pte = ldl_phys(pte_addr);
1118 if (!(pte & PG_PRESENT_MASK)) {
1119 error_code = 0;
1120 goto do_fault;
1122 /* combine pde and pte user and rw protections */
1123 ptep = pte & pde;
1124 if (is_user) {
1125 if (!(ptep & PG_USER_MASK))
1126 goto do_fault_protect;
1127 if (is_write && !(ptep & PG_RW_MASK))
1128 goto do_fault_protect;
1129 } else {
1130 if ((env->cr[0] & CR0_WP_MASK) &&
1131 is_write && !(ptep & PG_RW_MASK))
1132 goto do_fault_protect;
1134 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1135 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1136 pte |= PG_ACCESSED_MASK;
1137 if (is_dirty)
1138 pte |= PG_DIRTY_MASK;
1139 stl_phys_notdirty(pte_addr, pte);
1141 page_size = 4096;
1142 virt_addr = addr & ~0xfff;
1145 /* the page can be put in the TLB */
1146 prot = PAGE_READ;
1147 if (!(ptep & PG_NX_MASK))
1148 prot |= PAGE_EXEC;
1149 if (pte & PG_DIRTY_MASK) {
1150 /* only set write access if already dirty... otherwise wait
1151 for dirty access */
1152 if (is_user) {
1153 if (ptep & PG_RW_MASK)
1154 prot |= PAGE_WRITE;
1155 } else {
1156 if (!(env->cr[0] & CR0_WP_MASK) ||
1157 (ptep & PG_RW_MASK))
1158 prot |= PAGE_WRITE;
1161 do_mapping:
1162 pte = pte & env->a20_mask;
1164 /* Even if 4MB pages, we map only one 4KB page in the cache to
1165 avoid filling it too fast */
1166 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1167 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1168 vaddr = virt_addr + page_offset;
1170 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1171 return ret;
1172 do_fault_protect:
1173 error_code = PG_ERROR_P_MASK;
1174 do_fault:
1175 error_code |= (is_write << PG_ERROR_W_BIT);
1176 if (is_user)
1177 error_code |= PG_ERROR_U_MASK;
1178 if (is_write1 == 2 &&
1179 (env->efer & MSR_EFER_NXE) &&
1180 (env->cr[4] & CR4_PAE_MASK))
1181 error_code |= PG_ERROR_I_D_MASK;
1182 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
1183 /* cr2 is not modified in case of exceptions */
1184 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
1185 addr);
1186 } else {
1187 env->cr[2] = addr;
1189 env->error_code = error_code;
1190 env->exception_index = EXCP0E_PAGE;
1191 return 1;
1194 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1196 target_ulong pde_addr, pte_addr;
1197 uint64_t pte;
1198 target_phys_addr_t paddr;
1199 uint32_t page_offset;
1200 int page_size;
1202 if (env->cr[4] & CR4_PAE_MASK) {
1203 target_ulong pdpe_addr;
1204 uint64_t pde, pdpe;
1206 #ifdef TARGET_X86_64
1207 if (env->hflags & HF_LMA_MASK) {
1208 uint64_t pml4e_addr, pml4e;
1209 int32_t sext;
1211 /* test virtual address sign extension */
1212 sext = (int64_t)addr >> 47;
1213 if (sext != 0 && sext != -1)
1214 return -1;
1216 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
1217 env->a20_mask;
1218 pml4e = ldq_phys(pml4e_addr);
1219 if (!(pml4e & PG_PRESENT_MASK))
1220 return -1;
1222 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
1223 env->a20_mask;
1224 pdpe = ldq_phys(pdpe_addr);
1225 if (!(pdpe & PG_PRESENT_MASK))
1226 return -1;
1227 } else
1228 #endif
1230 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1231 env->a20_mask;
1232 pdpe = ldq_phys(pdpe_addr);
1233 if (!(pdpe & PG_PRESENT_MASK))
1234 return -1;
1237 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
1238 env->a20_mask;
1239 pde = ldq_phys(pde_addr);
1240 if (!(pde & PG_PRESENT_MASK)) {
1241 return -1;
1243 if (pde & PG_PSE_MASK) {
1244 /* 2 MB page */
1245 page_size = 2048 * 1024;
1246 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1247 } else {
1248 /* 4 KB page */
1249 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
1250 env->a20_mask;
1251 page_size = 4096;
1252 pte = ldq_phys(pte_addr);
1254 if (!(pte & PG_PRESENT_MASK))
1255 return -1;
1256 } else {
1257 uint32_t pde;
1259 if (!(env->cr[0] & CR0_PG_MASK)) {
1260 pte = addr;
1261 page_size = 4096;
1262 } else {
1263 /* page directory entry */
1264 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
1265 pde = ldl_phys(pde_addr);
1266 if (!(pde & PG_PRESENT_MASK))
1267 return -1;
1268 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1269 pte = pde & ~0x003ff000; /* align to 4MB */
1270 page_size = 4096 * 1024;
1271 } else {
1272 /* page directory entry */
1273 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
1274 pte = ldl_phys(pte_addr);
1275 if (!(pte & PG_PRESENT_MASK))
1276 return -1;
1277 page_size = 4096;
1280 pte = pte & env->a20_mask;
1283 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1284 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1285 return paddr;
1288 void hw_breakpoint_insert(CPUState *env, int index)
1290 int type, err = 0;
1292 switch (hw_breakpoint_type(env->dr[7], index)) {
1293 case 0:
1294 if (hw_breakpoint_enabled(env->dr[7], index))
1295 err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
1296 &env->cpu_breakpoint[index]);
1297 break;
1298 case 1:
1299 type = BP_CPU | BP_MEM_WRITE;
1300 goto insert_wp;
1301 case 2:
1302 /* No support for I/O watchpoints yet */
1303 break;
1304 case 3:
1305 type = BP_CPU | BP_MEM_ACCESS;
1306 insert_wp:
1307 err = cpu_watchpoint_insert(env, env->dr[index],
1308 hw_breakpoint_len(env->dr[7], index),
1309 type, &env->cpu_watchpoint[index]);
1310 break;
1312 if (err)
1313 env->cpu_breakpoint[index] = NULL;
1316 void hw_breakpoint_remove(CPUState *env, int index)
1318 if (!env->cpu_breakpoint[index])
1319 return;
1320 switch (hw_breakpoint_type(env->dr[7], index)) {
1321 case 0:
1322 if (hw_breakpoint_enabled(env->dr[7], index))
1323 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
1324 break;
1325 case 1:
1326 case 3:
1327 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
1328 break;
1329 case 2:
1330 /* No support for I/O watchpoints yet */
1331 break;
1335 int check_hw_breakpoints(CPUState *env, int force_dr6_update)
1337 target_ulong dr6;
1338 int reg, type;
1339 int hit_enabled = 0;
1341 dr6 = env->dr[6] & ~0xf;
1342 for (reg = 0; reg < 4; reg++) {
1343 type = hw_breakpoint_type(env->dr[7], reg);
1344 if ((type == 0 && env->dr[reg] == env->eip) ||
1345 ((type & 1) && env->cpu_watchpoint[reg] &&
1346 (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
1347 dr6 |= 1 << reg;
1348 if (hw_breakpoint_enabled(env->dr[7], reg))
1349 hit_enabled = 1;
1352 if (hit_enabled || force_dr6_update)
1353 env->dr[6] = dr6;
1354 return hit_enabled;
1357 static CPUDebugExcpHandler *prev_debug_excp_handler;
1359 void raise_exception(int exception_index);
1361 static void breakpoint_handler(CPUState *env)
1363 CPUBreakpoint *bp;
1365 if (env->watchpoint_hit) {
1366 if (env->watchpoint_hit->flags & BP_CPU) {
1367 env->watchpoint_hit = NULL;
1368 if (check_hw_breakpoints(env, 0))
1369 raise_exception(EXCP01_DB);
1370 else
1371 cpu_resume_from_signal(env, NULL);
1373 } else {
1374 for (bp = env->breakpoints; bp != NULL; bp = bp->next)
1375 if (bp->pc == env->eip) {
1376 if (bp->flags & BP_CPU) {
1377 check_hw_breakpoints(env, 1);
1378 raise_exception(EXCP01_DB);
1380 break;
1383 if (prev_debug_excp_handler)
1384 prev_debug_excp_handler(env);
1386 #endif /* !CONFIG_USER_ONLY */
/* Execute the host's CPUID instruction for 'function' and store the four
 * result registers through any non-NULL output pointer.  Compiled to a
 * no-op when KVM support is absent (outputs are then left untouched).
 */
static void host_cpuid(uint32_t function, uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM) || defined(USE_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function) : "cc");
#else
    /* On 32-bit, %ebx may be the PIC register, so save/restore everything
       with pusha/popa and move the results out through memory instead of
       using register output constraints. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%1) \n\t"
                 "mov %%ebx, 4(%1) \n\t"
                 "mov %%ecx, 8(%1) \n\t"
                 "mov %%edx, 12(%1) \n\t"
                 "popa"
                 : : "a"(function), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax) {
        *eax = vec[0];
    }
    if (ebx) {
        *ebx = vec[1];
    }
    if (ecx) {
        *ecx = vec[2];
    }
    if (edx) {
        *edx = vec[3];
    }
#endif
}
1422 void cpu_x86_cpuid(CPUX86State *env, uint32_t index,
1423 uint32_t *eax, uint32_t *ebx,
1424 uint32_t *ecx, uint32_t *edx)
1426 /* test if maximum index reached */
1427 if (index & 0x80000000) {
1428 if (index > env->cpuid_xlevel)
1429 index = env->cpuid_level;
1430 } else {
1431 if (index > env->cpuid_level)
1432 index = env->cpuid_level;
1435 switch(index) {
1436 case 0:
1437 *eax = env->cpuid_level;
1438 *ebx = env->cpuid_vendor1;
1439 *edx = env->cpuid_vendor2;
1440 *ecx = env->cpuid_vendor3;
1442 /* sysenter isn't supported on compatibility mode on AMD. and syscall
1443 * isn't supported in compatibility mode on Intel. so advertise the
1444 * actuall cpu, and say goodbye to migration between different vendors
1445 * is you use compatibility mode. */
1446 if (kvm_enabled())
1447 host_cpuid(0, NULL, ebx, ecx, edx);
1448 break;
1449 case 1:
1450 *eax = env->cpuid_version;
1451 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1452 *ecx = env->cpuid_ext_features;
1453 *edx = env->cpuid_features;
1455 /* "Hypervisor present" bit required for Microsoft SVVP */
1456 if (kvm_enabled())
1457 *ecx |= (1 << 31);
1458 break;
1459 case 2:
1460 /* cache info: needed for Pentium Pro compatibility */
1461 *eax = 1;
1462 *ebx = 0;
1463 *ecx = 0;
1464 *edx = 0x2c307d;
1465 break;
1466 case 4:
1467 /* cache info: needed for Core compatibility */
1468 switch (*ecx) {
1469 case 0: /* L1 dcache info */
1470 *eax = 0x0000121;
1471 *ebx = 0x1c0003f;
1472 *ecx = 0x000003f;
1473 *edx = 0x0000001;
1474 break;
1475 case 1: /* L1 icache info */
1476 *eax = 0x0000122;
1477 *ebx = 0x1c0003f;
1478 *ecx = 0x000003f;
1479 *edx = 0x0000001;
1480 break;
1481 case 2: /* L2 cache info */
1482 *eax = 0x0000143;
1483 *ebx = 0x3c0003f;
1484 *ecx = 0x0000fff;
1485 *edx = 0x0000001;
1486 break;
1487 default: /* end of info */
1488 *eax = 0;
1489 *ebx = 0;
1490 *ecx = 0;
1491 *edx = 0;
1492 break;
1495 break;
1496 case 5:
1497 /* mwait info: needed for Core compatibility */
1498 *eax = 0; /* Smallest monitor-line size in bytes */
1499 *ebx = 0; /* Largest monitor-line size in bytes */
1500 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
1501 *edx = 0;
1502 break;
1503 case 6:
1504 /* Thermal and Power Leaf */
1505 *eax = 0;
1506 *ebx = 0;
1507 *ecx = 0;
1508 *edx = 0;
1509 break;
1510 case 9:
1511 /* Direct Cache Access Information Leaf */
1512 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
1513 *ebx = 0;
1514 *ecx = 0;
1515 *edx = 0;
1516 break;
1517 case 0xA:
1518 /* Architectural Performance Monitoring Leaf */
1519 *eax = 0;
1520 *ebx = 0;
1521 *ecx = 0;
1522 *edx = 0;
1523 break;
1524 case 0x80000000:
1525 *eax = env->cpuid_xlevel;
1526 *ebx = env->cpuid_vendor1;
1527 *edx = env->cpuid_vendor2;
1528 *ecx = env->cpuid_vendor3;
1529 break;
1530 case 0x80000001:
1531 *eax = env->cpuid_features;
1532 *ebx = 0;
1533 *ecx = env->cpuid_ext3_features;
1534 *edx = env->cpuid_ext2_features;
1536 if (kvm_enabled()) {
1537 uint32_t h_eax, h_edx;
1539 host_cpuid(0x80000001, &h_eax, NULL, NULL, &h_edx);
1541 /* disable CPU features that the host does not support */
1543 /* long mode */
1544 if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
1545 *edx &= ~0x20000000;
1546 /* syscall */
1547 if ((h_edx & 0x00000800) == 0)
1548 *edx &= ~0x00000800;
1549 /* nx */
1550 if ((h_edx & 0x00100000) == 0)
1551 *edx &= ~0x00100000;
1553 /* disable CPU features that KVM cannot support */
1555 /* svm */
1556 *ecx &= ~4UL;
1557 /* 3dnow */
1558 *edx &= ~0xc0000000;
1560 break;
1561 case 0x80000002:
1562 case 0x80000003:
1563 case 0x80000004:
1564 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1565 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1566 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1567 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1568 break;
1569 case 0x80000005:
1570 /* cache info (L1 cache) */
1571 *eax = 0x01ff01ff;
1572 *ebx = 0x01ff01ff;
1573 *ecx = 0x40020140;
1574 *edx = 0x40020140;
1575 break;
1576 case 0x80000006:
1577 /* cache info (L2 cache) */
1578 *eax = 0;
1579 *ebx = 0x42004200;
1580 *ecx = 0x02008140;
1581 *edx = 0;
1582 break;
1583 case 0x80000008:
1584 /* virtual & phys address size in low 2 bytes. */
1585 /* XXX: This value must match the one used in the MMU code. */
1586 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
1587 /* 64 bit processor */
1588 #if defined(USE_KQEMU)
1589 *eax = 0x00003020; /* 48 bits virtual, 32 bits physical */
1590 #else
1591 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1592 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
1593 #endif
1594 } else {
1595 #if defined(USE_KQEMU)
1596 *eax = 0x00000020; /* 32 bits physical */
1597 #else
1598 if (env->cpuid_features & CPUID_PSE36)
1599 *eax = 0x00000024; /* 36 bits physical */
1600 else
1601 *eax = 0x00000020; /* 32 bits physical */
1602 #endif
1604 *ebx = 0;
1605 *ecx = 0;
1606 *edx = 0;
1607 break;
1608 case 0x8000000A:
1609 *eax = 0x00000001; /* SVM Revision */
1610 *ebx = 0x00000010; /* nr of ASIDs */
1611 *ecx = 0;
1612 *edx = 0; /* optional features */
1613 break;
1614 default:
1615 /* reserved values: zero */
1616 *eax = 0;
1617 *ebx = 0;
1618 *ecx = 0;
1619 *edx = 0;
1620 break;
1624 CPUX86State *cpu_x86_init(const char *cpu_model)
1626 CPUX86State *env;
1627 static int inited;
1629 env = qemu_mallocz(sizeof(CPUX86State));
1630 if (!env)
1631 return NULL;
1632 cpu_exec_init(env);
1633 env->cpu_model_str = cpu_model;
1635 /* init various static tables */
1636 if (!inited) {
1637 inited = 1;
1638 optimize_flags_init();
1639 #ifndef CONFIG_USER_ONLY
1640 prev_debug_excp_handler =
1641 cpu_set_debug_excp_handler(breakpoint_handler);
1642 #endif
1644 if (cpu_x86_register(env, cpu_model) < 0) {
1645 cpu_x86_close(env);
1646 return NULL;
1648 cpu_reset(env);
1649 #ifdef USE_KQEMU
1650 kqemu_init(env);
1651 #endif
1652 if (kvm_enabled())
1653 kvm_init_vcpu(env);
1654 return env;