/*
 * Work around supported cpuid ioctl() brokenness
 * [qemu-kvm/fedora.git] / target-i386 / helper.c
 * blob 23efcf46bdb3b81a5ff8cfda25a95803dffb9777
 */
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
26 #include <assert.h>
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "qemu-common.h"
31 #include "kvm.h"
33 #include "qemu-kvm.h"
//#define DEBUG_MMU

/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * about feature names, the Linux name is used. */
/* CPUID.1:EDX feature flag names, indexed by bit position 0..31.
 * NULL marks reserved bits that have no flag name.
 * (Restored the closing brace lost in extraction.) */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* CPUID.1:ECX feature flag names, indexed by bit position 0..31. */
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor",
    "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
/* CPUID.80000001h:EDX feature flag names, indexed by bit position 0..31.
 * Bits 0..9 mirror the basic leaf on AMD. */
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */,
    "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */,
    "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
/* CPUID.80000001h:ECX feature flag names, indexed by bit position 0..31. */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm",
    "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */,
    "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL,
    "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
65 static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
66 uint32_t *ext_features,
67 uint32_t *ext2_features,
68 uint32_t *ext3_features)
70 int i;
71 int found = 0;
73 for ( i = 0 ; i < 32 ; i++ )
74 if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
75 *features |= 1 << i;
76 found = 1;
78 for ( i = 0 ; i < 32 ; i++ )
79 if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
80 *ext_features |= 1 << i;
81 found = 1;
83 for ( i = 0 ; i < 32 ; i++ )
84 if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
85 *ext2_features |= 1 << i;
86 found = 1;
88 for ( i = 0 ; i < 32 ; i++ )
89 if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
90 *ext3_features |= 1 << i;
91 found = 1;
93 if (!found) {
94 fprintf(stderr, "CPU feature %s not found\n", flagname);
/* Clear from *FEATURES every bit that is not also present in SUPPORTED
 * (the feature mask reported by the KVM CPUID ioctl).  NAMES is the flag
 * name table for this feature word; it is currently unused here and kept
 * only for the caller's convenience. */
static void kvm_trim_features(uint32_t *features, uint32_t supported,
                              const char *names[])
{
    int i;
    uint32_t mask;

    for (i = 0; i < 32; ++i) {
        mask = 1U << i;
        if ((*features & mask) && !(supported & mask)) {
            *features &= ~mask;
        }
    }
}
/* Static template describing one "-cpu" model. */
typedef struct x86_def_t {
    const char *name;                   /* model name as given to -cpu */
    uint32_t level;                     /* maximum basic CPUID leaf */
    uint32_t vendor1, vendor2, vendor3; /* CPUID.0 EBX/EDX/ECX vendor id;
                                           0 means "use the Intel default" */
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;                    /* maximum extended CPUID leaf */
    char model_id[48];                  /* brand string, leaves 0x80000002..4 */
} x86_def_t;
/* Cumulative CPUID.1:EDX feature sets for the classic Intel models below;
 * each newer set extends the previous one.  PPRO_FEATURES is the baseline
 * for the "modern" models (qemu32/qemu64, core2duo, ...). */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
135 static x86_def_t x86_defs[] = {
136 #ifdef TARGET_X86_64
138 .name = "qemu64",
139 .level = 2,
140 .vendor1 = CPUID_VENDOR_AMD_1,
141 .vendor2 = CPUID_VENDOR_AMD_2,
142 .vendor3 = CPUID_VENDOR_AMD_3,
143 .family = 6,
144 .model = 2,
145 .stepping = 3,
146 .features = PPRO_FEATURES |
147 /* these features are needed for Win64 and aren't fully implemented */
148 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
149 /* this feature is needed for Solaris and isn't fully implemented */
150 CPUID_PSE36,
151 .ext_features = CPUID_EXT_SSE3,
152 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
153 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
154 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
155 .ext3_features = CPUID_EXT3_SVM,
156 .xlevel = 0x8000000A,
157 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
160 .name = "phenom",
161 .level = 5,
162 .vendor1 = CPUID_VENDOR_AMD_1,
163 .vendor2 = CPUID_VENDOR_AMD_2,
164 .vendor3 = CPUID_VENDOR_AMD_3,
165 .family = 16,
166 .model = 2,
167 .stepping = 3,
168 /* Missing: CPUID_VME, CPUID_HT */
169 .features = PPRO_FEATURES |
170 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
171 CPUID_PSE36,
172 /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
173 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
174 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
175 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
176 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
177 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
178 CPUID_EXT2_FFXSR,
179 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
180 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
181 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
182 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
183 .ext3_features = CPUID_EXT3_SVM,
184 .xlevel = 0x8000001A,
185 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
188 .name = "core2duo",
189 .level = 10,
190 .family = 6,
191 .model = 15,
192 .stepping = 11,
193 /* The original CPU also implements these features:
194 CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
195 CPUID_TM, CPUID_PBE */
196 .features = PPRO_FEATURES |
197 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
198 CPUID_PSE36,
199 /* The original CPU also implements these ext features:
200 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
201 CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
202 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
203 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
204 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
205 .xlevel = 0x80000008,
206 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
208 #endif
210 .name = "qemu32",
211 .level = 2,
212 .family = 6,
213 .model = 3,
214 .stepping = 3,
215 .features = PPRO_FEATURES,
216 .ext_features = CPUID_EXT_SSE3,
217 .xlevel = 0,
218 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
221 .name = "coreduo",
222 .level = 10,
223 .family = 6,
224 .model = 14,
225 .stepping = 8,
226 /* The original CPU also implements these features:
227 CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
228 CPUID_TM, CPUID_PBE */
229 .features = PPRO_FEATURES | CPUID_VME |
230 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
231 /* The original CPU also implements these ext features:
232 CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
233 CPUID_EXT_PDCM */
234 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
235 .ext2_features = CPUID_EXT2_NX,
236 .xlevel = 0x80000008,
237 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
240 .name = "486",
241 .level = 0,
242 .family = 4,
243 .model = 0,
244 .stepping = 0,
245 .features = I486_FEATURES,
246 .xlevel = 0,
249 .name = "pentium",
250 .level = 1,
251 .family = 5,
252 .model = 4,
253 .stepping = 3,
254 .features = PENTIUM_FEATURES,
255 .xlevel = 0,
258 .name = "pentium2",
259 .level = 2,
260 .family = 6,
261 .model = 5,
262 .stepping = 2,
263 .features = PENTIUM2_FEATURES,
264 .xlevel = 0,
267 .name = "pentium3",
268 .level = 2,
269 .family = 6,
270 .model = 7,
271 .stepping = 3,
272 .features = PENTIUM3_FEATURES,
273 .xlevel = 0,
276 .name = "athlon",
277 .level = 2,
278 .vendor1 = 0x68747541, /* "Auth" */
279 .vendor2 = 0x69746e65, /* "enti" */
280 .vendor3 = 0x444d4163, /* "cAMD" */
281 .family = 6,
282 .model = 2,
283 .stepping = 3,
284 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
285 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
286 .xlevel = 0x80000008,
287 /* XXX: put another string ? */
288 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
291 .name = "n270",
292 /* original is on level 10 */
293 .level = 5,
294 .family = 6,
295 .model = 28,
296 .stepping = 2,
297 .features = PPRO_FEATURES |
298 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
299 /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
300 * CPUID_HT | CPUID_TM | CPUID_PBE */
301 /* Some CPUs got no CPUID_SEP */
302 .ext_features = CPUID_EXT_MONITOR |
303 CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
304 /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
305 * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
306 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
307 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
308 .xlevel = 0x8000000A,
309 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
313 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
315 unsigned int i;
316 x86_def_t *def;
318 char *s = strdup(cpu_model);
319 char *featurestr, *name = strtok(s, ",");
320 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
321 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
322 int family = -1, model = -1, stepping = -1;
324 def = NULL;
325 for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
326 if (strcmp(name, x86_defs[i].name) == 0) {
327 def = &x86_defs[i];
328 break;
331 if (!def)
332 goto error;
333 memcpy(x86_cpu_def, def, sizeof(*def));
335 featurestr = strtok(NULL, ",");
337 while (featurestr) {
338 char *val;
339 if (featurestr[0] == '+') {
340 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
341 } else if (featurestr[0] == '-') {
342 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
343 } else if ((val = strchr(featurestr, '='))) {
344 *val = 0; val++;
345 if (!strcmp(featurestr, "family")) {
346 char *err;
347 family = strtol(val, &err, 10);
348 if (!*val || *err || family < 0) {
349 fprintf(stderr, "bad numerical value %s\n", val);
350 goto error;
352 x86_cpu_def->family = family;
353 } else if (!strcmp(featurestr, "model")) {
354 char *err;
355 model = strtol(val, &err, 10);
356 if (!*val || *err || model < 0 || model > 0xff) {
357 fprintf(stderr, "bad numerical value %s\n", val);
358 goto error;
360 x86_cpu_def->model = model;
361 } else if (!strcmp(featurestr, "stepping")) {
362 char *err;
363 stepping = strtol(val, &err, 10);
364 if (!*val || *err || stepping < 0 || stepping > 0xf) {
365 fprintf(stderr, "bad numerical value %s\n", val);
366 goto error;
368 x86_cpu_def->stepping = stepping;
369 } else if (!strcmp(featurestr, "vendor")) {
370 if (strlen(val) != 12) {
371 fprintf(stderr, "vendor string must be 12 chars long\n");
372 goto error;
374 x86_cpu_def->vendor1 = 0;
375 x86_cpu_def->vendor2 = 0;
376 x86_cpu_def->vendor3 = 0;
377 for(i = 0; i < 4; i++) {
378 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
379 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
380 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
382 } else if (!strcmp(featurestr, "model_id")) {
383 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
384 val);
385 } else {
386 fprintf(stderr, "unrecognized feature %s\n", featurestr);
387 goto error;
389 } else {
390 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
391 goto error;
393 featurestr = strtok(NULL, ",");
395 x86_cpu_def->features |= plus_features;
396 x86_cpu_def->ext_features |= plus_ext_features;
397 x86_cpu_def->ext2_features |= plus_ext2_features;
398 x86_cpu_def->ext3_features |= plus_ext3_features;
399 x86_cpu_def->features &= ~minus_features;
400 x86_cpu_def->ext_features &= ~minus_ext_features;
401 x86_cpu_def->ext2_features &= ~minus_ext2_features;
402 x86_cpu_def->ext3_features &= ~minus_ext3_features;
403 free(s);
404 return 0;
406 error:
407 free(s);
408 return -1;
411 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
413 unsigned int i;
415 for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
416 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
419 static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
421 x86_def_t def1, *def = &def1;
423 if (cpu_x86_find_by_name(def, cpu_model) < 0)
424 return -1;
425 if (def->vendor1) {
426 env->cpuid_vendor1 = def->vendor1;
427 env->cpuid_vendor2 = def->vendor2;
428 env->cpuid_vendor3 = def->vendor3;
429 } else {
430 env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
431 env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
432 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
434 env->cpuid_level = def->level;
435 if (def->family > 0x0f)
436 env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
437 else
438 env->cpuid_version = def->family << 8;
439 env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
440 env->cpuid_version |= def->stepping;
441 env->cpuid_features = def->features;
442 env->pat = 0x0007040600070406ULL;
443 env->cpuid_ext_features = def->ext_features;
444 env->cpuid_ext2_features = def->ext2_features;
445 env->cpuid_xlevel = def->xlevel;
446 env->cpuid_ext3_features = def->ext3_features;
448 const char *model_id = def->model_id;
449 int c, len, i;
450 if (!model_id)
451 model_id = "";
452 len = strlen(model_id);
453 for(i = 0; i < 48; i++) {
454 if (i >= len)
455 c = '\0';
456 else
457 c = (uint8_t)model_id[i];
458 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
461 return 0;
464 /* NOTE: must be called outside the CPU execute loop */
465 void cpu_reset(CPUX86State *env)
467 int i;
469 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
470 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
471 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
474 memset(env, 0, offsetof(CPUX86State, breakpoints));
476 tlb_flush(env, 1);
478 env->old_exception = -1;
480 /* init to reset state */
482 #ifdef CONFIG_SOFTMMU
483 env->hflags |= HF_SOFTMMU_MASK;
484 #endif
485 env->hflags2 |= HF2_GIF_MASK;
487 cpu_x86_update_cr0(env, 0x60000010);
488 env->a20_mask = ~0x0;
489 env->smbase = 0x30000;
491 env->idt.limit = 0xffff;
492 env->gdt.limit = 0xffff;
493 env->ldt.limit = 0xffff;
494 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
495 env->tr.limit = 0xffff;
496 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
498 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
499 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
500 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
501 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
502 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
503 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
504 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
505 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
506 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
507 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
508 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
509 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
511 env->eip = 0xfff0;
512 env->regs[R_EDX] = env->cpuid_version;
514 env->eflags = 0x2;
516 /* FPU init */
517 for(i = 0;i < 8; i++)
518 env->fptags[i] = 1;
519 env->fpuc = 0x37f;
521 env->mxcsr = 0x1f80;
523 memset(env->dr, 0, sizeof(env->dr));
524 env->dr[6] = DR6_FIXED_1;
525 env->dr[7] = DR7_FIXED_1;
526 cpu_breakpoint_remove_all(env, BP_CPU);
527 cpu_watchpoint_remove_all(env, BP_CPU);
530 void cpu_x86_close(CPUX86State *env)
532 qemu_free(env);
/***********************************************************/
/* x86 debug */
/* Printable names for the lazy condition-code ops, indexed by CC_OP_*
 * (order must match the CC_OP enum in cpu.h). */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
593 static void
594 cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
595 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
596 const char *name, struct SegmentCache *sc)
598 #ifdef TARGET_X86_64
599 if (env->hflags & HF_CS64_MASK) {
600 cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
601 sc->selector, sc->base, sc->limit, sc->flags);
602 } else
603 #endif
605 cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
606 (uint32_t)sc->base, sc->limit, sc->flags);
609 if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
610 goto done;
612 cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
613 if (sc->flags & DESC_S_MASK) {
614 if (sc->flags & DESC_CS_MASK) {
615 cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
616 ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
617 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
618 (sc->flags & DESC_R_MASK) ? 'R' : '-');
619 } else {
620 cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS " : "DS16");
621 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
622 (sc->flags & DESC_W_MASK) ? 'W' : '-');
624 cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
625 } else {
626 static const char *sys_type_name[2][16] = {
627 { /* 32 bit mode */
628 "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
629 "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
630 "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
631 "CallGate32", "Reserved", "IntGate32", "TrapGate32"
633 { /* 64 bit mode */
634 "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
635 "Reserved", "Reserved", "Reserved", "Reserved",
636 "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
637 "Reserved", "IntGate64", "TrapGate64"
640 cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
641 [(sc->flags & DESC_TYPE_MASK)
642 >> DESC_TYPE_SHIFT]);
644 done:
645 cpu_fprintf(f, "\n");
648 void cpu_dump_state(CPUState *env, FILE *f,
649 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
650 int flags)
652 int eflags, i, nb;
653 char cc_op_name[32];
654 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
656 if (kvm_enabled())
657 kvm_arch_get_registers(env);
659 eflags = env->eflags;
660 #ifdef TARGET_X86_64
661 if (env->hflags & HF_CS64_MASK) {
662 cpu_fprintf(f,
663 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
664 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
665 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
666 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
667 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
668 env->regs[R_EAX],
669 env->regs[R_EBX],
670 env->regs[R_ECX],
671 env->regs[R_EDX],
672 env->regs[R_ESI],
673 env->regs[R_EDI],
674 env->regs[R_EBP],
675 env->regs[R_ESP],
676 env->regs[8],
677 env->regs[9],
678 env->regs[10],
679 env->regs[11],
680 env->regs[12],
681 env->regs[13],
682 env->regs[14],
683 env->regs[15],
684 env->eip, eflags,
685 eflags & DF_MASK ? 'D' : '-',
686 eflags & CC_O ? 'O' : '-',
687 eflags & CC_S ? 'S' : '-',
688 eflags & CC_Z ? 'Z' : '-',
689 eflags & CC_A ? 'A' : '-',
690 eflags & CC_P ? 'P' : '-',
691 eflags & CC_C ? 'C' : '-',
692 env->hflags & HF_CPL_MASK,
693 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
694 (int)(env->a20_mask >> 20) & 1,
695 (env->hflags >> HF_SMM_SHIFT) & 1,
696 env->halted);
697 } else
698 #endif
700 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
701 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
702 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
703 (uint32_t)env->regs[R_EAX],
704 (uint32_t)env->regs[R_EBX],
705 (uint32_t)env->regs[R_ECX],
706 (uint32_t)env->regs[R_EDX],
707 (uint32_t)env->regs[R_ESI],
708 (uint32_t)env->regs[R_EDI],
709 (uint32_t)env->regs[R_EBP],
710 (uint32_t)env->regs[R_ESP],
711 (uint32_t)env->eip, eflags,
712 eflags & DF_MASK ? 'D' : '-',
713 eflags & CC_O ? 'O' : '-',
714 eflags & CC_S ? 'S' : '-',
715 eflags & CC_Z ? 'Z' : '-',
716 eflags & CC_A ? 'A' : '-',
717 eflags & CC_P ? 'P' : '-',
718 eflags & CC_C ? 'C' : '-',
719 env->hflags & HF_CPL_MASK,
720 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
721 (int)(env->a20_mask >> 20) & 1,
722 (env->hflags >> HF_SMM_SHIFT) & 1,
723 env->halted);
726 for(i = 0; i < 6; i++) {
727 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
728 &env->segs[i]);
730 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
731 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);
733 #ifdef TARGET_X86_64
734 if (env->hflags & HF_LMA_MASK) {
735 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
736 env->gdt.base, env->gdt.limit);
737 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
738 env->idt.base, env->idt.limit);
739 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
740 (uint32_t)env->cr[0],
741 env->cr[2],
742 env->cr[3],
743 (uint32_t)env->cr[4]);
744 for(i = 0; i < 4; i++)
745 cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
746 cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
747 env->dr[6], env->dr[7]);
748 } else
749 #endif
751 cpu_fprintf(f, "GDT= %08x %08x\n",
752 (uint32_t)env->gdt.base, env->gdt.limit);
753 cpu_fprintf(f, "IDT= %08x %08x\n",
754 (uint32_t)env->idt.base, env->idt.limit);
755 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
756 (uint32_t)env->cr[0],
757 (uint32_t)env->cr[2],
758 (uint32_t)env->cr[3],
759 (uint32_t)env->cr[4]);
760 for(i = 0; i < 4; i++)
761 cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
762 cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
764 if (flags & X86_DUMP_CCOP) {
765 if ((unsigned)env->cc_op < CC_OP_NB)
766 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
767 else
768 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
769 #ifdef TARGET_X86_64
770 if (env->hflags & HF_CS64_MASK) {
771 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
772 env->cc_src, env->cc_dst,
773 cc_op_name);
774 } else
775 #endif
777 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
778 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
779 cc_op_name);
782 if (flags & X86_DUMP_FPU) {
783 int fptag;
784 fptag = 0;
785 for(i = 0; i < 8; i++) {
786 fptag |= ((!env->fptags[i]) << i);
788 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
789 env->fpuc,
790 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
791 env->fpstt,
792 fptag,
793 env->mxcsr);
794 for(i=0;i<8;i++) {
795 #if defined(USE_X86LDOUBLE)
796 union {
797 long double d;
798 struct {
799 uint64_t lower;
800 uint16_t upper;
801 } l;
802 } tmp;
803 tmp.d = env->fpregs[i].d;
804 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
805 i, tmp.l.lower, tmp.l.upper);
806 #else
807 cpu_fprintf(f, "FPR%d=%016" PRIx64,
808 i, env->fpregs[i].mmx.q);
809 #endif
810 if ((i & 1) == 1)
811 cpu_fprintf(f, "\n");
812 else
813 cpu_fprintf(f, " ");
815 if (env->hflags & HF_CS64_MASK)
816 nb = 16;
817 else
818 nb = 8;
819 for(i=0;i<nb;i++) {
820 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
822 env->xmm_regs[i].XMM_L(3),
823 env->xmm_regs[i].XMM_L(2),
824 env->xmm_regs[i].XMM_L(1),
825 env->xmm_regs[i].XMM_L(0));
826 if ((i & 1) == 1)
827 cpu_fprintf(f, "\n");
828 else
829 cpu_fprintf(f, " ");
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */
838 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
840 a20_state = (a20_state != 0);
841 if (a20_state != ((env->a20_mask >> 20) & 1)) {
842 #if defined(DEBUG_MMU)
843 printf("A20 update: a20=%d\n", a20_state);
844 #endif
845 /* if the cpu is currently executing code, we must unlink it and
846 all the potentially executing TB */
847 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
849 /* when a20 is changed, all the MMU mappings are invalid, so
850 we must flush everything */
851 tlb_flush(env, 1);
852 env->a20_mask = (~0x100000) | (a20_state << 20);
856 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
858 int pe_state;
860 #if defined(DEBUG_MMU)
861 printf("CR0 update: CR0=0x%08x\n", new_cr0);
862 #endif
863 if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
864 (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
865 tlb_flush(env, 1);
868 #ifdef TARGET_X86_64
869 if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
870 (env->efer & MSR_EFER_LME)) {
871 /* enter in long mode */
872 /* XXX: generate an exception */
873 if (!(env->cr[4] & CR4_PAE_MASK))
874 return;
875 env->efer |= MSR_EFER_LMA;
876 env->hflags |= HF_LMA_MASK;
877 } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
878 (env->efer & MSR_EFER_LMA)) {
879 /* exit long mode */
880 env->efer &= ~MSR_EFER_LMA;
881 env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
882 env->eip &= 0xffffffff;
884 #endif
885 env->cr[0] = new_cr0 | CR0_ET_MASK;
887 /* update PE flag in hidden flags */
888 pe_state = (env->cr[0] & CR0_PE_MASK);
889 env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
890 /* ensure that ADDSEG is always set in real mode */
891 env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
892 /* update FPU flags */
893 env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
894 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
897 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
898 the PDPT */
899 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
901 env->cr[3] = new_cr3;
902 if (env->cr[0] & CR0_PG_MASK) {
903 #if defined(DEBUG_MMU)
904 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
905 #endif
906 tlb_flush(env, 0);
910 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
912 #if defined(DEBUG_MMU)
913 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
914 #endif
915 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
916 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
917 tlb_flush(env, 1);
919 /* SSE handling */
920 if (!(env->cpuid_features & CPUID_SSE))
921 new_cr4 &= ~CR4_OSFXSR_MASK;
922 if (new_cr4 & CR4_OSFXSR_MASK)
923 env->hflags |= HF_OSFXSR_MASK;
924 else
925 env->hflags &= ~HF_OSFXSR_MASK;
927 env->cr[4] = new_cr4;
930 #if defined(CONFIG_USER_ONLY)
932 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
933 int is_write, int mmu_idx, int is_softmmu)
935 /* user mode only emulation */
936 is_write &= 1;
937 env->cr[2] = addr;
938 env->error_code = (is_write << PG_ERROR_W_BIT);
939 env->error_code |= PG_ERROR_U_MASK;
940 env->exception_index = EXCP0E_PAGE;
941 return 1;
944 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
946 return addr;
949 #else
/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
/* Mask selecting the physical-frame bits of a PTE: 40 address bits on
 * 64-bit targets, 36 on 32-bit (PAE), 32 under kqemu. */
#if defined(CONFIG_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
#endif
963 /* return value:
964 -1 = cannot handle fault
965 0 = nothing more to do
966 1 = generate PF fault
967 2 = soft MMU activation required for this block
969 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
970 int is_write1, int mmu_idx, int is_softmmu)
972 uint64_t ptep, pte;
973 target_ulong pde_addr, pte_addr;
974 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
975 target_phys_addr_t paddr;
976 uint32_t page_offset;
977 target_ulong vaddr, virt_addr;
979 is_user = mmu_idx == MMU_USER_IDX;
980 #if defined(DEBUG_MMU)
981 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
982 addr, is_write1, is_user, env->eip);
983 #endif
984 is_write = is_write1 & 1;
986 if (!(env->cr[0] & CR0_PG_MASK)) {
987 pte = addr;
988 virt_addr = addr & TARGET_PAGE_MASK;
989 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
990 page_size = 4096;
991 goto do_mapping;
994 if (env->cr[4] & CR4_PAE_MASK) {
995 uint64_t pde, pdpe;
996 target_ulong pdpe_addr;
998 #ifdef TARGET_X86_64
999 if (env->hflags & HF_LMA_MASK) {
1000 uint64_t pml4e_addr, pml4e;
1001 int32_t sext;
1003 /* test virtual address sign extension */
1004 sext = (int64_t)addr >> 47;
1005 if (sext != 0 && sext != -1) {
1006 env->error_code = 0;
1007 env->exception_index = EXCP0D_GPF;
1008 return 1;
1011 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
1012 env->a20_mask;
1013 pml4e = ldq_phys(pml4e_addr);
1014 if (!(pml4e & PG_PRESENT_MASK)) {
1015 error_code = 0;
1016 goto do_fault;
1018 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
1019 error_code = PG_ERROR_RSVD_MASK;
1020 goto do_fault;
1022 if (!(pml4e & PG_ACCESSED_MASK)) {
1023 pml4e |= PG_ACCESSED_MASK;
1024 stl_phys_notdirty(pml4e_addr, pml4e);
1026 ptep = pml4e ^ PG_NX_MASK;
1027 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
1028 env->a20_mask;
1029 pdpe = ldq_phys(pdpe_addr);
1030 if (!(pdpe & PG_PRESENT_MASK)) {
1031 error_code = 0;
1032 goto do_fault;
1034 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
1035 error_code = PG_ERROR_RSVD_MASK;
1036 goto do_fault;
1038 ptep &= pdpe ^ PG_NX_MASK;
1039 if (!(pdpe & PG_ACCESSED_MASK)) {
1040 pdpe |= PG_ACCESSED_MASK;
1041 stl_phys_notdirty(pdpe_addr, pdpe);
1043 } else
1044 #endif
1046 /* XXX: load them when cr3 is loaded ? */
1047 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1048 env->a20_mask;
1049 pdpe = ldq_phys(pdpe_addr);
1050 if (!(pdpe & PG_PRESENT_MASK)) {
1051 error_code = 0;
1052 goto do_fault;
1054 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
1057 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
1058 env->a20_mask;
1059 pde = ldq_phys(pde_addr);
1060 if (!(pde & PG_PRESENT_MASK)) {
1061 error_code = 0;
1062 goto do_fault;
1064 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
1065 error_code = PG_ERROR_RSVD_MASK;
1066 goto do_fault;
1068 ptep &= pde ^ PG_NX_MASK;
1069 if (pde & PG_PSE_MASK) {
1070 /* 2 MB page */
1071 page_size = 2048 * 1024;
1072 ptep ^= PG_NX_MASK;
1073 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1074 goto do_fault_protect;
1075 if (is_user) {
1076 if (!(ptep & PG_USER_MASK))
1077 goto do_fault_protect;
1078 if (is_write && !(ptep & PG_RW_MASK))
1079 goto do_fault_protect;
1080 } else {
1081 if ((env->cr[0] & CR0_WP_MASK) &&
1082 is_write && !(ptep & PG_RW_MASK))
1083 goto do_fault_protect;
1085 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1086 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1087 pde |= PG_ACCESSED_MASK;
1088 if (is_dirty)
1089 pde |= PG_DIRTY_MASK;
1090 stl_phys_notdirty(pde_addr, pde);
1092 /* align to page_size */
1093 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
1094 virt_addr = addr & ~(page_size - 1);
1095 } else {
1096 /* 4 KB page */
1097 if (!(pde & PG_ACCESSED_MASK)) {
1098 pde |= PG_ACCESSED_MASK;
1099 stl_phys_notdirty(pde_addr, pde);
1101 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
1102 env->a20_mask;
1103 pte = ldq_phys(pte_addr);
1104 if (!(pte & PG_PRESENT_MASK)) {
1105 error_code = 0;
1106 goto do_fault;
1108 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
1109 error_code = PG_ERROR_RSVD_MASK;
1110 goto do_fault;
1112 /* combine pde and pte nx, user and rw protections */
1113 ptep &= pte ^ PG_NX_MASK;
1114 ptep ^= PG_NX_MASK;
1115 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1116 goto do_fault_protect;
1117 if (is_user) {
1118 if (!(ptep & PG_USER_MASK))
1119 goto do_fault_protect;
1120 if (is_write && !(ptep & PG_RW_MASK))
1121 goto do_fault_protect;
1122 } else {
1123 if ((env->cr[0] & CR0_WP_MASK) &&
1124 is_write && !(ptep & PG_RW_MASK))
1125 goto do_fault_protect;
1127 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1128 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1129 pte |= PG_ACCESSED_MASK;
1130 if (is_dirty)
1131 pte |= PG_DIRTY_MASK;
1132 stl_phys_notdirty(pte_addr, pte);
1134 page_size = 4096;
1135 virt_addr = addr & ~0xfff;
1136 pte = pte & (PHYS_ADDR_MASK | 0xfff);
1138 } else {
1139 uint32_t pde;
1141 /* page directory entry */
1142 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
1143 env->a20_mask;
1144 pde = ldl_phys(pde_addr);
1145 if (!(pde & PG_PRESENT_MASK)) {
1146 error_code = 0;
1147 goto do_fault;
1149 /* if PSE bit is set, then we use a 4MB page */
1150 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1151 page_size = 4096 * 1024;
1152 if (is_user) {
1153 if (!(pde & PG_USER_MASK))
1154 goto do_fault_protect;
1155 if (is_write && !(pde & PG_RW_MASK))
1156 goto do_fault_protect;
1157 } else {
1158 if ((env->cr[0] & CR0_WP_MASK) &&
1159 is_write && !(pde & PG_RW_MASK))
1160 goto do_fault_protect;
1162 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1163 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1164 pde |= PG_ACCESSED_MASK;
1165 if (is_dirty)
1166 pde |= PG_DIRTY_MASK;
1167 stl_phys_notdirty(pde_addr, pde);
1170 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1171 ptep = pte;
1172 virt_addr = addr & ~(page_size - 1);
1173 } else {
1174 if (!(pde & PG_ACCESSED_MASK)) {
1175 pde |= PG_ACCESSED_MASK;
1176 stl_phys_notdirty(pde_addr, pde);
1179 /* page directory entry */
1180 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1181 env->a20_mask;
1182 pte = ldl_phys(pte_addr);
1183 if (!(pte & PG_PRESENT_MASK)) {
1184 error_code = 0;
1185 goto do_fault;
1187 /* combine pde and pte user and rw protections */
1188 ptep = pte & pde;
1189 if (is_user) {
1190 if (!(ptep & PG_USER_MASK))
1191 goto do_fault_protect;
1192 if (is_write && !(ptep & PG_RW_MASK))
1193 goto do_fault_protect;
1194 } else {
1195 if ((env->cr[0] & CR0_WP_MASK) &&
1196 is_write && !(ptep & PG_RW_MASK))
1197 goto do_fault_protect;
1199 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1200 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1201 pte |= PG_ACCESSED_MASK;
1202 if (is_dirty)
1203 pte |= PG_DIRTY_MASK;
1204 stl_phys_notdirty(pte_addr, pte);
1206 page_size = 4096;
1207 virt_addr = addr & ~0xfff;
1210 /* the page can be put in the TLB */
1211 prot = PAGE_READ;
1212 if (!(ptep & PG_NX_MASK))
1213 prot |= PAGE_EXEC;
1214 if (pte & PG_DIRTY_MASK) {
1215 /* only set write access if already dirty... otherwise wait
1216 for dirty access */
1217 if (is_user) {
1218 if (ptep & PG_RW_MASK)
1219 prot |= PAGE_WRITE;
1220 } else {
1221 if (!(env->cr[0] & CR0_WP_MASK) ||
1222 (ptep & PG_RW_MASK))
1223 prot |= PAGE_WRITE;
1226 do_mapping:
1227 pte = pte & env->a20_mask;
1229 /* Even if 4MB pages, we map only one 4KB page in the cache to
1230 avoid filling it too fast */
1231 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1232 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1233 vaddr = virt_addr + page_offset;
1235 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1236 return ret;
1237 do_fault_protect:
1238 error_code = PG_ERROR_P_MASK;
1239 do_fault:
1240 error_code |= (is_write << PG_ERROR_W_BIT);
1241 if (is_user)
1242 error_code |= PG_ERROR_U_MASK;
1243 if (is_write1 == 2 &&
1244 (env->efer & MSR_EFER_NXE) &&
1245 (env->cr[4] & CR4_PAE_MASK))
1246 error_code |= PG_ERROR_I_D_MASK;
1247 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
1248 /* cr2 is not modified in case of exceptions */
1249 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
1250 addr);
1251 } else {
1252 env->cr[2] = addr;
1254 env->error_code = error_code;
1255 env->exception_index = EXCP0E_PAGE;
1256 return 1;
/* Translate a guest-virtual address to a guest-physical address by walking
 * the page tables in software, for debugger/monitor use only: no access
 * checks are performed, no A/D bits are set, and no fault is raised.
 * Returns the physical address, or -1 if the address is not mapped.
 */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* 64-bit mode: 4-level walk starting at the PML4 */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension: bits 63..47 must be
               all-zero or all-one for a canonical address */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* 32-bit PAE: CR3 points at the 4-entry PDPT, 32-byte aligned */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: physical == virtual */
            pte = addr;
            page_size = 4096;
        } else {
            /* legacy 2-level 32-bit walk */
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    /* keep the sub-large-page bits of the address as the page offset */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
/* Map the x86 debug register DR0..DR3 slot 'index' onto QEMU's generic
 * breakpoint/watchpoint machinery, according to its type field in DR7.
 * On failure the cached breakpoint reference is cleared so that
 * hw_breakpoint_remove() will not try to remove a stale entry.
 */
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        /* instruction breakpoint: only insert when enabled in DR7 */
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        /* data-write watchpoint */
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        /* data read/write watchpoint; falls into the shared insert path */
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}
1381 void hw_breakpoint_remove(CPUState *env, int index)
1383 if (!env->cpu_breakpoint[index])
1384 return;
1385 switch (hw_breakpoint_type(env->dr[7], index)) {
1386 case 0:
1387 if (hw_breakpoint_enabled(env->dr[7], index))
1388 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
1389 break;
1390 case 1:
1391 case 3:
1392 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
1393 break;
1394 case 2:
1395 /* No support for I/O watchpoints yet */
1396 break;
1400 int check_hw_breakpoints(CPUState *env, int force_dr6_update)
1402 target_ulong dr6;
1403 int reg, type;
1404 int hit_enabled = 0;
1406 dr6 = env->dr[6] & ~0xf;
1407 for (reg = 0; reg < 4; reg++) {
1408 type = hw_breakpoint_type(env->dr[7], reg);
1409 if ((type == 0 && env->dr[reg] == env->eip) ||
1410 ((type & 1) && env->cpu_watchpoint[reg] &&
1411 (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
1412 dr6 |= 1 << reg;
1413 if (hw_breakpoint_enabled(env->dr[7], reg))
1414 hit_enabled = 1;
1417 if (hit_enabled || force_dr6_update)
1418 env->dr[6] = dr6;
1419 return hit_enabled;
1422 static CPUDebugExcpHandler *prev_debug_excp_handler;
1424 void raise_exception(int exception_index);
/* Debug-exception hook installed by cpu_x86_init(): decide whether a
 * generic breakpoint/watchpoint hit corresponds to an architectural #DB
 * (a BP_CPU entry backing DR0..DR3) and, if so, raise EXCP01_DB in the
 * guest; otherwise defer to the previously installed handler (gdbstub).
 */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        /* a data watchpoint fired */
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                /* an enabled DR slot matched: deliver #DB to the guest */
                raise_exception(EXCP01_DB);
            else
                /* hit but not enabled: silently resume execution */
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        /* an instruction breakpoint fired: find the entry for EIP */
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    /* force DR6 update so the guest sees which slot hit */
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    /* chain to the previous handler (e.g. the gdbstub's) */
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
1451 #endif /* !CONFIG_USER_ONLY */
/* Execute the CPUID instruction on the *host* CPU and return the raw
 * results; used when KVM is enabled so the guest sees real host features.
 * Any of the output pointers may be NULL to ignore that register.
 * NOTE(review): when built without KVM support this function leaves the
 * outputs untouched — callers only invoke it under kvm_enabled().
 */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM) || defined(USE_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    /* 32-bit: save/restore all registers around CPUID so EBX stays
       intact (it may be the PIC register), storing results via ESI */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
/* Emulate the CPUID instruction for the guest: fill eax/ebx/ecx/edx for
 * leaf 'index' (with sub-leaf 'count' where relevant) from the CPU model
 * stored in env, applying KVM-specific adjustments when KVM is enabled.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached: out-of-range leaves behave like the
       highest *basic* leaf, per the CPUID specification */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* vendor string and maximum basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported on compatibility mode on AMD, and
         * syscall isn't supported in compatibility mode on Intel, so
         * advertise the actual cpu, and say goodbye to migration between
         * different vendors if you use compatibility mode. */
        if (kvm_enabled())
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        /* version, features */
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;

        /* "Hypervisor present" bit required for Microsoft SVVP */
        if (kvm_enabled())
            *ecx |= (1 << 31);
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        switch (count) {
            case 0: /* L1 dcache info */
                *eax = 0x0000121;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 1: /* L1 icache info */
                *eax = 0x0000122;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 2: /* L2 cache info */
                *eax = 0x0000143;
                *ebx = 0x3c0003f;
                *ecx = 0x0000fff;
                *edx = 0x0000001;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* maximum extended leaf and vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* extended features */
        *eax = env->cpuid_features;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            uint32_t h_eax, h_edx;

            host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx);

            /* disable CPU features that the host does not support */

            /* long mode */
            if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
                *edx &= ~0x20000000;
            /* syscall */
            if ((h_edx & 0x00000800) == 0)
                *edx &= ~0x00000800;
            /* nx */
            if ((h_edx & 0x00100000) == 0)
                *edx &= ~0x00100000;

            /* disable CPU features that KVM cannot support */

            /* svm */
            if (!kvm_nested)
                *ecx &= ~4UL;
            /* 3dnow */
            *edx &= ~0xc0000000;
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(CONFIG_KQEMU)
            *eax = 0x00003020;	/* 48 bits virtual, 32 bits physical */
#else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028;	/* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(CONFIG_KQEMU)
            *eax = 0x00000020;	/* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        /* SVM capability leaf */
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
/* Allocate and initialize a new x86 CPU state for the given model string.
 * One-time global init (flag optimizer tables, debug exception hook) runs
 * on the first call only.  Returns NULL if the model is unknown.
 * Ownership of the returned CPUX86State passes to the caller.
 */
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        /* chain our #DB dispatcher in front of the existing handler */
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        /* unknown/invalid cpu model: tear down the half-built state */
        cpu_x86_close(env);
        return NULL;
    }
    cpu_reset(env);
#ifdef CONFIG_KQEMU
    kqemu_init(env);
#endif

    qemu_init_vcpu(env);

    /* Mask each guest feature word down to what the KVM host actually
       supports, warning via the feature-name tables.  NOTE(review):
       exact trimming semantics live in kvm_trim_features() — confirm. */
    if (kvm_enabled()) {
        kvm_trim_features(&env->cpuid_features,
                          kvm_arch_get_supported_cpuid(env, 1, R_EDX),
                          feature_name);
        kvm_trim_features(&env->cpuid_ext_features,
                          kvm_arch_get_supported_cpuid(env, 1, R_ECX),
                          ext_feature_name);
        kvm_trim_features(&env->cpuid_ext2_features,
                          kvm_arch_get_supported_cpuid(env, 0x80000001, R_EDX),
                          ext2_feature_name);
        kvm_trim_features(&env->cpuid_ext3_features,
                          kvm_arch_get_supported_cpuid(env, 0x80000001, R_ECX),
                          ext3_feature_name);
    }

    return env;
}