Merge branch 'stable-0.10' of git://git.sv.gnu.org/qemu into stable-0.10
[qemu-kvm/fedora.git] / target-i386 / helper.c
blobc783d6e71d25e9525577b1f7a8988c680c59f080
1 /*
2 * i386 helpers (without register variable usage)
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
26 #include <assert.h>
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "qemu-common.h"
31 #include "kvm.h"
33 #include "qemu-kvm.h"
35 //#define DEBUG_MMU
37 /* feature flags taken from "Intel Processor Identification and the CPUID
38 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
39 * about feature names, the Linux name is used. */
/* NULL entries mark reserved/unnamed bits; index == CPUID bit number. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};

/* If FLAGNAME names a bit in the 32-entry TABLE, set that bit in *BITS.
   Returns 1 when a match was found, 0 otherwise. */
static int set_feature_bit(const char *flagname, const char **table,
                           uint32_t *bits)
{
    int i, found = 0;

    for (i = 0; i < 32; i++) {
        if (table[i] && !strcmp(flagname, table[i])) {
            *bits |= 1u << i;
            found = 1;
        }
    }
    return found;
}

/* Look FLAGNAME up in all four feature-name tables and set the matching
   bit in the corresponding output word.  A name present in more than one
   table (e.g. "fpu") sets a bit in each.  Unknown names produce a
   diagnostic on stderr and are otherwise ignored.  (The four identical
   search loops of the original are factored into set_feature_bit().) */
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int found = 0;

    found |= set_feature_bit(flagname, feature_name, features);
    found |= set_feature_bit(flagname, ext_feature_name, ext_features);
    found |= set_feature_bit(flagname, ext2_feature_name, ext2_features);
    found |= set_feature_bit(flagname, ext3_feature_name, ext3_features);
    if (!found) {
        fprintf(stderr, "CPU feature %s not found\n", flagname);
    }
}
/* Drop every bit of *FEATURES that the host does not advertise in
   SUPPORTED.  Clearing each unsupported bit one at a time is equivalent
   to masking with SUPPORTED.  NAMES is accepted for the callers'
   convenience but is not consulted here. */
static void kvm_trim_features(uint32_t *features, uint32_t supported,
                              const char *names[])
{
    *features &= supported;
}
112 extern const char *cpu_vendor_string;
/* Static description of a named CPU model: everything needed to fill in
 * the CPUID-related fields of CPUX86State (see cpu_x86_register()). */
typedef struct x86_def_t {
    const char *name;                     /* model name accepted by -cpu */
    uint32_t level;                       /* highest standard CPUID leaf */
    uint32_t vendor1, vendor2, vendor3;   /* vendor id, 4 chars per word
                                             (e.g. "Auth"/"enti"/"cAMD") */
    int family;
    int model;
    int stepping;
    /* CPUID feature words: leaf 1 EDX/ECX and leaf 0x80000001 EDX/ECX. */
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;                      /* highest extended (0x8000xxxx) leaf */
    char model_id[48];                    /* processor brand string */
} x86_def_t;
/* Cumulative CPUID leaf-1 EDX feature presets for the classic CPU models
 * in x86_defs[]; each generation extends the previous one. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
/* Baseline used by the qemu32/qemu64 and modern Intel/AMD models. */
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
/* Table of the built-in CPU models selectable with "-cpu name";
 * searched linearly by name in cpu_x86_find_by_name(). */
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        /* 0x0183F3FF keeps only the leaf-1 bits that are mirrored in the
           AMD extended leaf 0x80000001 EDX. */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
           CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
           CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
           CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU           T7700  @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
           CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
           CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
           CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU           T2600  @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
        /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
         * CPUID_HT | CPUID_TM | CPUID_PBE */
        /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
        /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
         * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270   @ 1.60GHz",
    },
};
315 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
317 unsigned int i;
318 x86_def_t *def;
320 char *s = strdup(cpu_model);
321 char *featurestr, *name = strtok(s, ",");
322 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
323 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
324 int family = -1, model = -1, stepping = -1;
326 def = NULL;
327 for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
328 if (strcmp(name, x86_defs[i].name) == 0) {
329 def = &x86_defs[i];
330 break;
333 if (!def)
334 goto error;
335 memcpy(x86_cpu_def, def, sizeof(*def));
337 featurestr = strtok(NULL, ",");
339 while (featurestr) {
340 char *val;
341 if (featurestr[0] == '+') {
342 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
343 } else if (featurestr[0] == '-') {
344 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
345 } else if ((val = strchr(featurestr, '='))) {
346 *val = 0; val++;
347 if (!strcmp(featurestr, "family")) {
348 char *err;
349 family = strtol(val, &err, 10);
350 if (!*val || *err || family < 0) {
351 fprintf(stderr, "bad numerical value %s\n", val);
352 goto error;
354 x86_cpu_def->family = family;
355 } else if (!strcmp(featurestr, "model")) {
356 char *err;
357 model = strtol(val, &err, 10);
358 if (!*val || *err || model < 0 || model > 0xff) {
359 fprintf(stderr, "bad numerical value %s\n", val);
360 goto error;
362 x86_cpu_def->model = model;
363 } else if (!strcmp(featurestr, "stepping")) {
364 char *err;
365 stepping = strtol(val, &err, 10);
366 if (!*val || *err || stepping < 0 || stepping > 0xf) {
367 fprintf(stderr, "bad numerical value %s\n", val);
368 goto error;
370 x86_cpu_def->stepping = stepping;
371 } else if (!strcmp(featurestr, "vendor")) {
372 if (strlen(val) != 12) {
373 fprintf(stderr, "vendor string must be 12 chars long\n");
374 goto error;
376 x86_cpu_def->vendor1 = 0;
377 x86_cpu_def->vendor2 = 0;
378 x86_cpu_def->vendor3 = 0;
379 for(i = 0; i < 4; i++) {
380 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
381 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
382 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
384 } else if (!strcmp(featurestr, "model_id")) {
385 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
386 val);
387 } else {
388 fprintf(stderr, "unrecognized feature %s\n", featurestr);
389 goto error;
391 } else {
392 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
393 goto error;
395 featurestr = strtok(NULL, ",");
397 x86_cpu_def->features |= plus_features;
398 x86_cpu_def->ext_features |= plus_ext_features;
399 x86_cpu_def->ext2_features |= plus_ext2_features;
400 x86_cpu_def->ext3_features |= plus_ext3_features;
401 x86_cpu_def->features &= ~minus_features;
402 x86_cpu_def->ext_features &= ~minus_ext_features;
403 x86_cpu_def->ext2_features &= ~minus_ext2_features;
404 x86_cpu_def->ext3_features &= ~minus_ext3_features;
405 free(s);
406 return 0;
408 error:
409 free(s);
410 return -1;
413 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
415 unsigned int i;
417 for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
418 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
/* Initialise the CPUID-related fields of ENV from the model description
 * selected by CPU_MODEL.  Returns 0 on success, -1 if the model string
 * could not be parsed. */
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        /* Models without an explicit vendor id report as Intel. */
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_level = def->level;
    /* Build the CPUID leaf 1 EAX signature: families above 0x0f are
       encoded as base family 0xf plus an extended family field. */
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    /* Low 4 model bits live in bits 4..7, the remainder in the extended
       model field (bits 16..19); stepping occupies bits 0..3. */
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;   /* default PAT value */
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        const char *model_id = def->model_id;
        int c, len, i;

        /* A global override (cpu_vendor_string) beats the table's
           model string; fall back to "" so strlen below is safe. */
        if (cpu_vendor_string != NULL)
            model_id = cpu_vendor_string;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        /* Pack the brand string 4 characters per 32-bit word,
           NUL-padding out to the full 48 characters. */
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
/* Put ENV back into its power-on state.
 * NOTE: must be called outside the CPU execute loop. */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    /* Zero everything up to (but not including) the breakpoint lists,
       which are preserved across reset. */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);   /* CD | NW | ET */
    env->a20_mask = ~0x0;                  /* A20 line enabled */
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segments; CS base 0xffff0000 makes CS:IP = f000:fff0
       address the reset vector. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;   /* EDX holds the CPU signature */

    env->eflags = 0x2;                       /* bit 1 is always set */

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;                  /* all FP registers empty */
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}
/* Free the given CPU state object. */
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
540 /***********************************************************/
541 /* x86 debug */
/* Printable names for the lazy condition-code operations; indexed
 * directly by env->cc_op in cpu_dump_state(), so the order here must
 * track the CC_OP_* enumeration (B/W/L/Q variants per operation). */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
598 void cpu_dump_state(CPUState *env, FILE *f,
599 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
600 int flags)
602 int eflags, i, nb;
603 char cc_op_name[32];
604 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
606 if (kvm_enabled())
607 kvm_arch_get_registers(env);
609 eflags = env->eflags;
610 #ifdef TARGET_X86_64
611 if (env->hflags & HF_CS64_MASK) {
612 cpu_fprintf(f,
613 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
614 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
615 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
616 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
617 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
618 env->regs[R_EAX],
619 env->regs[R_EBX],
620 env->regs[R_ECX],
621 env->regs[R_EDX],
622 env->regs[R_ESI],
623 env->regs[R_EDI],
624 env->regs[R_EBP],
625 env->regs[R_ESP],
626 env->regs[8],
627 env->regs[9],
628 env->regs[10],
629 env->regs[11],
630 env->regs[12],
631 env->regs[13],
632 env->regs[14],
633 env->regs[15],
634 env->eip, eflags,
635 eflags & DF_MASK ? 'D' : '-',
636 eflags & CC_O ? 'O' : '-',
637 eflags & CC_S ? 'S' : '-',
638 eflags & CC_Z ? 'Z' : '-',
639 eflags & CC_A ? 'A' : '-',
640 eflags & CC_P ? 'P' : '-',
641 eflags & CC_C ? 'C' : '-',
642 env->hflags & HF_CPL_MASK,
643 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
644 (int)(env->a20_mask >> 20) & 1,
645 (env->hflags >> HF_SMM_SHIFT) & 1,
646 env->halted);
647 } else
648 #endif
650 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
651 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
652 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
653 (uint32_t)env->regs[R_EAX],
654 (uint32_t)env->regs[R_EBX],
655 (uint32_t)env->regs[R_ECX],
656 (uint32_t)env->regs[R_EDX],
657 (uint32_t)env->regs[R_ESI],
658 (uint32_t)env->regs[R_EDI],
659 (uint32_t)env->regs[R_EBP],
660 (uint32_t)env->regs[R_ESP],
661 (uint32_t)env->eip, eflags,
662 eflags & DF_MASK ? 'D' : '-',
663 eflags & CC_O ? 'O' : '-',
664 eflags & CC_S ? 'S' : '-',
665 eflags & CC_Z ? 'Z' : '-',
666 eflags & CC_A ? 'A' : '-',
667 eflags & CC_P ? 'P' : '-',
668 eflags & CC_C ? 'C' : '-',
669 env->hflags & HF_CPL_MASK,
670 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
671 (int)(env->a20_mask >> 20) & 1,
672 (env->hflags >> HF_SMM_SHIFT) & 1,
673 env->halted);
676 #ifdef TARGET_X86_64
677 if (env->hflags & HF_LMA_MASK) {
678 for(i = 0; i < 6; i++) {
679 SegmentCache *sc = &env->segs[i];
680 cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
681 seg_name[i],
682 sc->selector,
683 sc->base,
684 sc->limit,
685 sc->flags);
687 cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
688 env->ldt.selector,
689 env->ldt.base,
690 env->ldt.limit,
691 env->ldt.flags);
692 cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
693 env->tr.selector,
694 env->tr.base,
695 env->tr.limit,
696 env->tr.flags);
697 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
698 env->gdt.base, env->gdt.limit);
699 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
700 env->idt.base, env->idt.limit);
701 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
702 (uint32_t)env->cr[0],
703 env->cr[2],
704 env->cr[3],
705 (uint32_t)env->cr[4]);
706 for(i = 0; i < 4; i++)
707 cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
708 cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
709 env->dr[6], env->dr[7]);
710 } else
711 #endif
713 for(i = 0; i < 6; i++) {
714 SegmentCache *sc = &env->segs[i];
715 cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
716 seg_name[i],
717 sc->selector,
718 (uint32_t)sc->base,
719 sc->limit,
720 sc->flags);
722 cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
723 env->ldt.selector,
724 (uint32_t)env->ldt.base,
725 env->ldt.limit,
726 env->ldt.flags);
727 cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
728 env->tr.selector,
729 (uint32_t)env->tr.base,
730 env->tr.limit,
731 env->tr.flags);
732 cpu_fprintf(f, "GDT= %08x %08x\n",
733 (uint32_t)env->gdt.base, env->gdt.limit);
734 cpu_fprintf(f, "IDT= %08x %08x\n",
735 (uint32_t)env->idt.base, env->idt.limit);
736 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
737 (uint32_t)env->cr[0],
738 (uint32_t)env->cr[2],
739 (uint32_t)env->cr[3],
740 (uint32_t)env->cr[4]);
741 for(i = 0; i < 4; i++)
742 cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
743 cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
745 if (flags & X86_DUMP_CCOP) {
746 if ((unsigned)env->cc_op < CC_OP_NB)
747 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
748 else
749 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
750 #ifdef TARGET_X86_64
751 if (env->hflags & HF_CS64_MASK) {
752 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
753 env->cc_src, env->cc_dst,
754 cc_op_name);
755 } else
756 #endif
758 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
759 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
760 cc_op_name);
763 if (flags & X86_DUMP_FPU) {
764 int fptag;
765 fptag = 0;
766 for(i = 0; i < 8; i++) {
767 fptag |= ((!env->fptags[i]) << i);
769 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
770 env->fpuc,
771 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
772 env->fpstt,
773 fptag,
774 env->mxcsr);
775 for(i=0;i<8;i++) {
776 #if defined(USE_X86LDOUBLE)
777 union {
778 long double d;
779 struct {
780 uint64_t lower;
781 uint16_t upper;
782 } l;
783 } tmp;
784 tmp.d = env->fpregs[i].d;
785 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
786 i, tmp.l.lower, tmp.l.upper);
787 #else
788 cpu_fprintf(f, "FPR%d=%016" PRIx64,
789 i, env->fpregs[i].mmx.q);
790 #endif
791 if ((i & 1) == 1)
792 cpu_fprintf(f, "\n");
793 else
794 cpu_fprintf(f, " ");
796 if (env->hflags & HF_CS64_MASK)
797 nb = 16;
798 else
799 nb = 8;
800 for(i=0;i<nb;i++) {
801 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
803 env->xmm_regs[i].XMM_L(3),
804 env->xmm_regs[i].XMM_L(2),
805 env->xmm_regs[i].XMM_L(1),
806 env->xmm_regs[i].XMM_L(0));
807 if ((i & 1) == 1)
808 cpu_fprintf(f, "\n");
809 else
810 cpu_fprintf(f, " ");
815 /***********************************************************/
816 /* x86 mmu */
817 /* XXX: add PGE support */
819 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
821 a20_state = (a20_state != 0);
822 if (a20_state != ((env->a20_mask >> 20) & 1)) {
823 #if defined(DEBUG_MMU)
824 printf("A20 update: a20=%d\n", a20_state);
825 #endif
826 /* if the cpu is currently executing code, we must unlink it and
827 all the potentially executing TB */
828 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
830 /* when a20 is changed, all the MMU mappings are invalid, so
831 we must flush everything */
832 tlb_flush(env, 1);
833 env->a20_mask = (~0x100000) | (a20_state << 20);
/* Install NEW_CR0 into ENV, handling the TLB flush, long-mode entry/exit
 * and the hflags bits derived from CR0. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* A change to any paging-related bit invalidates all cached
       translations. */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;   /* ET reads as 1 */

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: copy CR0.MP/EM/TS into the corresponding
       hflags positions with a single shift-and-mask */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
878 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
879 the PDPT */
880 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
882 env->cr[3] = new_cr3;
883 if (env->cr[0] & CR0_PG_MASK) {
884 #if defined(DEBUG_MMU)
885 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
886 #endif
887 tlb_flush(env, 0);
891 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
893 #if defined(DEBUG_MMU)
894 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
895 #endif
896 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
897 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
898 tlb_flush(env, 1);
900 /* SSE handling */
901 if (!(env->cpuid_features & CPUID_SSE))
902 new_cr4 &= ~CR4_OSFXSR_MASK;
903 if (new_cr4 & CR4_OSFXSR_MASK)
904 env->hflags |= HF_OSFXSR_MASK;
905 else
906 env->hflags &= ~HF_OSFXSR_MASK;
908 env->cr[4] = new_cr4;
911 #if defined(CONFIG_USER_ONLY)
913 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
914 int is_write, int mmu_idx, int is_softmmu)
916 /* user mode only emulation */
917 is_write &= 1;
918 env->cr[2] = addr;
919 env->error_code = (is_write << PG_ERROR_W_BIT);
920 env->error_code |= PG_ERROR_U_MASK;
921 env->exception_index = EXCP0E_PAGE;
922 return 1;
/* User-mode emulation: guest virtual addresses map 1:1 to "physical"
   addresses, so the translation is the identity. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
930 #else
/* Mask selecting the physical page-frame bits of a PAE/long-mode
 * page-table entry (see its use on pdpe/pde/pte below).
 * XXX: This value should match the one returned by CPUID
 * and in exec.c */
#if defined(USE_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
#endif
944 /* return value:
945 -1 = cannot handle fault
946 0 = nothing more to do
947 1 = generate PF fault
948 2 = soft MMU activation required for this block
950 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
951 int is_write1, int mmu_idx, int is_softmmu)
953 uint64_t ptep, pte;
954 target_ulong pde_addr, pte_addr;
955 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
956 target_phys_addr_t paddr;
957 uint32_t page_offset;
958 target_ulong vaddr, virt_addr;
960 is_user = mmu_idx == MMU_USER_IDX;
961 #if defined(DEBUG_MMU)
962 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
963 addr, is_write1, is_user, env->eip);
964 #endif
965 is_write = is_write1 & 1;
967 if (!(env->cr[0] & CR0_PG_MASK)) {
968 pte = addr;
969 virt_addr = addr & TARGET_PAGE_MASK;
970 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
971 page_size = 4096;
972 goto do_mapping;
975 if (env->cr[4] & CR4_PAE_MASK) {
976 uint64_t pde, pdpe;
977 target_ulong pdpe_addr;
979 #ifdef TARGET_X86_64
980 if (env->hflags & HF_LMA_MASK) {
981 uint64_t pml4e_addr, pml4e;
982 int32_t sext;
984 /* test virtual address sign extension */
985 sext = (int64_t)addr >> 47;
986 if (sext != 0 && sext != -1) {
987 env->error_code = 0;
988 env->exception_index = EXCP0D_GPF;
989 return 1;
992 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
993 env->a20_mask;
994 pml4e = ldq_phys(pml4e_addr);
995 if (!(pml4e & PG_PRESENT_MASK)) {
996 error_code = 0;
997 goto do_fault;
999 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
1000 error_code = PG_ERROR_RSVD_MASK;
1001 goto do_fault;
1003 if (!(pml4e & PG_ACCESSED_MASK)) {
1004 pml4e |= PG_ACCESSED_MASK;
1005 stl_phys_notdirty(pml4e_addr, pml4e);
1007 ptep = pml4e ^ PG_NX_MASK;
1008 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
1009 env->a20_mask;
1010 pdpe = ldq_phys(pdpe_addr);
1011 if (!(pdpe & PG_PRESENT_MASK)) {
1012 error_code = 0;
1013 goto do_fault;
1015 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
1016 error_code = PG_ERROR_RSVD_MASK;
1017 goto do_fault;
1019 ptep &= pdpe ^ PG_NX_MASK;
1020 if (!(pdpe & PG_ACCESSED_MASK)) {
1021 pdpe |= PG_ACCESSED_MASK;
1022 stl_phys_notdirty(pdpe_addr, pdpe);
1024 } else
1025 #endif
1027 /* XXX: load them when cr3 is loaded ? */
1028 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1029 env->a20_mask;
1030 pdpe = ldq_phys(pdpe_addr);
1031 if (!(pdpe & PG_PRESENT_MASK)) {
1032 error_code = 0;
1033 goto do_fault;
1035 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
1038 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
1039 env->a20_mask;
1040 pde = ldq_phys(pde_addr);
1041 if (!(pde & PG_PRESENT_MASK)) {
1042 error_code = 0;
1043 goto do_fault;
1045 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
1046 error_code = PG_ERROR_RSVD_MASK;
1047 goto do_fault;
1049 ptep &= pde ^ PG_NX_MASK;
1050 if (pde & PG_PSE_MASK) {
1051 /* 2 MB page */
1052 page_size = 2048 * 1024;
1053 ptep ^= PG_NX_MASK;
1054 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1055 goto do_fault_protect;
1056 if (is_user) {
1057 if (!(ptep & PG_USER_MASK))
1058 goto do_fault_protect;
1059 if (is_write && !(ptep & PG_RW_MASK))
1060 goto do_fault_protect;
1061 } else {
1062 if ((env->cr[0] & CR0_WP_MASK) &&
1063 is_write && !(ptep & PG_RW_MASK))
1064 goto do_fault_protect;
1066 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1067 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1068 pde |= PG_ACCESSED_MASK;
1069 if (is_dirty)
1070 pde |= PG_DIRTY_MASK;
1071 stl_phys_notdirty(pde_addr, pde);
1073 /* align to page_size */
1074 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
1075 virt_addr = addr & ~(page_size - 1);
1076 } else {
1077 /* 4 KB page */
1078 if (!(pde & PG_ACCESSED_MASK)) {
1079 pde |= PG_ACCESSED_MASK;
1080 stl_phys_notdirty(pde_addr, pde);
1082 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
1083 env->a20_mask;
1084 pte = ldq_phys(pte_addr);
1085 if (!(pte & PG_PRESENT_MASK)) {
1086 error_code = 0;
1087 goto do_fault;
1089 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
1090 error_code = PG_ERROR_RSVD_MASK;
1091 goto do_fault;
1093 /* combine pde and pte nx, user and rw protections */
1094 ptep &= pte ^ PG_NX_MASK;
1095 ptep ^= PG_NX_MASK;
1096 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1097 goto do_fault_protect;
1098 if (is_user) {
1099 if (!(ptep & PG_USER_MASK))
1100 goto do_fault_protect;
1101 if (is_write && !(ptep & PG_RW_MASK))
1102 goto do_fault_protect;
1103 } else {
1104 if ((env->cr[0] & CR0_WP_MASK) &&
1105 is_write && !(ptep & PG_RW_MASK))
1106 goto do_fault_protect;
1108 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1109 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1110 pte |= PG_ACCESSED_MASK;
1111 if (is_dirty)
1112 pte |= PG_DIRTY_MASK;
1113 stl_phys_notdirty(pte_addr, pte);
1115 page_size = 4096;
1116 virt_addr = addr & ~0xfff;
1117 pte = pte & (PHYS_ADDR_MASK | 0xfff);
1119 } else {
1120 uint32_t pde;
1122 /* page directory entry */
1123 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
1124 env->a20_mask;
1125 pde = ldl_phys(pde_addr);
1126 if (!(pde & PG_PRESENT_MASK)) {
1127 error_code = 0;
1128 goto do_fault;
1130 /* if PSE bit is set, then we use a 4MB page */
1131 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1132 page_size = 4096 * 1024;
1133 if (is_user) {
1134 if (!(pde & PG_USER_MASK))
1135 goto do_fault_protect;
1136 if (is_write && !(pde & PG_RW_MASK))
1137 goto do_fault_protect;
1138 } else {
1139 if ((env->cr[0] & CR0_WP_MASK) &&
1140 is_write && !(pde & PG_RW_MASK))
1141 goto do_fault_protect;
1143 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1144 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1145 pde |= PG_ACCESSED_MASK;
1146 if (is_dirty)
1147 pde |= PG_DIRTY_MASK;
1148 stl_phys_notdirty(pde_addr, pde);
1151 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1152 ptep = pte;
1153 virt_addr = addr & ~(page_size - 1);
1154 } else {
1155 if (!(pde & PG_ACCESSED_MASK)) {
1156 pde |= PG_ACCESSED_MASK;
1157 stl_phys_notdirty(pde_addr, pde);
1160 /* page directory entry */
1161 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1162 env->a20_mask;
1163 pte = ldl_phys(pte_addr);
1164 if (!(pte & PG_PRESENT_MASK)) {
1165 error_code = 0;
1166 goto do_fault;
1168 /* combine pde and pte user and rw protections */
1169 ptep = pte & pde;
1170 if (is_user) {
1171 if (!(ptep & PG_USER_MASK))
1172 goto do_fault_protect;
1173 if (is_write && !(ptep & PG_RW_MASK))
1174 goto do_fault_protect;
1175 } else {
1176 if ((env->cr[0] & CR0_WP_MASK) &&
1177 is_write && !(ptep & PG_RW_MASK))
1178 goto do_fault_protect;
1180 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1181 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1182 pte |= PG_ACCESSED_MASK;
1183 if (is_dirty)
1184 pte |= PG_DIRTY_MASK;
1185 stl_phys_notdirty(pte_addr, pte);
1187 page_size = 4096;
1188 virt_addr = addr & ~0xfff;
1191 /* the page can be put in the TLB */
1192 prot = PAGE_READ;
1193 if (!(ptep & PG_NX_MASK))
1194 prot |= PAGE_EXEC;
1195 if (pte & PG_DIRTY_MASK) {
1196 /* only set write access if already dirty... otherwise wait
1197 for dirty access */
1198 if (is_user) {
1199 if (ptep & PG_RW_MASK)
1200 prot |= PAGE_WRITE;
1201 } else {
1202 if (!(env->cr[0] & CR0_WP_MASK) ||
1203 (ptep & PG_RW_MASK))
1204 prot |= PAGE_WRITE;
1207 do_mapping:
1208 pte = pte & env->a20_mask;
1210 /* Even if 4MB pages, we map only one 4KB page in the cache to
1211 avoid filling it too fast */
1212 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1213 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1214 vaddr = virt_addr + page_offset;
1216 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1217 return ret;
1218 do_fault_protect:
1219 error_code = PG_ERROR_P_MASK;
1220 do_fault:
1221 error_code |= (is_write << PG_ERROR_W_BIT);
1222 if (is_user)
1223 error_code |= PG_ERROR_U_MASK;
1224 if (is_write1 == 2 &&
1225 (env->efer & MSR_EFER_NXE) &&
1226 (env->cr[4] & CR4_PAE_MASK))
1227 error_code |= PG_ERROR_I_D_MASK;
1228 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
1229 /* cr2 is not modified in case of exceptions */
1230 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
1231 addr);
1232 } else {
1233 env->cr[2] = addr;
1235 env->error_code = error_code;
1236 env->exception_index = EXCP0E_PAGE;
1237 return 1;
/* Software page-table walk for debugger/monitor accesses: translate a
 * guest linear address to a guest-physical address WITHOUT raising a
 * page fault, installing a TLB entry, or setting accessed/dirty bits.
 * Returns the physical address, or -1 if the mapping is not present
 * (or the address is non-canonical in long mode). */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        /* PAE (and long-mode) paging: 64-bit table entries */
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            /* 4-level long-mode walk: PML4 -> PDP -> PD -> PT */
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension (bits 63..47 must be
               all zeros or all ones, i.e. a canonical address) */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
            /* 32-bit PAE: CR3 points at the 4-entry PDPT */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        /* legacy 32-bit (non-PAE) paging */
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: identity mapping */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4 MB page (PSE) */
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
        /* apply the A20 gate to the final frame address */
        pte = pte & env->a20_mask;
    /* recombine the page frame with the in-page offset */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
1334 void hw_breakpoint_insert(CPUState *env, int index)
1336 int type, err = 0;
1338 switch (hw_breakpoint_type(env->dr[7], index)) {
1339 case 0:
1340 if (hw_breakpoint_enabled(env->dr[7], index))
1341 err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
1342 &env->cpu_breakpoint[index]);
1343 break;
1344 case 1:
1345 type = BP_CPU | BP_MEM_WRITE;
1346 goto insert_wp;
1347 case 2:
1348 /* No support for I/O watchpoints yet */
1349 break;
1350 case 3:
1351 type = BP_CPU | BP_MEM_ACCESS;
1352 insert_wp:
1353 err = cpu_watchpoint_insert(env, env->dr[index],
1354 hw_breakpoint_len(env->dr[7], index),
1355 type, &env->cpu_watchpoint[index]);
1356 break;
1358 if (err)
1359 env->cpu_breakpoint[index] = NULL;
1362 void hw_breakpoint_remove(CPUState *env, int index)
1364 if (!env->cpu_breakpoint[index])
1365 return;
1366 switch (hw_breakpoint_type(env->dr[7], index)) {
1367 case 0:
1368 if (hw_breakpoint_enabled(env->dr[7], index))
1369 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
1370 break;
1371 case 1:
1372 case 3:
1373 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
1374 break;
1375 case 2:
1376 /* No support for I/O watchpoints yet */
1377 break;
1381 int check_hw_breakpoints(CPUState *env, int force_dr6_update)
1383 target_ulong dr6;
1384 int reg, type;
1385 int hit_enabled = 0;
1387 dr6 = env->dr[6] & ~0xf;
1388 for (reg = 0; reg < 4; reg++) {
1389 type = hw_breakpoint_type(env->dr[7], reg);
1390 if ((type == 0 && env->dr[reg] == env->eip) ||
1391 ((type & 1) && env->cpu_watchpoint[reg] &&
1392 (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
1393 dr6 |= 1 << reg;
1394 if (hw_breakpoint_enabled(env->dr[7], reg))
1395 hit_enabled = 1;
1398 if (hit_enabled || force_dr6_update)
1399 env->dr[6] = dr6;
1400 return hit_enabled;
/* Debug-exception handler that was active before ours; we chain to it
   so the host debugger (gdbstub) still sees its own breakpoints. */
static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception(int exception_index);

/* Debug-exception hook installed via cpu_set_debug_excp_handler().
 * Separates guest-installed (BP_CPU) hardware breakpoints/watchpoints,
 * which must be reflected to the guest as #DB, from host-debugger
 * breakpoints, which are left to the previous handler. */
static void breakpoint_handler(CPUState *env)
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        /* a data watchpoint fired */
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                /* enabled in guest DR7: deliver #DB to the guest */
                raise_exception(EXCP01_DB);
            else
                /* not an enabled guest breakpoint: just resume */
                cpu_resume_from_signal(env, NULL);
    } else {
        /* an instruction breakpoint at the current EIP */
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    /* force the DR6 update, then raise #DB in the guest */
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                break;
    /* chain to the handler that was installed before ours */
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
1432 #endif /* !CONFIG_USER_ONLY */
/* Execute the CPUID instruction on the HOST CPU for leaf 'function',
 * subleaf 'count', storing results through each non-NULL output
 * pointer.  The body is only compiled when KVM support is built in;
 * in non-KVM builds the outputs are left untouched.
 * NOTE(review): callers appear to invoke this only under
 * kvm_enabled() — verify no caller reads the outputs in non-KVM
 * builds, since they would be uninitialized. */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
#if defined(CONFIG_KVM) || defined(USE_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    /* 32-bit: save/restore all GPRs around CPUID (pusha/popa) and
       spill the results through the pointer in %esi instead of using
       register outputs, so EBX stays usable under -fPIC. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    /* copy out only the registers the caller asked for */
    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
/* Emulated CPUID: fill in *eax..*edx for leaf 'index', subleaf 'count'.
 * All four output pointers must be valid (they are written
 * unconditionally).  When running under KVM, some leaves are patched
 * with host CPUID values or trimmed to what the host/KVM can provide. */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
    /* test if maximum index reached */
    if (index & 0x80000000) {
        /* out-of-range extended leaf falls back to cpuid_level, i.e.
           the highest basic leaf — presumably matching Intel's
           out-of-range behaviour; TODO confirm intent */
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;

    switch(index) {
    case 0:
        /* vendor string + maximum basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported in compatibility mode on AMD, and
         * syscall isn't supported in compatibility mode on Intel, so
         * advertise the actual CPU, and say goodbye to migration
         * between different vendors if you use compatibility mode. */
        if (kvm_enabled())
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        /* family/model/stepping, APIC id and feature flags */
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;

        /* "Hypervisor present" bit required for Microsoft SVVP */
        if (kvm_enabled())
            *ecx |= (1 << 31);
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility; the subleaf in
           'count' selects which cache level is described */
        switch (count) {
            case 0: /* L1 dcache info */
                *eax = 0x0000121;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 1: /* L1 icache info */
                *eax = 0x0000122;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 2: /* L2 cache info */
                *eax = 0x0000143;
                *ebx = 0x3c0003f;
                *ecx = 0x0000fff;
                *edx = 0x0000001;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* maximum extended leaf + vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* extended feature flags */
        *eax = env->cpuid_features;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            uint32_t h_eax, h_edx;

            host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx);

            /* disable CPU features that the host does not support */

            /* long mode */
            if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
                *edx &= ~0x20000000;
            /* syscall */
            if ((h_edx & 0x00000800) == 0)
                *edx &= ~0x00000800;
            /* nx */
            if ((h_edx & 0x00100000) == 0)
                *edx &= ~0x00100000;

            /* disable CPU features that KVM cannot support */

            /* svm: only advertised when nested virtualization is on */
            if (!kvm_nested)
                *ecx &= ~4UL;
            /* 3dnow */
            *edx &= ~0xc0000000;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(USE_KQEMU)
            *eax = 0x00003020; /* 48 bits virtual, 32 bits physical */
#else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(USE_KQEMU)
            *eax = 0x00000020; /* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        /* SVM (nested virtualization) capabilities */
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
/* Allocate and initialize a new x86 CPU state for the given model
 * string.  Performs one-time global initialization (translator tables,
 * debug exception handler) on the first call.  Returns NULL if the
 * model string is not recognized by cpu_x86_register(). */
CPUX86State *cpu_x86_init(const char *cpu_model)
    CPUX86State *env;
    static int inited;   /* guards the one-time global init below */

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        /* hook the debug-exception path so guest DR7 breakpoints work;
           keep the old handler so it can be chained to */
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    if (cpu_x86_register(env, cpu_model) < 0) {
        /* unknown model: free the half-built CPU and fail */
        cpu_x86_close(env);
        return NULL;
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    if (kvm_enabled())
        kvm_init_vcpu(env);
    if (kvm_enabled()) {
        /* mask out guest CPUID feature bits the host/KVM cannot
           actually provide, warning by name via the feature tables */
        kvm_trim_features(&env->cpuid_features,
                          kvm_arch_get_supported_cpuid(env, 1, R_EDX),
                          feature_name);
        kvm_trim_features(&env->cpuid_ext_features,
                          kvm_arch_get_supported_cpuid(env, 1, R_ECX),
                          ext_feature_name);
        kvm_trim_features(&env->cpuid_ext2_features,
                          kvm_arch_get_supported_cpuid(env, 0x80000001, R_EDX),
                          ext2_feature_name);
        kvm_trim_features(&env->cpuid_ext3_features,
                          kvm_arch_get_supported_cpuid(env, 0x80000001, R_ECX),
                          ext3_feature_name);

    return env;