target-i386/helper.c
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"
//#define DEBUG_MMU
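/* Map a feature-flag name (as used with "-cpu model,+flag") onto the
   corresponding bit in one of the four CPUID feature words: leaf 1
   EDX/ECX and leaf 0x80000001 EDX/ECX.  Unknown names are reported on
   stderr. */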
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
        NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor",
        "ds_cpl", "vmx", NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall",
        "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */,
        NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */,
        "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm",
        "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */,
        "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL,
        "skinit", "wdt", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    for (i = 0; i < 32; i++)
        if (feature_name[i] && !strcmp(flagname, feature_name[i])) {
            *features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext_feature_name[i] && !strcmp(flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext2_feature_name[i] && !strcmp(flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext3_feature_name[i] && !strcmp(flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            return;
        }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}

typedef struct x86_def_t {
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;
    char model_id[48];
} x86_def_t;

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
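/* Built-in CPU models, selectable with "-cpu <name>".  A zero vendor1
   means the default (Intel) vendor string is used; xlevel is the highest
   supported extended CPUID leaf. */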
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
           CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
           CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
           CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
           CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
           CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
           CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
           CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
        /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
         * CPUID_HT | CPUID_TM | CPUID_PBE */
        /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
        /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
         * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
};

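/* Look up "cpu_model" in x86_defs and apply the optional comma-separated
   modifiers: "+flag" / "-flag" to set or clear feature bits, and
   "family=", "model=", "stepping=", "vendor=", "model_id=" to override
   the corresponding fields.  Returns 0 on success, -1 on error. */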
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    def = NULL;
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def)
        goto error;
    memcpy(x86_cpu_def, def, sizeof(*def));

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}

void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
}

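/* Fill in the CPUID-related fields of "env" from the named model.  The
   family/model/stepping triple is packed into cpuid_version using the
   CPUID leaf 1 EAX layout (families above 0x0f use the extended-family
   encoding). */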
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_level = def->level;
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0; i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}

void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}

/***********************************************************/
/* x86 debug */

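/* Names for the CC_OP_* lazy condition-code states, indexed by env->cc_op;
   used by cpu_dump_state() below. */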
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    if (kvm_enabled())
        kvm_arch_get_registers(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i = 0; i < 8; i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

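/* Update the A20 address-line mask.  Changing A20 invalidates all
   cached translations, so chained TBs are unlinked and the TLB is
   flushed. */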
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = (~0x100000) | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
#if defined(USE_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
# define PHYS_ADDR_MASK 0xfffffff000LL
# else
# define PHYS_ADDR_MASK 0xffffff000LL
# endif
#endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

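/* Translate a virtual address by walking the page tables read-only: no
   fault is raised and no accessed/dirty bits are set.  Returns -1 if the
   address has no valid mapping.  Used for debug accesses (e.g. from the
   monitor or gdb stub). */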
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

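/* Install the hardware breakpoint in debug register "index" with the
   generic breakpoint/watchpoint machinery.  DR7 type 0 is an execution
   breakpoint, 1 a data-write watchpoint, 3 a data read/write watchpoint;
   type 2 (I/O) is not supported. */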
void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}

void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}

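/* Recompute the DR6 status bits after a debug event.  Returns nonzero if
   an enabled hardware breakpoint or watchpoint was hit; DR6 is updated
   in that case or when "force_dr6_update" is set. */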
int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}

static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception(int exception_index);

static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
#endif /* !CONFIG_USER_ONLY */

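/* Run the CPUID instruction on the host CPU and return its results; the
   body is only compiled in when CONFIG_KVM is defined.  The 32-bit
   variant saves and restores all general-purpose registers with
   pusha/popa around the call. */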
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}

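/* Return the emulated CPUID leaf "index" (with sub-leaf "count" for
   leaf 4) in *eax..*edx.  Requests beyond the highest supported leaf are
   clamped; out-of-range extended leaves fall back to the basic level. */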
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported in compatibility mode on AMD, and
         * syscall isn't supported in compatibility mode on Intel.
         * So advertise the actual CPU, and say goodbye to migration
         * between different vendors if you use compatibility mode. */
        if (kvm_enabled())
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;

        /* "Hypervisor present" bit required for Microsoft SVVP */
        if (kvm_enabled())
            *ecx |= (1 << 31);
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        switch (count) {
            case 0: /* L1 dcache info */
                *eax = 0x0000121;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 1: /* L1 icache info */
                *eax = 0x0000122;
                *ebx = 0x1c0003f;
                *ecx = 0x000003f;
                *edx = 0x0000001;
                break;
            case 2: /* L2 cache info */
                *eax = 0x0000143;
                *ebx = 0x3c0003f;
                *ecx = 0x0000fff;
                *edx = 0x0000001;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_features;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            uint32_t h_eax, h_edx;

            host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx);

            /* disable CPU features that the host does not support */

            /* long mode */
            if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
                *edx &= ~0x20000000;
            /* syscall */
            if ((h_edx & 0x00000800) == 0)
                *edx &= ~0x00000800;
            /* nx */
            if ((h_edx & 0x00100000) == 0)
                *edx &= ~0x00100000;

            /* disable CPU features that KVM cannot support */

            /* svm */
            *ecx &= ~4UL;
            /* 3dnow */
            *edx &= ~0xc0000000;
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(USE_KQEMU)
            *eax = 0x00003020; /* 48 bits virtual, 32 bits physical */
#else
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(USE_KQEMU)
            *eax = 0x00000020; /* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}

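/* Allocate and initialize a CPUX86State for the given "-cpu" model string.
   Returns NULL if the model is unknown.  One-time global initialization
   (optimize_flags_init, the debug exception handler) happens on the first
   call. */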
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    if (kvm_enabled())
        kvm_init_vcpu(env);
    return env;
}