Make binary stripping conditional (Riku Voipio)
[qemu-kvm/fedora.git] / target-i386 / helper.c
blob5578f4169397befc3558fc84f749fb3241ae12b1
1 /*
2 * i386 helpers (without register variable usage)
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
26 #include <assert.h>
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "qemu-common.h"
31 #include "kvm.h"
33 #include "qemu-kvm.h"
35 //#define DEBUG_MMU
37 /* feature flags taken from "Intel Processor Identification and the CPUID
38 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
39 * about feature names, the Linux name is used. */
/* CPUID.1:EDX feature flag names, indexed by bit position (0..31).
   NULL marks reserved bits that have no user-settable name. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* CPUID.1:ECX feature flag names, indexed by bit position (0..31). */
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
/* CPUID.0x80000001:EDX feature flag names, indexed by bit position.
   The low bits deliberately repeat the base EDX names (AMD mirrors
   them in the extended leaf). */
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
/* CPUID.0x80000001:ECX feature flag names, indexed by bit position. */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
65 static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
66 uint32_t *ext_features,
67 uint32_t *ext2_features,
68 uint32_t *ext3_features)
70 int i;
71 int found = 0;
73 for ( i = 0 ; i < 32 ; i++ )
74 if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
75 *features |= 1 << i;
76 found = 1;
78 for ( i = 0 ; i < 32 ; i++ )
79 if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
80 *ext_features |= 1 << i;
81 found = 1;
83 for ( i = 0 ; i < 32 ; i++ )
84 if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
85 *ext2_features |= 1 << i;
86 found = 1;
88 for ( i = 0 ; i < 32 ; i++ )
89 if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
90 *ext3_features |= 1 << i;
91 found = 1;
93 if (!found) {
94 fprintf(stderr, "CPU feature %s not found\n", flagname);
/* Clear from *features every bit that is set there but absent from the
 * 'supported' mask (i.e. features the host/KVM cannot provide).
 * 'names' is unused in this body; kept for interface compatibility with
 * callers that pass the flag-name table. */
static void kvm_trim_features(uint32_t *features, uint32_t supported,
                              const char *names[])
{
    int i;
    uint32_t mask;

    for (i = 0; i < 32; ++i) {
        mask = 1U << i;
        if ((*features & mask) && !(supported & mask)) {
            *features &= ~mask;
        }
    }
}
112 extern const char *cpu_vendor_string;
/* Description of one CPU model selectable with "-cpu <name>". */
typedef struct x86_def_t {
    const char *name;                 /* model name matched against -cpu */
    uint32_t level;                   /* highest basic CPUID leaf */
    uint32_t vendor1, vendor2, vendor3;  /* CPUID.0 vendor id words (EBX/EDX/ECX); 0 = use default */
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;  /* CPUID.1 EDX/ECX, 0x80000001 EDX/ECX */
    uint32_t xlevel;                  /* highest extended CPUID leaf (0x8000xxxx) */
    char model_id[48];                /* brand string for leaves 0x80000002..4 */
} x86_def_t;
/* Convenience CPUID.1:EDX feature bundles used by the x86_defs table
   below.  Each Pentium generation builds on the previous one.
   NOTE: PPRO_FEATURES is not part of that chain: relative to the
   Pentium bundles it drops CPUID_VME and adds SSE/SSE2/APIC. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
137 static x86_def_t x86_defs[] = {
138 #ifdef TARGET_X86_64
140 .name = "qemu64",
141 .level = 2,
142 .vendor1 = CPUID_VENDOR_AMD_1,
143 .vendor2 = CPUID_VENDOR_AMD_2,
144 .vendor3 = CPUID_VENDOR_AMD_3,
145 .family = 6,
146 .model = 2,
147 .stepping = 3,
148 .features = PPRO_FEATURES |
149 /* these features are needed for Win64 and aren't fully implemented */
150 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
151 /* this feature is needed for Solaris and isn't fully implemented */
152 CPUID_PSE36,
153 .ext_features = CPUID_EXT_SSE3,
154 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
155 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
156 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
157 .ext3_features = CPUID_EXT3_SVM,
158 .xlevel = 0x8000000A,
159 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
162 .name = "phenom",
163 .level = 5,
164 .vendor1 = CPUID_VENDOR_AMD_1,
165 .vendor2 = CPUID_VENDOR_AMD_2,
166 .vendor3 = CPUID_VENDOR_AMD_3,
167 .family = 16,
168 .model = 2,
169 .stepping = 3,
170 /* Missing: CPUID_VME, CPUID_HT */
171 .features = PPRO_FEATURES |
172 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
173 CPUID_PSE36,
174 /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
175 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
176 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
177 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
178 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
179 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
180 CPUID_EXT2_FFXSR,
181 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
182 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
183 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
184 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
185 .ext3_features = CPUID_EXT3_SVM,
186 .xlevel = 0x8000001A,
187 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
190 .name = "core2duo",
191 .level = 10,
192 .family = 6,
193 .model = 15,
194 .stepping = 11,
195 /* The original CPU also implements these features:
196 CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
197 CPUID_TM, CPUID_PBE */
198 .features = PPRO_FEATURES |
199 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
200 CPUID_PSE36,
201 /* The original CPU also implements these ext features:
202 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
203 CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
204 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
205 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
206 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
207 .xlevel = 0x80000008,
208 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
210 #endif
212 .name = "qemu32",
213 .level = 2,
214 .family = 6,
215 .model = 3,
216 .stepping = 3,
217 .features = PPRO_FEATURES,
218 .ext_features = CPUID_EXT_SSE3,
219 .xlevel = 0,
220 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
223 .name = "coreduo",
224 .level = 10,
225 .family = 6,
226 .model = 14,
227 .stepping = 8,
228 /* The original CPU also implements these features:
229 CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
230 CPUID_TM, CPUID_PBE */
231 .features = PPRO_FEATURES | CPUID_VME |
232 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
233 /* The original CPU also implements these ext features:
234 CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
235 CPUID_EXT_PDCM */
236 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
237 .ext2_features = CPUID_EXT2_NX,
238 .xlevel = 0x80000008,
239 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
242 .name = "486",
243 .level = 0,
244 .family = 4,
245 .model = 0,
246 .stepping = 0,
247 .features = I486_FEATURES,
248 .xlevel = 0,
251 .name = "pentium",
252 .level = 1,
253 .family = 5,
254 .model = 4,
255 .stepping = 3,
256 .features = PENTIUM_FEATURES,
257 .xlevel = 0,
260 .name = "pentium2",
261 .level = 2,
262 .family = 6,
263 .model = 5,
264 .stepping = 2,
265 .features = PENTIUM2_FEATURES,
266 .xlevel = 0,
269 .name = "pentium3",
270 .level = 2,
271 .family = 6,
272 .model = 7,
273 .stepping = 3,
274 .features = PENTIUM3_FEATURES,
275 .xlevel = 0,
278 .name = "athlon",
279 .level = 2,
280 .vendor1 = 0x68747541, /* "Auth" */
281 .vendor2 = 0x69746e65, /* "enti" */
282 .vendor3 = 0x444d4163, /* "cAMD" */
283 .family = 6,
284 .model = 2,
285 .stepping = 3,
286 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
287 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
288 .xlevel = 0x80000008,
289 /* XXX: put another string ? */
290 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
293 .name = "n270",
294 /* original is on level 10 */
295 .level = 5,
296 .family = 6,
297 .model = 28,
298 .stepping = 2,
299 .features = PPRO_FEATURES |
300 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
301 /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
302 * CPUID_HT | CPUID_TM | CPUID_PBE */
303 /* Some CPUs got no CPUID_SEP */
304 .ext_features = CPUID_EXT_MONITOR |
305 CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
306 /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
307 * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
308 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
309 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
310 .xlevel = 0x8000000A,
311 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
315 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
317 unsigned int i;
318 x86_def_t *def;
320 char *s = strdup(cpu_model);
321 char *featurestr, *name = strtok(s, ",");
322 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
323 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
324 int family = -1, model = -1, stepping = -1;
326 def = NULL;
327 for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
328 if (strcmp(name, x86_defs[i].name) == 0) {
329 def = &x86_defs[i];
330 break;
333 if (!def)
334 goto error;
335 memcpy(x86_cpu_def, def, sizeof(*def));
337 featurestr = strtok(NULL, ",");
339 while (featurestr) {
340 char *val;
341 if (featurestr[0] == '+') {
342 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
343 } else if (featurestr[0] == '-') {
344 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
345 } else if ((val = strchr(featurestr, '='))) {
346 *val = 0; val++;
347 if (!strcmp(featurestr, "family")) {
348 char *err;
349 family = strtol(val, &err, 10);
350 if (!*val || *err || family < 0) {
351 fprintf(stderr, "bad numerical value %s\n", val);
352 goto error;
354 x86_cpu_def->family = family;
355 } else if (!strcmp(featurestr, "model")) {
356 char *err;
357 model = strtol(val, &err, 10);
358 if (!*val || *err || model < 0 || model > 0xff) {
359 fprintf(stderr, "bad numerical value %s\n", val);
360 goto error;
362 x86_cpu_def->model = model;
363 } else if (!strcmp(featurestr, "stepping")) {
364 char *err;
365 stepping = strtol(val, &err, 10);
366 if (!*val || *err || stepping < 0 || stepping > 0xf) {
367 fprintf(stderr, "bad numerical value %s\n", val);
368 goto error;
370 x86_cpu_def->stepping = stepping;
371 } else if (!strcmp(featurestr, "vendor")) {
372 if (strlen(val) != 12) {
373 fprintf(stderr, "vendor string must be 12 chars long\n");
374 goto error;
376 x86_cpu_def->vendor1 = 0;
377 x86_cpu_def->vendor2 = 0;
378 x86_cpu_def->vendor3 = 0;
379 for(i = 0; i < 4; i++) {
380 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
381 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
382 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
384 } else if (!strcmp(featurestr, "model_id")) {
385 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
386 val);
387 } else {
388 fprintf(stderr, "unrecognized feature %s\n", featurestr);
389 goto error;
391 } else {
392 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
393 goto error;
395 featurestr = strtok(NULL, ",");
397 x86_cpu_def->features |= plus_features;
398 x86_cpu_def->ext_features |= plus_ext_features;
399 x86_cpu_def->ext2_features |= plus_ext2_features;
400 x86_cpu_def->ext3_features |= plus_ext3_features;
401 x86_cpu_def->features &= ~minus_features;
402 x86_cpu_def->ext_features &= ~minus_ext_features;
403 x86_cpu_def->ext2_features &= ~minus_ext2_features;
404 x86_cpu_def->ext3_features &= ~minus_ext3_features;
405 free(s);
406 return 0;
408 error:
409 free(s);
410 return -1;
413 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
415 unsigned int i;
417 for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
418 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
421 static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
423 x86_def_t def1, *def = &def1;
425 if (cpu_x86_find_by_name(def, cpu_model) < 0)
426 return -1;
427 if (def->vendor1) {
428 env->cpuid_vendor1 = def->vendor1;
429 env->cpuid_vendor2 = def->vendor2;
430 env->cpuid_vendor3 = def->vendor3;
431 } else {
432 env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
433 env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
434 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
436 env->cpuid_level = def->level;
437 if (def->family > 0x0f)
438 env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
439 else
440 env->cpuid_version = def->family << 8;
441 env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
442 env->cpuid_version |= def->stepping;
443 env->cpuid_features = def->features;
444 env->pat = 0x0007040600070406ULL;
445 env->cpuid_ext_features = def->ext_features;
446 env->cpuid_ext2_features = def->ext2_features;
447 env->cpuid_xlevel = def->xlevel;
448 env->cpuid_ext3_features = def->ext3_features;
450 const char *model_id = def->model_id;
451 int c, len, i;
453 if (cpu_vendor_string != NULL)
454 model_id = cpu_vendor_string;
455 if (!model_id)
456 model_id = "";
457 len = strlen(model_id);
458 for(i = 0; i < 48; i++) {
459 if (i >= len)
460 c = '\0';
461 else
462 c = (uint8_t)model_id[i];
463 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
466 return 0;
469 /* NOTE: must be called outside the CPU execute loop */
470 void cpu_reset(CPUX86State *env)
472 int i;
474 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
475 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
476 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
479 memset(env, 0, offsetof(CPUX86State, breakpoints));
481 tlb_flush(env, 1);
483 env->old_exception = -1;
485 /* init to reset state */
487 #ifdef CONFIG_SOFTMMU
488 env->hflags |= HF_SOFTMMU_MASK;
489 #endif
490 env->hflags2 |= HF2_GIF_MASK;
492 cpu_x86_update_cr0(env, 0x60000010);
493 env->a20_mask = ~0x0;
494 env->smbase = 0x30000;
496 env->idt.limit = 0xffff;
497 env->gdt.limit = 0xffff;
498 env->ldt.limit = 0xffff;
499 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
500 env->tr.limit = 0xffff;
501 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
503 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
504 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
505 DESC_R_MASK | DESC_A_MASK);
506 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
507 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
508 DESC_A_MASK);
509 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
510 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
511 DESC_A_MASK);
512 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
513 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
514 DESC_A_MASK);
515 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
516 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
517 DESC_A_MASK);
518 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
519 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
520 DESC_A_MASK);
522 env->eip = 0xfff0;
523 env->regs[R_EDX] = env->cpuid_version;
525 env->eflags = 0x2;
527 /* FPU init */
528 for(i = 0;i < 8; i++)
529 env->fptags[i] = 1;
530 env->fpuc = 0x37f;
532 env->mxcsr = 0x1f80;
534 memset(env->dr, 0, sizeof(env->dr));
535 env->dr[6] = DR6_FIXED_1;
536 env->dr[7] = DR7_FIXED_1;
537 cpu_breakpoint_remove_all(env, BP_CPU);
538 cpu_watchpoint_remove_all(env, BP_CPU);
541 void cpu_x86_close(CPUX86State *env)
543 qemu_free(env);
546 /***********************************************************/
547 /* x86 debug */
549 static const char *cc_op_str[] = {
550 "DYNAMIC",
551 "EFLAGS",
553 "MULB",
554 "MULW",
555 "MULL",
556 "MULQ",
558 "ADDB",
559 "ADDW",
560 "ADDL",
561 "ADDQ",
563 "ADCB",
564 "ADCW",
565 "ADCL",
566 "ADCQ",
568 "SUBB",
569 "SUBW",
570 "SUBL",
571 "SUBQ",
573 "SBBB",
574 "SBBW",
575 "SBBL",
576 "SBBQ",
578 "LOGICB",
579 "LOGICW",
580 "LOGICL",
581 "LOGICQ",
583 "INCB",
584 "INCW",
585 "INCL",
586 "INCQ",
588 "DECB",
589 "DECW",
590 "DECL",
591 "DECQ",
593 "SHLB",
594 "SHLW",
595 "SHLL",
596 "SHLQ",
598 "SARB",
599 "SARW",
600 "SARL",
601 "SARQ",
604 void cpu_dump_state(CPUState *env, FILE *f,
605 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
606 int flags)
608 int eflags, i, nb;
609 char cc_op_name[32];
610 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
612 if (kvm_enabled())
613 kvm_arch_get_registers(env);
615 eflags = env->eflags;
616 #ifdef TARGET_X86_64
617 if (env->hflags & HF_CS64_MASK) {
618 cpu_fprintf(f,
619 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
620 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
621 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
622 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
623 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
624 env->regs[R_EAX],
625 env->regs[R_EBX],
626 env->regs[R_ECX],
627 env->regs[R_EDX],
628 env->regs[R_ESI],
629 env->regs[R_EDI],
630 env->regs[R_EBP],
631 env->regs[R_ESP],
632 env->regs[8],
633 env->regs[9],
634 env->regs[10],
635 env->regs[11],
636 env->regs[12],
637 env->regs[13],
638 env->regs[14],
639 env->regs[15],
640 env->eip, eflags,
641 eflags & DF_MASK ? 'D' : '-',
642 eflags & CC_O ? 'O' : '-',
643 eflags & CC_S ? 'S' : '-',
644 eflags & CC_Z ? 'Z' : '-',
645 eflags & CC_A ? 'A' : '-',
646 eflags & CC_P ? 'P' : '-',
647 eflags & CC_C ? 'C' : '-',
648 env->hflags & HF_CPL_MASK,
649 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
650 (int)(env->a20_mask >> 20) & 1,
651 (env->hflags >> HF_SMM_SHIFT) & 1,
652 env->halted);
653 } else
654 #endif
656 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
657 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
658 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
659 (uint32_t)env->regs[R_EAX],
660 (uint32_t)env->regs[R_EBX],
661 (uint32_t)env->regs[R_ECX],
662 (uint32_t)env->regs[R_EDX],
663 (uint32_t)env->regs[R_ESI],
664 (uint32_t)env->regs[R_EDI],
665 (uint32_t)env->regs[R_EBP],
666 (uint32_t)env->regs[R_ESP],
667 (uint32_t)env->eip, eflags,
668 eflags & DF_MASK ? 'D' : '-',
669 eflags & CC_O ? 'O' : '-',
670 eflags & CC_S ? 'S' : '-',
671 eflags & CC_Z ? 'Z' : '-',
672 eflags & CC_A ? 'A' : '-',
673 eflags & CC_P ? 'P' : '-',
674 eflags & CC_C ? 'C' : '-',
675 env->hflags & HF_CPL_MASK,
676 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
677 (int)(env->a20_mask >> 20) & 1,
678 (env->hflags >> HF_SMM_SHIFT) & 1,
679 env->halted);
682 #ifdef TARGET_X86_64
683 if (env->hflags & HF_LMA_MASK) {
684 for(i = 0; i < 6; i++) {
685 SegmentCache *sc = &env->segs[i];
686 cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
687 seg_name[i],
688 sc->selector,
689 sc->base,
690 sc->limit,
691 sc->flags);
693 cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
694 env->ldt.selector,
695 env->ldt.base,
696 env->ldt.limit,
697 env->ldt.flags);
698 cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
699 env->tr.selector,
700 env->tr.base,
701 env->tr.limit,
702 env->tr.flags);
703 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
704 env->gdt.base, env->gdt.limit);
705 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
706 env->idt.base, env->idt.limit);
707 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
708 (uint32_t)env->cr[0],
709 env->cr[2],
710 env->cr[3],
711 (uint32_t)env->cr[4]);
712 for(i = 0; i < 4; i++)
713 cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
714 cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
715 env->dr[6], env->dr[7]);
716 } else
717 #endif
719 for(i = 0; i < 6; i++) {
720 SegmentCache *sc = &env->segs[i];
721 cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
722 seg_name[i],
723 sc->selector,
724 (uint32_t)sc->base,
725 sc->limit,
726 sc->flags);
728 cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
729 env->ldt.selector,
730 (uint32_t)env->ldt.base,
731 env->ldt.limit,
732 env->ldt.flags);
733 cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
734 env->tr.selector,
735 (uint32_t)env->tr.base,
736 env->tr.limit,
737 env->tr.flags);
738 cpu_fprintf(f, "GDT= %08x %08x\n",
739 (uint32_t)env->gdt.base, env->gdt.limit);
740 cpu_fprintf(f, "IDT= %08x %08x\n",
741 (uint32_t)env->idt.base, env->idt.limit);
742 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
743 (uint32_t)env->cr[0],
744 (uint32_t)env->cr[2],
745 (uint32_t)env->cr[3],
746 (uint32_t)env->cr[4]);
747 for(i = 0; i < 4; i++)
748 cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
749 cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
751 if (flags & X86_DUMP_CCOP) {
752 if ((unsigned)env->cc_op < CC_OP_NB)
753 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
754 else
755 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
756 #ifdef TARGET_X86_64
757 if (env->hflags & HF_CS64_MASK) {
758 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
759 env->cc_src, env->cc_dst,
760 cc_op_name);
761 } else
762 #endif
764 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
765 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
766 cc_op_name);
769 if (flags & X86_DUMP_FPU) {
770 int fptag;
771 fptag = 0;
772 for(i = 0; i < 8; i++) {
773 fptag |= ((!env->fptags[i]) << i);
775 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
776 env->fpuc,
777 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
778 env->fpstt,
779 fptag,
780 env->mxcsr);
781 for(i=0;i<8;i++) {
782 #if defined(USE_X86LDOUBLE)
783 union {
784 long double d;
785 struct {
786 uint64_t lower;
787 uint16_t upper;
788 } l;
789 } tmp;
790 tmp.d = env->fpregs[i].d;
791 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
792 i, tmp.l.lower, tmp.l.upper);
793 #else
794 cpu_fprintf(f, "FPR%d=%016" PRIx64,
795 i, env->fpregs[i].mmx.q);
796 #endif
797 if ((i & 1) == 1)
798 cpu_fprintf(f, "\n");
799 else
800 cpu_fprintf(f, " ");
802 if (env->hflags & HF_CS64_MASK)
803 nb = 16;
804 else
805 nb = 8;
806 for(i=0;i<nb;i++) {
807 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
809 env->xmm_regs[i].XMM_L(3),
810 env->xmm_regs[i].XMM_L(2),
811 env->xmm_regs[i].XMM_L(1),
812 env->xmm_regs[i].XMM_L(0));
813 if ((i & 1) == 1)
814 cpu_fprintf(f, "\n");
815 else
816 cpu_fprintf(f, " ");
821 /***********************************************************/
822 /* x86 mmu */
823 /* XXX: add PGE support */
825 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
827 a20_state = (a20_state != 0);
828 if (a20_state != ((env->a20_mask >> 20) & 1)) {
829 #if defined(DEBUG_MMU)
830 printf("A20 update: a20=%d\n", a20_state);
831 #endif
832 /* if the cpu is currently executing code, we must unlink it and
833 all the potentially executing TB */
834 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
836 /* when a20 is changed, all the MMU mappings are invalid, so
837 we must flush everything */
838 tlb_flush(env, 1);
839 env->a20_mask = (~0x100000) | (a20_state << 20);
843 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
845 int pe_state;
847 #if defined(DEBUG_MMU)
848 printf("CR0 update: CR0=0x%08x\n", new_cr0);
849 #endif
850 if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
851 (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
852 tlb_flush(env, 1);
855 #ifdef TARGET_X86_64
856 if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
857 (env->efer & MSR_EFER_LME)) {
858 /* enter in long mode */
859 /* XXX: generate an exception */
860 if (!(env->cr[4] & CR4_PAE_MASK))
861 return;
862 env->efer |= MSR_EFER_LMA;
863 env->hflags |= HF_LMA_MASK;
864 } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
865 (env->efer & MSR_EFER_LMA)) {
866 /* exit long mode */
867 env->efer &= ~MSR_EFER_LMA;
868 env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
869 env->eip &= 0xffffffff;
871 #endif
872 env->cr[0] = new_cr0 | CR0_ET_MASK;
874 /* update PE flag in hidden flags */
875 pe_state = (env->cr[0] & CR0_PE_MASK);
876 env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
877 /* ensure that ADDSEG is always set in real mode */
878 env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
879 /* update FPU flags */
880 env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
881 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
884 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
885 the PDPT */
886 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
888 env->cr[3] = new_cr3;
889 if (env->cr[0] & CR0_PG_MASK) {
890 #if defined(DEBUG_MMU)
891 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
892 #endif
893 tlb_flush(env, 0);
897 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
899 #if defined(DEBUG_MMU)
900 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
901 #endif
902 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
903 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
904 tlb_flush(env, 1);
906 /* SSE handling */
907 if (!(env->cpuid_features & CPUID_SSE))
908 new_cr4 &= ~CR4_OSFXSR_MASK;
909 if (new_cr4 & CR4_OSFXSR_MASK)
910 env->hflags |= HF_OSFXSR_MASK;
911 else
912 env->hflags &= ~HF_OSFXSR_MASK;
914 env->cr[4] = new_cr4;
917 #if defined(CONFIG_USER_ONLY)
919 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
920 int is_write, int mmu_idx, int is_softmmu)
922 /* user mode only emulation */
923 is_write &= 1;
924 env->cr[2] = addr;
925 env->error_code = (is_write << PG_ERROR_W_BIT);
926 env->error_code |= PG_ERROR_U_MASK;
927 env->exception_index = EXCP0E_PAGE;
928 return 1;
931 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
933 return addr;
936 #else
938 /* XXX: This value should match the one returned by CPUID
939 * and in exec.c */
940 #if defined(USE_KQEMU)
941 #define PHYS_ADDR_MASK 0xfffff000LL
942 #else
943 # if defined(TARGET_X86_64)
944 # define PHYS_ADDR_MASK 0xfffffff000LL
945 # else
946 # define PHYS_ADDR_MASK 0xffffff000LL
947 # endif
948 #endif
950 /* return value:
951 -1 = cannot handle fault
952 0 = nothing more to do
953 1 = generate PF fault
954 2 = soft MMU activation required for this block
956 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
957 int is_write1, int mmu_idx, int is_softmmu)
959 uint64_t ptep, pte;
960 target_ulong pde_addr, pte_addr;
961 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
962 target_phys_addr_t paddr;
963 uint32_t page_offset;
964 target_ulong vaddr, virt_addr;
966 is_user = mmu_idx == MMU_USER_IDX;
967 #if defined(DEBUG_MMU)
968 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
969 addr, is_write1, is_user, env->eip);
970 #endif
971 is_write = is_write1 & 1;
973 if (!(env->cr[0] & CR0_PG_MASK)) {
974 pte = addr;
975 virt_addr = addr & TARGET_PAGE_MASK;
976 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
977 page_size = 4096;
978 goto do_mapping;
981 if (env->cr[4] & CR4_PAE_MASK) {
982 uint64_t pde, pdpe;
983 target_ulong pdpe_addr;
985 #ifdef TARGET_X86_64
986 if (env->hflags & HF_LMA_MASK) {
987 uint64_t pml4e_addr, pml4e;
988 int32_t sext;
990 /* test virtual address sign extension */
991 sext = (int64_t)addr >> 47;
992 if (sext != 0 && sext != -1) {
993 env->error_code = 0;
994 env->exception_index = EXCP0D_GPF;
995 return 1;
998 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
999 env->a20_mask;
1000 pml4e = ldq_phys(pml4e_addr);
1001 if (!(pml4e & PG_PRESENT_MASK)) {
1002 error_code = 0;
1003 goto do_fault;
1005 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
1006 error_code = PG_ERROR_RSVD_MASK;
1007 goto do_fault;
1009 if (!(pml4e & PG_ACCESSED_MASK)) {
1010 pml4e |= PG_ACCESSED_MASK;
1011 stl_phys_notdirty(pml4e_addr, pml4e);
1013 ptep = pml4e ^ PG_NX_MASK;
1014 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
1015 env->a20_mask;
1016 pdpe = ldq_phys(pdpe_addr);
1017 if (!(pdpe & PG_PRESENT_MASK)) {
1018 error_code = 0;
1019 goto do_fault;
1021 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
1022 error_code = PG_ERROR_RSVD_MASK;
1023 goto do_fault;
1025 ptep &= pdpe ^ PG_NX_MASK;
1026 if (!(pdpe & PG_ACCESSED_MASK)) {
1027 pdpe |= PG_ACCESSED_MASK;
1028 stl_phys_notdirty(pdpe_addr, pdpe);
1030 } else
1031 #endif
1033 /* XXX: load them when cr3 is loaded ? */
1034 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1035 env->a20_mask;
1036 pdpe = ldq_phys(pdpe_addr);
1037 if (!(pdpe & PG_PRESENT_MASK)) {
1038 error_code = 0;
1039 goto do_fault;
1041 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
1044 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
1045 env->a20_mask;
1046 pde = ldq_phys(pde_addr);
1047 if (!(pde & PG_PRESENT_MASK)) {
1048 error_code = 0;
1049 goto do_fault;
1051 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
1052 error_code = PG_ERROR_RSVD_MASK;
1053 goto do_fault;
1055 ptep &= pde ^ PG_NX_MASK;
1056 if (pde & PG_PSE_MASK) {
1057 /* 2 MB page */
1058 page_size = 2048 * 1024;
1059 ptep ^= PG_NX_MASK;
1060 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1061 goto do_fault_protect;
1062 if (is_user) {
1063 if (!(ptep & PG_USER_MASK))
1064 goto do_fault_protect;
1065 if (is_write && !(ptep & PG_RW_MASK))
1066 goto do_fault_protect;
1067 } else {
1068 if ((env->cr[0] & CR0_WP_MASK) &&
1069 is_write && !(ptep & PG_RW_MASK))
1070 goto do_fault_protect;
1072 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1073 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1074 pde |= PG_ACCESSED_MASK;
1075 if (is_dirty)
1076 pde |= PG_DIRTY_MASK;
1077 stl_phys_notdirty(pde_addr, pde);
1079 /* align to page_size */
1080 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
1081 virt_addr = addr & ~(page_size - 1);
1082 } else {
1083 /* 4 KB page */
1084 if (!(pde & PG_ACCESSED_MASK)) {
1085 pde |= PG_ACCESSED_MASK;
1086 stl_phys_notdirty(pde_addr, pde);
1088 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
1089 env->a20_mask;
1090 pte = ldq_phys(pte_addr);
1091 if (!(pte & PG_PRESENT_MASK)) {
1092 error_code = 0;
1093 goto do_fault;
1095 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
1096 error_code = PG_ERROR_RSVD_MASK;
1097 goto do_fault;
1099 /* combine pde and pte nx, user and rw protections */
1100 ptep &= pte ^ PG_NX_MASK;
1101 ptep ^= PG_NX_MASK;
1102 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1103 goto do_fault_protect;
1104 if (is_user) {
1105 if (!(ptep & PG_USER_MASK))
1106 goto do_fault_protect;
1107 if (is_write && !(ptep & PG_RW_MASK))
1108 goto do_fault_protect;
1109 } else {
1110 if ((env->cr[0] & CR0_WP_MASK) &&
1111 is_write && !(ptep & PG_RW_MASK))
1112 goto do_fault_protect;
1114 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1115 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1116 pte |= PG_ACCESSED_MASK;
1117 if (is_dirty)
1118 pte |= PG_DIRTY_MASK;
1119 stl_phys_notdirty(pte_addr, pte);
1121 page_size = 4096;
1122 virt_addr = addr & ~0xfff;
1123 pte = pte & (PHYS_ADDR_MASK | 0xfff);
1125 } else {
1126 uint32_t pde;
1128 /* page directory entry */
1129 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
1130 env->a20_mask;
1131 pde = ldl_phys(pde_addr);
1132 if (!(pde & PG_PRESENT_MASK)) {
1133 error_code = 0;
1134 goto do_fault;
1136 /* if PSE bit is set, then we use a 4MB page */
1137 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1138 page_size = 4096 * 1024;
1139 if (is_user) {
1140 if (!(pde & PG_USER_MASK))
1141 goto do_fault_protect;
1142 if (is_write && !(pde & PG_RW_MASK))
1143 goto do_fault_protect;
1144 } else {
1145 if ((env->cr[0] & CR0_WP_MASK) &&
1146 is_write && !(pde & PG_RW_MASK))
1147 goto do_fault_protect;
1149 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1150 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1151 pde |= PG_ACCESSED_MASK;
1152 if (is_dirty)
1153 pde |= PG_DIRTY_MASK;
1154 stl_phys_notdirty(pde_addr, pde);
1157 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1158 ptep = pte;
1159 virt_addr = addr & ~(page_size - 1);
1160 } else {
1161 if (!(pde & PG_ACCESSED_MASK)) {
1162 pde |= PG_ACCESSED_MASK;
1163 stl_phys_notdirty(pde_addr, pde);
1166 /* page directory entry */
1167 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1168 env->a20_mask;
1169 pte = ldl_phys(pte_addr);
1170 if (!(pte & PG_PRESENT_MASK)) {
1171 error_code = 0;
1172 goto do_fault;
1174 /* combine pde and pte user and rw protections */
1175 ptep = pte & pde;
1176 if (is_user) {
1177 if (!(ptep & PG_USER_MASK))
1178 goto do_fault_protect;
1179 if (is_write && !(ptep & PG_RW_MASK))
1180 goto do_fault_protect;
1181 } else {
1182 if ((env->cr[0] & CR0_WP_MASK) &&
1183 is_write && !(ptep & PG_RW_MASK))
1184 goto do_fault_protect;
1186 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1187 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1188 pte |= PG_ACCESSED_MASK;
1189 if (is_dirty)
1190 pte |= PG_DIRTY_MASK;
1191 stl_phys_notdirty(pte_addr, pte);
1193 page_size = 4096;
1194 virt_addr = addr & ~0xfff;
1197 /* the page can be put in the TLB */
1198 prot = PAGE_READ;
1199 if (!(ptep & PG_NX_MASK))
1200 prot |= PAGE_EXEC;
1201 if (pte & PG_DIRTY_MASK) {
1202 /* only set write access if already dirty... otherwise wait
1203 for dirty access */
1204 if (is_user) {
1205 if (ptep & PG_RW_MASK)
1206 prot |= PAGE_WRITE;
1207 } else {
1208 if (!(env->cr[0] & CR0_WP_MASK) ||
1209 (ptep & PG_RW_MASK))
1210 prot |= PAGE_WRITE;
1213 do_mapping:
1214 pte = pte & env->a20_mask;
1216 /* Even if 4MB pages, we map only one 4KB page in the cache to
1217 avoid filling it too fast */
1218 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1219 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1220 vaddr = virt_addr + page_offset;
1222 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1223 return ret;
1224 do_fault_protect:
1225 error_code = PG_ERROR_P_MASK;
1226 do_fault:
1227 error_code |= (is_write << PG_ERROR_W_BIT);
1228 if (is_user)
1229 error_code |= PG_ERROR_U_MASK;
1230 if (is_write1 == 2 &&
1231 (env->efer & MSR_EFER_NXE) &&
1232 (env->cr[4] & CR4_PAE_MASK))
1233 error_code |= PG_ERROR_I_D_MASK;
1234 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
1235 /* cr2 is not modified in case of exceptions */
1236 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
1237 addr);
1238 } else {
1239 env->cr[2] = addr;
1241 env->error_code = error_code;
1242 env->exception_index = EXCP0E_PAGE;
1243 return 1;
/* Debugger helper: translate virtual address 'addr' to a physical address
 * by walking the page tables by hand.  Unlike the MMU fault path this
 * never sets accessed/dirty bits, ignores access permissions, and does
 * not touch the TLB.  Returns the physical address, or -1 if any level
 * of the walk is not present (or a 64-bit address is non-canonical). */
1246 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1248 target_ulong pde_addr, pte_addr;
1249 uint64_t pte;
1250 target_phys_addr_t paddr;
1251 uint32_t page_offset;
1252 int page_size;
/* PAE paging (3 or 4 levels, 64-bit entries) */
1254 if (env->cr[4] & CR4_PAE_MASK) {
1255 target_ulong pdpe_addr;
1256 uint64_t pde, pdpe;
1258 #ifdef TARGET_X86_64
1259 if (env->hflags & HF_LMA_MASK) {
1260 uint64_t pml4e_addr, pml4e;
1261 int32_t sext;
1263 /* test virtual address sign extension */
1264 sext = (int64_t)addr >> 47;
1265 if (sext != 0 && sext != -1)
1266 return -1;
/* level 4: PML4 entry selected by bits 47:39 */
1268 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
1269 env->a20_mask;
1270 pml4e = ldq_phys(pml4e_addr);
1271 if (!(pml4e & PG_PRESENT_MASK))
1272 return -1;
/* level 3: PDPT entry selected by bits 38:30 */
1274 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
1275 env->a20_mask;
1276 pdpe = ldq_phys(pdpe_addr);
1277 if (!(pdpe & PG_PRESENT_MASK))
1278 return -1;
1279 } else
1280 #endif
/* 32-bit PAE: CR3 holds a 32-byte-aligned 4-entry PDPT; bits 31:30
 * of the address (here extracted as (addr >> 27) & 0x18, i.e. the
 * entry index already scaled by 8) pick the PDPT entry. */
1282 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1283 env->a20_mask;
1284 pdpe = ldq_phys(pdpe_addr);
1285 if (!(pdpe & PG_PRESENT_MASK))
1286 return -1;
/* level 2: page directory entry selected by bits 29:21 */
1289 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
1290 env->a20_mask;
1291 pde = ldq_phys(pde_addr);
1292 if (!(pde & PG_PRESENT_MASK)) {
1293 return -1;
1295 if (pde & PG_PSE_MASK) {
1296 /* 2 MB page */
1297 page_size = 2048 * 1024;
1298 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1299 } else {
1300 /* 4 KB page */
1301 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
1302 env->a20_mask;
1303 page_size = 4096;
1304 pte = ldq_phys(pte_addr);
1306 if (!(pte & PG_PRESENT_MASK))
1307 return -1;
1308 } else {
1309 uint32_t pde;
/* legacy (non-PAE) mode; with paging off the mapping is identity */
1311 if (!(env->cr[0] & CR0_PG_MASK)) {
1312 pte = addr;
1313 page_size = 4096;
1314 } else {
1315 /* page directory entry */
1316 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
1317 pde = ldl_phys(pde_addr);
1318 if (!(pde & PG_PRESENT_MASK))
1319 return -1;
/* 4 MB page only when PSE is enabled in CR4 */
1320 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1321 pte = pde & ~0x003ff000; /* align to 4MB */
1322 page_size = 4096 * 1024;
1323 } else {
1324 /* page directory entry */
1325 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
1326 pte = ldl_phys(pte_addr);
1327 if (!(pte & PG_PRESENT_MASK))
1328 return -1;
1329 page_size = 4096;
/* apply A20 masking to the final frame address as real hardware would */
1332 pte = pte & env->a20_mask;
/* combine page frame with the offset inside the (possibly large) page */
1335 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1336 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1337 return paddr;
/* Install debug register DR[index] into QEMU's generic breakpoint /
 * watchpoint machinery according to its R/W type field in DR7.
 * On insertion failure the cached breakpoint pointer is cleared so
 * hw_breakpoint_remove() will treat the slot as empty. */
1340 void hw_breakpoint_insert(CPUState *env, int index)
1342 int type, err = 0;
1344 switch (hw_breakpoint_type(env->dr[7], index)) {
1345 case 0:
/* instruction breakpoint: only armed when enabled in DR7 */
1346 if (hw_breakpoint_enabled(env->dr[7], index))
1347 err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
1348 &env->cpu_breakpoint[index]);
1349 break;
1350 case 1:
/* data-write breakpoint -> memory write watchpoint */
1351 type = BP_CPU | BP_MEM_WRITE;
1352 goto insert_wp;
1353 case 2:
1354 /* No support for I/O watchpoints yet */
1355 break;
1356 case 3:
/* data read/write breakpoint -> memory access watchpoint */
1357 type = BP_CPU | BP_MEM_ACCESS;
1358 insert_wp:
1359 err = cpu_watchpoint_insert(env, env->dr[index],
1360 hw_breakpoint_len(env->dr[7], index),
1361 type, &env->cpu_watchpoint[index]);
1362 break;
/* cpu_breakpoint and cpu_watchpoint share the slot via a union-like
 * convention; NULL marks the slot as not inserted */
1364 if (err)
1365 env->cpu_breakpoint[index] = NULL;
/* Undo hw_breakpoint_insert() for slot 'index': remove the generic
 * breakpoint or watchpoint that backs debug register DR[index].
 * A NULL cached pointer means nothing was inserted, so return early. */
1368 void hw_breakpoint_remove(CPUState *env, int index)
1370 if (!env->cpu_breakpoint[index])
1371 return;
1372 switch (hw_breakpoint_type(env->dr[7], index)) {
1373 case 0:
/* instruction breakpoint */
1374 if (hw_breakpoint_enabled(env->dr[7], index))
1375 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
1376 break;
1377 case 1:
1378 case 3:
/* data watchpoints (write-only and read/write) */
1379 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
1380 break;
1381 case 2:
1382 /* No support for I/O watchpoints yet */
1383 break;
/* Scan the four hardware breakpoint slots and compute the DR6 status
 * bits (B0..B3) for any slot that matched: an instruction breakpoint
 * whose address equals EIP, or a data watchpoint flagged as hit.
 * DR6 is written back when a matching slot was enabled in DR7, or
 * unconditionally when force_dr6_update is set.  Returns nonzero iff
 * an *enabled* breakpoint was hit (i.e. a #DB should be raised). */
1389 target_ulong dr6;
1390 int reg, type;
1391 int hit_enabled = 0;
/* start from current DR6 with the B0-B3 hit bits cleared */
1393 dr6 = env->dr[6] & ~0xf;
1394 for (reg = 0; reg < 4; reg++) {
1395 type = hw_breakpoint_type(env->dr[7], reg);
/* type 0: code breakpoint matches on EIP; odd types (1/3) are data
 * watchpoints whose hit status is latched in BP_WATCHPOINT_HIT */
1396 if ((type == 0 && env->dr[reg] == env->eip) ||
1397 ((type & 1) && env->cpu_watchpoint[reg] &&
1398 (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
1399 dr6 |= 1 << reg;
1400 if (hw_breakpoint_enabled(env->dr[7], reg))
1401 hit_enabled = 1;
1404 if (hit_enabled || force_dr6_update)
1405 env->dr[6] = dr6;
1406 return hit_enabled;
1409 static CPUDebugExcpHandler *prev_debug_excp_handler;
1411 void raise_exception(int exception_index);
/* Debug exception hook installed via cpu_set_debug_excp_handler().
 * Distinguishes CPU-level (guest DR7) breakpoints/watchpoints, which
 * must raise #DB inside the guest, from ones owned by the host
 * debugger, which are passed on to the previous handler. */
1413 static void breakpoint_handler(CPUState *env)
1415 CPUBreakpoint *bp;
1417 if (env->watchpoint_hit) {
1418 if (env->watchpoint_hit->flags & BP_CPU) {
/* guest-owned watchpoint: clear the latch, then either inject #DB
 * (if DR7 enables it) or silently resume execution */
1419 env->watchpoint_hit = NULL;
1420 if (check_hw_breakpoints(env, 0))
1421 raise_exception(EXCP01_DB);
1422 else
1423 cpu_resume_from_signal(env, NULL);
1425 } else {
/* no watchpoint: look for a code breakpoint at the current EIP */
1426 TAILQ_FOREACH(bp, &env->breakpoints, entry)
1427 if (bp->pc == env->eip) {
1428 if (bp->flags & BP_CPU) {
/* force_dr6_update=1: DR6 must reflect the hit before #DB */
1429 check_hw_breakpoints(env, 1);
1430 raise_exception(EXCP01_DB);
1432 break;
/* chain to the handler that was installed before ours (e.g. gdbstub) */
1435 if (prev_debug_excp_handler)
1436 prev_debug_excp_handler(env);
1438 #endif /* !CONFIG_USER_ONLY */
/* Execute the CPUID instruction on the *host* CPU for leaf 'function',
 * subleaf 'count', storing EAX/EBX/ECX/EDX into the non-NULL output
 * pointers.  Only compiled to real work when KVM support is built in;
 * otherwise the outputs are left untouched. */
1440 static void host_cpuid(uint32_t function, uint32_t count,
1441 uint32_t *eax, uint32_t *ebx,
1442 uint32_t *ecx, uint32_t *edx)
1444 #if defined(CONFIG_KVM) || defined(USE_KVM)
1445 uint32_t vec[4];
1447 #ifdef __x86_64__
1448 asm volatile("cpuid"
1449 : "=a"(vec[0]), "=b"(vec[1]),
1450 "=c"(vec[2]), "=d"(vec[3])
1451 : "0"(function), "c"(count) : "cc");
1452 #else
/* 32-bit: save/restore all GPRs with pusha/popa and store the results
 * through %esi, so EBX (the PIC register) is never clobbered in the
 * eyes of the compiler */
1453 asm volatile("pusha \n\t"
1454 "cpuid \n\t"
1455 "mov %%eax, 0(%2) \n\t"
1456 "mov %%ebx, 4(%2) \n\t"
1457 "mov %%ecx, 8(%2) \n\t"
1458 "mov %%edx, 12(%2) \n\t"
1459 "popa"
1460 : : "a"(function), "c"(count), "S"(vec)
1461 : "memory", "cc");
1462 #endif
/* each output pointer is optional */
1464 if (eax)
1465 *eax = vec[0];
1466 if (ebx)
1467 *ebx = vec[1];
1468 if (ecx)
1469 *ecx = vec[2];
1470 if (edx)
1471 *edx = vec[3];
1472 #endif
/* Emulate the guest-visible CPUID instruction: fill EAX/EBX/ECX/EDX for
 * leaf 'index' and subleaf 'count' from the virtual CPU's configured
 * feature set.  When KVM is enabled, some leaves are derived from or
 * clamped against the host CPU via host_cpuid(). */
1475 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
1476 uint32_t *eax, uint32_t *ebx,
1477 uint32_t *ecx, uint32_t *edx)
1479 /* test if maximum index reached */
1480 if (index & 0x80000000) {
/* out-of-range extended leaf falls back to the highest basic leaf */
1481 if (index > env->cpuid_xlevel)
1482 index = env->cpuid_level;
1483 } else {
1484 if (index > env->cpuid_level)
1485 index = env->cpuid_level;
1488 switch(index) {
1489 case 0:
/* vendor string is returned in EBX:EDX:ECX order */
1490 *eax = env->cpuid_level;
1491 *ebx = env->cpuid_vendor1;
1492 *edx = env->cpuid_vendor2;
1493 *ecx = env->cpuid_vendor3;
1495 /* sysenter isn't supported on compatibility mode on AMD. and syscall
1496 * isn't supported in compatibility mode on Intel. so advertise the
1497 * actuall cpu, and say goodbye to migration between different vendors
1498 * is you use compatibility mode. */
1499 if (kvm_enabled())
1500 host_cpuid(0, 0, NULL, ebx, ecx, edx);
1501 break;
1502 case 1:
1503 *eax = env->cpuid_version;
1504 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1505 *ecx = env->cpuid_ext_features;
1506 *edx = env->cpuid_features;
1508 /* "Hypervisor present" bit required for Microsoft SVVP */
1509 if (kvm_enabled())
1510 *ecx |= (1 << 31);
1511 break;
1512 case 2:
1513 /* cache info: needed for Pentium Pro compatibility */
1514 *eax = 1;
1515 *ebx = 0;
1516 *ecx = 0;
1517 *edx = 0x2c307d;
1518 break;
1519 case 4:
1520 /* cache info: needed for Core compatibility */
1521 switch (count) {
1522 case 0: /* L1 dcache info */
1523 *eax = 0x0000121;
1524 *ebx = 0x1c0003f;
1525 *ecx = 0x000003f;
1526 *edx = 0x0000001;
1527 break;
1528 case 1: /* L1 icache info */
1529 *eax = 0x0000122;
1530 *ebx = 0x1c0003f;
1531 *ecx = 0x000003f;
1532 *edx = 0x0000001;
1533 break;
1534 case 2: /* L2 cache info */
1535 *eax = 0x0000143;
1536 *ebx = 0x3c0003f;
1537 *ecx = 0x0000fff;
1538 *edx = 0x0000001;
1539 break;
1540 default: /* end of info */
1541 *eax = 0;
1542 *ebx = 0;
1543 *ecx = 0;
1544 *edx = 0;
1545 break;
1547 break;
1548 case 5:
1549 /* mwait info: needed for Core compatibility */
1550 *eax = 0; /* Smallest monitor-line size in bytes */
1551 *ebx = 0; /* Largest monitor-line size in bytes */
1552 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
1553 *edx = 0;
1554 break;
1555 case 6:
1556 /* Thermal and Power Leaf */
1557 *eax = 0;
1558 *ebx = 0;
1559 *ecx = 0;
1560 *edx = 0;
1561 break;
1562 case 9:
1563 /* Direct Cache Access Information Leaf */
1564 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
1565 *ebx = 0;
1566 *ecx = 0;
1567 *edx = 0;
1568 break;
1569 case 0xA:
1570 /* Architectural Performance Monitoring Leaf */
1571 *eax = 0;
1572 *ebx = 0;
1573 *ecx = 0;
1574 *edx = 0;
1575 break;
1576 case 0x80000000:
1577 *eax = env->cpuid_xlevel;
1578 *ebx = env->cpuid_vendor1;
1579 *edx = env->cpuid_vendor2;
1580 *ecx = env->cpuid_vendor3;
1581 break;
1582 case 0x80000001:
1583 *eax = env->cpuid_features;
1584 *ebx = 0;
1585 *ecx = env->cpuid_ext3_features;
1586 *edx = env->cpuid_ext2_features;
1588 if (kvm_enabled()) {
1589 uint32_t h_eax, h_edx;
1591 host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx);
1593 /* disable CPU features that the host does not support */
1595 /* long mode */
1596 if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
1597 *edx &= ~0x20000000;
1598 /* syscall */
1599 if ((h_edx & 0x00000800) == 0)
1600 *edx &= ~0x00000800;
1601 /* nx */
1602 if ((h_edx & 0x00100000) == 0)
1603 *edx &= ~0x00100000;
1605 /* disable CPU features that KVM cannot support */
1607 /* svm */
1608 if (!kvm_nested)
1609 *ecx &= ~4UL;
1610 /* 3dnow */
1611 *edx &= ~0xc0000000;
1613 break;
1614 case 0x80000002:
1615 case 0x80000003:
1616 case 0x80000004:
/* processor brand string, 16 bytes per leaf across three leaves */
1617 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1618 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1619 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1620 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1621 break;
1622 case 0x80000005:
1623 /* cache info (L1 cache) */
1624 *eax = 0x01ff01ff;
1625 *ebx = 0x01ff01ff;
1626 *ecx = 0x40020140;
1627 *edx = 0x40020140;
1628 break;
1629 case 0x80000006:
1630 /* cache info (L2 cache) */
1631 *eax = 0;
1632 *ebx = 0x42004200;
1633 *ecx = 0x02008140;
1634 *edx = 0;
1635 break;
1636 case 0x80000008:
1637 /* virtual & phys address size in low 2 bytes. */
1638 /* XXX: This value must match the one used in the MMU code. */
1639 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
1640 /* 64 bit processor */
1641 #if defined(USE_KQEMU)
1642 *eax = 0x00003020; /* 48 bits virtual, 32 bits physical */
1643 #else
1644 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1645 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
1646 #endif
1647 } else {
1648 #if defined(USE_KQEMU)
1649 *eax = 0x00000020; /* 32 bits physical */
1650 #else
1651 if (env->cpuid_features & CPUID_PSE36)
1652 *eax = 0x00000024; /* 36 bits physical */
1653 else
1654 *eax = 0x00000020; /* 32 bits physical */
1655 #endif
1657 *ebx = 0;
1658 *ecx = 0;
1659 *edx = 0;
1660 break;
1661 case 0x8000000A:
/* SVM (nested virtualization) capability leaf */
1662 *eax = 0x00000001; /* SVM Revision */
1663 *ebx = 0x00000010; /* nr of ASIDs */
1664 *ecx = 0;
1665 *edx = 0; /* optional features */
1666 break;
1667 default:
1668 /* reserved values: zero */
1669 *eax = 0;
1670 *ebx = 0;
1671 *ecx = 0;
1672 *edx = 0;
1673 break;
/* Allocate and initialize a new x86 virtual CPU for the model named by
 * 'cpu_model'.  One-time global setup (flag optimization tables, debug
 * exception handler) happens on the first call.  Returns NULL if the
 * model name is unknown.  When KVM is enabled, the configured CPUID
 * feature words are trimmed to what the host/KVM actually supports. */
1677 CPUX86State *cpu_x86_init(const char *cpu_model)
1679 CPUX86State *env;
1680 static int inited;
1682 env = qemu_mallocz(sizeof(CPUX86State));
1683 cpu_exec_init(env);
1684 env->cpu_model_str = cpu_model;
1686 /* init various static tables */
1687 if (!inited) {
1688 inited = 1;
1689 optimize_flags_init();
1690 #ifndef CONFIG_USER_ONLY
/* chain our breakpoint handler in front of any existing one */
1691 prev_debug_excp_handler =
1692 cpu_set_debug_excp_handler(breakpoint_handler);
1693 #endif
/* unknown model name: free the CPU and report failure */
1695 if (cpu_x86_register(env, cpu_model) < 0) {
1696 cpu_x86_close(env);
1697 return NULL;
1699 cpu_reset(env);
1700 #ifdef USE_KQEMU
1701 kqemu_init(env);
1702 #endif
1703 if (kvm_enabled())
1704 kvm_init_vcpu(env);
1705 if (kvm_enabled()) {
/* drop advertised features KVM cannot provide, per feature word */
1706 kvm_trim_features(&env->cpuid_features,
1707 kvm_arch_get_supported_cpuid(env, 1, R_EDX),
1708 feature_name);
1709 kvm_trim_features(&env->cpuid_ext_features,
1710 kvm_arch_get_supported_cpuid(env, 1, R_ECX),
1711 ext_feature_name);
1712 kvm_trim_features(&env->cpuid_ext2_features,
1713 kvm_arch_get_supported_cpuid(env, 0x80000001, R_EDX),
1714 ext2_feature_name);
1715 kvm_trim_features(&env->cpuid_ext3_features,
1716 kvm_arch_get_supported_cpuid(env, 0x80000001, R_ECX),
1717 ext3_feature_name);
1720 return env;