Merge branch 'master' of git://git.sv.gnu.org/qemu
[qemu-kvm/fedora.git] / target-i386 / helper.c
blob2c5af3c00d6de53d68c5551a8f3980da55dabed0
1 /*
2 * i386 helpers (without register variable usage)
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
27 #include "cpu.h"
28 #include "exec-all.h"
29 #include "qemu-common.h"
30 #include "kvm.h"
32 #include "qemu-kvm.h"
34 //#define DEBUG_MMU
36 /* feature flags taken from "Intel Processor Identification and the CPUID
37 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
38 * about feature names, the Linux name is used. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx",
    NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm",
    "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */,
    "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};

/* Scan one 32-entry name table; if flagname matches slot i, set bit i in
 * *features and return 1.  NULL slots are reserved/unnamed bits.
 * Note 1u << i: an unsigned shift so that bit 31 is well defined. */
static int lookup_feature_bit(const char *flagname, const char **names,
                              uint32_t *features)
{
    int i;

    for (i = 0; i < 32; i++) {
        if (names[i] && !strcmp(flagname, names[i])) {
            *features |= 1u << i;
            return 1;
        }
    }
    return 0;
}

/* Translate a CPUID feature-flag name into bits in the four CPUID feature
 * words.  A name may appear in more than one table (e.g. "fpu" is both a
 * standard and an AMD extended feature); all matching bits are set.
 * Unknown names are reported on stderr and otherwise ignored. */
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int found = 0;

    found |= lookup_feature_bit(flagname, feature_name, features);
    found |= lookup_feature_bit(flagname, ext_feature_name, ext_features);
    found |= lookup_feature_bit(flagname, ext2_feature_name, ext2_features);
    found |= lookup_feature_bit(flagname, ext3_feature_name, ext3_features);
    if (!found) {
        fprintf(stderr, "CPU feature %s not found\n", flagname);
    }
}
/* Built-in CPU model definition: everything needed to synthesize the
 * CPUID leaves for one "-cpu <name>" selection. */
typedef struct x86_def_t {
    const char *name;                   /* model name as given to -cpu */
    uint32_t level;                     /* highest standard CPUID leaf */
    uint32_t vendor1, vendor2, vendor3; /* vendor id string, 4 chars each
                                           (first/middle/last third) */
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;                    /* highest extended CPUID leaf */
    char model_id[48];                  /* brand string, leaves 0x80000002-4 */
    int vendor_override;                /* non-zero if user set vendor= */
} x86_def_t;
/* CPUID feature sets for the classic built-in models; each generation
 * builds on the previous one. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
/* Pentium Pro-style baseline used by the generic qemu32/qemu64 models. */
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
121 static x86_def_t x86_defs[] = {
122 #ifdef TARGET_X86_64
124 .name = "qemu64",
125 .level = 2,
126 .vendor1 = CPUID_VENDOR_AMD_1,
127 .vendor2 = CPUID_VENDOR_AMD_2,
128 .vendor3 = CPUID_VENDOR_AMD_3,
129 .family = 6,
130 .model = 2,
131 .stepping = 3,
132 .features = PPRO_FEATURES |
133 /* these features are needed for Win64 and aren't fully implemented */
134 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
135 /* this feature is needed for Solaris and isn't fully implemented */
136 CPUID_PSE36,
137 .ext_features = CPUID_EXT_SSE3,
138 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
139 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
140 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
141 .ext3_features = CPUID_EXT3_SVM,
142 .xlevel = 0x8000000A,
143 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
146 .name = "phenom",
147 .level = 5,
148 .vendor1 = CPUID_VENDOR_AMD_1,
149 .vendor2 = CPUID_VENDOR_AMD_2,
150 .vendor3 = CPUID_VENDOR_AMD_3,
151 .family = 16,
152 .model = 2,
153 .stepping = 3,
154 /* Missing: CPUID_VME, CPUID_HT */
155 .features = PPRO_FEATURES |
156 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
157 CPUID_PSE36,
158 /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
159 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
160 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
161 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
162 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
163 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
164 CPUID_EXT2_FFXSR,
165 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
166 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
167 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
168 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
169 .ext3_features = CPUID_EXT3_SVM,
170 .xlevel = 0x8000001A,
171 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
174 .name = "core2duo",
175 .level = 10,
176 .family = 6,
177 .model = 15,
178 .stepping = 11,
179 /* The original CPU also implements these features:
180 CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
181 CPUID_TM, CPUID_PBE */
182 .features = PPRO_FEATURES |
183 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
184 CPUID_PSE36,
185 /* The original CPU also implements these ext features:
186 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
187 CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
188 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
189 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
190 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
191 .xlevel = 0x80000008,
192 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
194 #endif
196 .name = "qemu32",
197 .level = 2,
198 .family = 6,
199 .model = 3,
200 .stepping = 3,
201 .features = PPRO_FEATURES,
202 .ext_features = CPUID_EXT_SSE3,
203 .xlevel = 0,
204 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
207 .name = "coreduo",
208 .level = 10,
209 .family = 6,
210 .model = 14,
211 .stepping = 8,
212 /* The original CPU also implements these features:
213 CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
214 CPUID_TM, CPUID_PBE */
215 .features = PPRO_FEATURES | CPUID_VME |
216 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
217 /* The original CPU also implements these ext features:
218 CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
219 CPUID_EXT_PDCM */
220 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
221 .ext2_features = CPUID_EXT2_NX,
222 .xlevel = 0x80000008,
223 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
226 .name = "486",
227 .level = 0,
228 .family = 4,
229 .model = 0,
230 .stepping = 0,
231 .features = I486_FEATURES,
232 .xlevel = 0,
235 .name = "pentium",
236 .level = 1,
237 .family = 5,
238 .model = 4,
239 .stepping = 3,
240 .features = PENTIUM_FEATURES,
241 .xlevel = 0,
244 .name = "pentium2",
245 .level = 2,
246 .family = 6,
247 .model = 5,
248 .stepping = 2,
249 .features = PENTIUM2_FEATURES,
250 .xlevel = 0,
253 .name = "pentium3",
254 .level = 2,
255 .family = 6,
256 .model = 7,
257 .stepping = 3,
258 .features = PENTIUM3_FEATURES,
259 .xlevel = 0,
262 .name = "athlon",
263 .level = 2,
264 .vendor1 = 0x68747541, /* "Auth" */
265 .vendor2 = 0x69746e65, /* "enti" */
266 .vendor3 = 0x444d4163, /* "cAMD" */
267 .family = 6,
268 .model = 2,
269 .stepping = 3,
270 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
271 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
272 .xlevel = 0x80000008,
273 /* XXX: put another string ? */
274 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
277 .name = "n270",
278 /* original is on level 10 */
279 .level = 5,
280 .family = 6,
281 .model = 28,
282 .stepping = 2,
283 .features = PPRO_FEATURES |
284 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
285 /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
286 * CPUID_HT | CPUID_TM | CPUID_PBE */
287 /* Some CPUs got no CPUID_SEP */
288 .ext_features = CPUID_EXT_MONITOR |
289 CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
290 /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
291 * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
292 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
293 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
294 .xlevel = 0x8000000A,
295 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
299 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
301 unsigned int i;
302 x86_def_t *def;
304 char *s = strdup(cpu_model);
305 char *featurestr, *name = strtok(s, ",");
306 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
307 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
308 int family = -1, model = -1, stepping = -1;
310 def = NULL;
311 for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
312 if (strcmp(name, x86_defs[i].name) == 0) {
313 def = &x86_defs[i];
314 break;
317 if (!def)
318 goto error;
319 memcpy(x86_cpu_def, def, sizeof(*def));
321 featurestr = strtok(NULL, ",");
323 while (featurestr) {
324 char *val;
325 if (featurestr[0] == '+') {
326 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
327 } else if (featurestr[0] == '-') {
328 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
329 } else if ((val = strchr(featurestr, '='))) {
330 *val = 0; val++;
331 if (!strcmp(featurestr, "family")) {
332 char *err;
333 family = strtol(val, &err, 10);
334 if (!*val || *err || family < 0) {
335 fprintf(stderr, "bad numerical value %s\n", val);
336 goto error;
338 x86_cpu_def->family = family;
339 } else if (!strcmp(featurestr, "model")) {
340 char *err;
341 model = strtol(val, &err, 10);
342 if (!*val || *err || model < 0 || model > 0xff) {
343 fprintf(stderr, "bad numerical value %s\n", val);
344 goto error;
346 x86_cpu_def->model = model;
347 } else if (!strcmp(featurestr, "stepping")) {
348 char *err;
349 stepping = strtol(val, &err, 10);
350 if (!*val || *err || stepping < 0 || stepping > 0xf) {
351 fprintf(stderr, "bad numerical value %s\n", val);
352 goto error;
354 x86_cpu_def->stepping = stepping;
355 } else if (!strcmp(featurestr, "vendor")) {
356 if (strlen(val) != 12) {
357 fprintf(stderr, "vendor string must be 12 chars long\n");
358 goto error;
360 x86_cpu_def->vendor1 = 0;
361 x86_cpu_def->vendor2 = 0;
362 x86_cpu_def->vendor3 = 0;
363 for(i = 0; i < 4; i++) {
364 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
365 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
366 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
368 x86_cpu_def->vendor_override = 1;
369 } else if (!strcmp(featurestr, "model_id")) {
370 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
371 val);
372 } else {
373 fprintf(stderr, "unrecognized feature %s\n", featurestr);
374 goto error;
376 } else {
377 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
378 goto error;
380 featurestr = strtok(NULL, ",");
382 x86_cpu_def->features |= plus_features;
383 x86_cpu_def->ext_features |= plus_ext_features;
384 x86_cpu_def->ext2_features |= plus_ext2_features;
385 x86_cpu_def->ext3_features |= plus_ext3_features;
386 x86_cpu_def->features &= ~minus_features;
387 x86_cpu_def->ext_features &= ~minus_ext_features;
388 x86_cpu_def->ext2_features &= ~minus_ext2_features;
389 x86_cpu_def->ext3_features &= ~minus_ext3_features;
390 free(s);
391 return 0;
393 error:
394 free(s);
395 return -1;
398 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
400 unsigned int i;
402 for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
403 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
406 static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
408 x86_def_t def1, *def = &def1;
410 if (cpu_x86_find_by_name(def, cpu_model) < 0)
411 return -1;
412 if (def->vendor1) {
413 env->cpuid_vendor1 = def->vendor1;
414 env->cpuid_vendor2 = def->vendor2;
415 env->cpuid_vendor3 = def->vendor3;
416 } else {
417 env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
418 env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
419 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
421 env->cpuid_vendor_override = def->vendor_override;
422 env->cpuid_level = def->level;
423 if (def->family > 0x0f)
424 env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
425 else
426 env->cpuid_version = def->family << 8;
427 env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
428 env->cpuid_version |= def->stepping;
429 env->cpuid_features = def->features;
430 env->pat = 0x0007040600070406ULL;
431 env->cpuid_ext_features = def->ext_features;
432 env->cpuid_ext2_features = def->ext2_features;
433 env->cpuid_xlevel = def->xlevel;
434 env->cpuid_ext3_features = def->ext3_features;
436 const char *model_id = def->model_id;
437 int c, len, i;
438 if (!model_id)
439 model_id = "";
440 len = strlen(model_id);
441 for(i = 0; i < 48; i++) {
442 if (i >= len)
443 c = '\0';
444 else
445 c = (uint8_t)model_id[i];
446 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
449 return 0;
452 /* NOTE: must be called outside the CPU execute loop */
453 void cpu_reset(CPUX86State *env)
455 int i;
457 if (qemu_loglevel_mask(CPU_LOG_RESET)) {
458 qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
459 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
462 memset(env, 0, offsetof(CPUX86State, breakpoints));
464 tlb_flush(env, 1);
466 env->old_exception = -1;
468 /* init to reset state */
470 #ifdef CONFIG_SOFTMMU
471 env->hflags |= HF_SOFTMMU_MASK;
472 #endif
473 env->hflags2 |= HF2_GIF_MASK;
475 cpu_x86_update_cr0(env, 0x60000010);
476 env->a20_mask = ~0x0;
477 env->smbase = 0x30000;
479 env->idt.limit = 0xffff;
480 env->gdt.limit = 0xffff;
481 env->ldt.limit = 0xffff;
482 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
483 env->tr.limit = 0xffff;
484 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
486 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
487 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
488 DESC_R_MASK | DESC_A_MASK);
489 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
490 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
491 DESC_A_MASK);
492 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
493 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
494 DESC_A_MASK);
495 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
496 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
497 DESC_A_MASK);
498 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
499 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
500 DESC_A_MASK);
501 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
502 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
503 DESC_A_MASK);
505 env->eip = 0xfff0;
506 env->regs[R_EDX] = env->cpuid_version;
508 env->eflags = 0x2;
510 /* FPU init */
511 for(i = 0;i < 8; i++)
512 env->fptags[i] = 1;
513 env->fpuc = 0x37f;
515 env->mxcsr = 0x1f80;
517 memset(env->dr, 0, sizeof(env->dr));
518 env->dr[6] = DR6_FIXED_1;
519 env->dr[7] = DR7_FIXED_1;
520 cpu_breakpoint_remove_all(env, BP_CPU);
521 cpu_watchpoint_remove_all(env, BP_CPU);
524 void cpu_x86_close(CPUX86State *env)
526 qemu_free(env);
529 /***********************************************************/
530 /* x86 debug */
/* Printable names for the lazy condition-code states; the order must
 * match the CC_OP_* enumeration (DYNAMIC, EFLAGS, then one B/W/L/Q
 * quartet per arithmetic group). */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB", "MULW", "MULL", "MULQ",

    "ADDB", "ADDW", "ADDL", "ADDQ",

    "ADCB", "ADCW", "ADCL", "ADCQ",

    "SUBB", "SUBW", "SUBL", "SUBQ",

    "SBBB", "SBBW", "SBBL", "SBBQ",

    "LOGICB", "LOGICW", "LOGICL", "LOGICQ",

    "INCB", "INCW", "INCL", "INCQ",

    "DECB", "DECW", "DECL", "DECQ",

    "SHLB", "SHLW", "SHLL", "SHLQ",

    "SARB", "SARW", "SARL", "SARQ",
};
587 static void
588 cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
589 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
590 const char *name, struct SegmentCache *sc)
592 #ifdef TARGET_X86_64
593 if (env->hflags & HF_CS64_MASK) {
594 cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
595 sc->selector, sc->base, sc->limit, sc->flags);
596 } else
597 #endif
599 cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
600 (uint32_t)sc->base, sc->limit, sc->flags);
603 if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
604 goto done;
606 cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
607 if (sc->flags & DESC_S_MASK) {
608 if (sc->flags & DESC_CS_MASK) {
609 cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
610 ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
611 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
612 (sc->flags & DESC_R_MASK) ? 'R' : '-');
613 } else {
614 cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS " : "DS16");
615 cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
616 (sc->flags & DESC_W_MASK) ? 'W' : '-');
618 cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
619 } else {
620 static const char *sys_type_name[2][16] = {
621 { /* 32 bit mode */
622 "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
623 "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
624 "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
625 "CallGate32", "Reserved", "IntGate32", "TrapGate32"
627 { /* 64 bit mode */
628 "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
629 "Reserved", "Reserved", "Reserved", "Reserved",
630 "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
631 "Reserved", "IntGate64", "TrapGate64"
634 cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
635 [(sc->flags & DESC_TYPE_MASK)
636 >> DESC_TYPE_SHIFT]);
638 done:
639 cpu_fprintf(f, "\n");
642 void cpu_dump_state(CPUState *env, FILE *f,
643 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
644 int flags)
646 int eflags, i, nb;
647 char cc_op_name[32];
648 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
650 if (kvm_enabled())
651 kvm_arch_get_registers(env);
653 eflags = env->eflags;
654 #ifdef TARGET_X86_64
655 if (env->hflags & HF_CS64_MASK) {
656 cpu_fprintf(f,
657 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
658 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
659 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
660 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
661 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
662 env->regs[R_EAX],
663 env->regs[R_EBX],
664 env->regs[R_ECX],
665 env->regs[R_EDX],
666 env->regs[R_ESI],
667 env->regs[R_EDI],
668 env->regs[R_EBP],
669 env->regs[R_ESP],
670 env->regs[8],
671 env->regs[9],
672 env->regs[10],
673 env->regs[11],
674 env->regs[12],
675 env->regs[13],
676 env->regs[14],
677 env->regs[15],
678 env->eip, eflags,
679 eflags & DF_MASK ? 'D' : '-',
680 eflags & CC_O ? 'O' : '-',
681 eflags & CC_S ? 'S' : '-',
682 eflags & CC_Z ? 'Z' : '-',
683 eflags & CC_A ? 'A' : '-',
684 eflags & CC_P ? 'P' : '-',
685 eflags & CC_C ? 'C' : '-',
686 env->hflags & HF_CPL_MASK,
687 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
688 (int)(env->a20_mask >> 20) & 1,
689 (env->hflags >> HF_SMM_SHIFT) & 1,
690 env->halted);
691 } else
692 #endif
694 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
695 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
696 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
697 (uint32_t)env->regs[R_EAX],
698 (uint32_t)env->regs[R_EBX],
699 (uint32_t)env->regs[R_ECX],
700 (uint32_t)env->regs[R_EDX],
701 (uint32_t)env->regs[R_ESI],
702 (uint32_t)env->regs[R_EDI],
703 (uint32_t)env->regs[R_EBP],
704 (uint32_t)env->regs[R_ESP],
705 (uint32_t)env->eip, eflags,
706 eflags & DF_MASK ? 'D' : '-',
707 eflags & CC_O ? 'O' : '-',
708 eflags & CC_S ? 'S' : '-',
709 eflags & CC_Z ? 'Z' : '-',
710 eflags & CC_A ? 'A' : '-',
711 eflags & CC_P ? 'P' : '-',
712 eflags & CC_C ? 'C' : '-',
713 env->hflags & HF_CPL_MASK,
714 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
715 (int)(env->a20_mask >> 20) & 1,
716 (env->hflags >> HF_SMM_SHIFT) & 1,
717 env->halted);
720 for(i = 0; i < 6; i++) {
721 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
722 &env->segs[i]);
724 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
725 cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);
727 #ifdef TARGET_X86_64
728 if (env->hflags & HF_LMA_MASK) {
729 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
730 env->gdt.base, env->gdt.limit);
731 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
732 env->idt.base, env->idt.limit);
733 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
734 (uint32_t)env->cr[0],
735 env->cr[2],
736 env->cr[3],
737 (uint32_t)env->cr[4]);
738 for(i = 0; i < 4; i++)
739 cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
740 cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
741 env->dr[6], env->dr[7]);
742 } else
743 #endif
745 cpu_fprintf(f, "GDT= %08x %08x\n",
746 (uint32_t)env->gdt.base, env->gdt.limit);
747 cpu_fprintf(f, "IDT= %08x %08x\n",
748 (uint32_t)env->idt.base, env->idt.limit);
749 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
750 (uint32_t)env->cr[0],
751 (uint32_t)env->cr[2],
752 (uint32_t)env->cr[3],
753 (uint32_t)env->cr[4]);
754 for(i = 0; i < 4; i++)
755 cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
756 cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
758 if (flags & X86_DUMP_CCOP) {
759 if ((unsigned)env->cc_op < CC_OP_NB)
760 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
761 else
762 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
763 #ifdef TARGET_X86_64
764 if (env->hflags & HF_CS64_MASK) {
765 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
766 env->cc_src, env->cc_dst,
767 cc_op_name);
768 } else
769 #endif
771 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
772 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
773 cc_op_name);
776 if (flags & X86_DUMP_FPU) {
777 int fptag;
778 fptag = 0;
779 for(i = 0; i < 8; i++) {
780 fptag |= ((!env->fptags[i]) << i);
782 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
783 env->fpuc,
784 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
785 env->fpstt,
786 fptag,
787 env->mxcsr);
788 for(i=0;i<8;i++) {
789 #if defined(USE_X86LDOUBLE)
790 union {
791 long double d;
792 struct {
793 uint64_t lower;
794 uint16_t upper;
795 } l;
796 } tmp;
797 tmp.d = env->fpregs[i].d;
798 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
799 i, tmp.l.lower, tmp.l.upper);
800 #else
801 cpu_fprintf(f, "FPR%d=%016" PRIx64,
802 i, env->fpregs[i].mmx.q);
803 #endif
804 if ((i & 1) == 1)
805 cpu_fprintf(f, "\n");
806 else
807 cpu_fprintf(f, " ");
809 if (env->hflags & HF_CS64_MASK)
810 nb = 16;
811 else
812 nb = 8;
813 for(i=0;i<nb;i++) {
814 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
816 env->xmm_regs[i].XMM_L(3),
817 env->xmm_regs[i].XMM_L(2),
818 env->xmm_regs[i].XMM_L(1),
819 env->xmm_regs[i].XMM_L(0));
820 if ((i & 1) == 1)
821 cpu_fprintf(f, "\n");
822 else
823 cpu_fprintf(f, " ");
828 /***********************************************************/
829 /* x86 mmu */
830 /* XXX: add PGE support */
832 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
834 a20_state = (a20_state != 0);
835 if (a20_state != ((env->a20_mask >> 20) & 1)) {
836 #if defined(DEBUG_MMU)
837 printf("A20 update: a20=%d\n", a20_state);
838 #endif
839 /* if the cpu is currently executing code, we must unlink it and
840 all the potentially executing TB */
841 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
843 /* when a20 is changed, all the MMU mappings are invalid, so
844 we must flush everything */
845 tlb_flush(env, 1);
846 env->a20_mask = (~0x100000) | (a20_state << 20);
850 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
852 int pe_state;
854 #if defined(DEBUG_MMU)
855 printf("CR0 update: CR0=0x%08x\n", new_cr0);
856 #endif
857 if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
858 (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
859 tlb_flush(env, 1);
862 #ifdef TARGET_X86_64
863 if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
864 (env->efer & MSR_EFER_LME)) {
865 /* enter in long mode */
866 /* XXX: generate an exception */
867 if (!(env->cr[4] & CR4_PAE_MASK))
868 return;
869 env->efer |= MSR_EFER_LMA;
870 env->hflags |= HF_LMA_MASK;
871 } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
872 (env->efer & MSR_EFER_LMA)) {
873 /* exit long mode */
874 env->efer &= ~MSR_EFER_LMA;
875 env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
876 env->eip &= 0xffffffff;
878 #endif
879 env->cr[0] = new_cr0 | CR0_ET_MASK;
881 /* update PE flag in hidden flags */
882 pe_state = (env->cr[0] & CR0_PE_MASK);
883 env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
884 /* ensure that ADDSEG is always set in real mode */
885 env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
886 /* update FPU flags */
887 env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
888 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
891 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
892 the PDPT */
893 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
895 env->cr[3] = new_cr3;
896 if (env->cr[0] & CR0_PG_MASK) {
897 #if defined(DEBUG_MMU)
898 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
899 #endif
900 tlb_flush(env, 0);
904 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
906 #if defined(DEBUG_MMU)
907 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
908 #endif
909 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
910 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
911 tlb_flush(env, 1);
913 /* SSE handling */
914 if (!(env->cpuid_features & CPUID_SSE))
915 new_cr4 &= ~CR4_OSFXSR_MASK;
916 if (new_cr4 & CR4_OSFXSR_MASK)
917 env->hflags |= HF_OSFXSR_MASK;
918 else
919 env->hflags &= ~HF_OSFXSR_MASK;
921 env->cr[4] = new_cr4;
924 #if defined(CONFIG_USER_ONLY)
926 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
927 int is_write, int mmu_idx, int is_softmmu)
929 /* user mode only emulation */
930 is_write &= 1;
931 env->cr[2] = addr;
932 env->error_code = (is_write << PG_ERROR_W_BIT);
933 env->error_code |= PG_ERROR_U_MASK;
934 env->exception_index = EXCP0E_PAGE;
935 return 1;
938 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
940 return addr;
943 #else
945 /* XXX: This value should match the one returned by CPUID
946 * and in exec.c */
947 #if defined(CONFIG_KQEMU)
948 #define PHYS_ADDR_MASK 0xfffff000LL
949 #else
950 # if defined(TARGET_X86_64)
951 # define PHYS_ADDR_MASK 0xfffffff000LL
952 # else
953 # define PHYS_ADDR_MASK 0xffffff000LL
954 # endif
955 #endif
957 /* return value:
958 -1 = cannot handle fault
959 0 = nothing more to do
960 1 = generate PF fault
961 2 = soft MMU activation required for this block
963 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
964 int is_write1, int mmu_idx, int is_softmmu)
966 uint64_t ptep, pte;
967 target_ulong pde_addr, pte_addr;
968 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
969 target_phys_addr_t paddr;
970 uint32_t page_offset;
971 target_ulong vaddr, virt_addr;
973 is_user = mmu_idx == MMU_USER_IDX;
974 #if defined(DEBUG_MMU)
975 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
976 addr, is_write1, is_user, env->eip);
977 #endif
978 is_write = is_write1 & 1;
980 if (!(env->cr[0] & CR0_PG_MASK)) {
981 pte = addr;
982 virt_addr = addr & TARGET_PAGE_MASK;
983 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
984 page_size = 4096;
985 goto do_mapping;
988 if (env->cr[4] & CR4_PAE_MASK) {
989 uint64_t pde, pdpe;
990 target_ulong pdpe_addr;
992 #ifdef TARGET_X86_64
993 if (env->hflags & HF_LMA_MASK) {
994 uint64_t pml4e_addr, pml4e;
995 int32_t sext;
997 /* test virtual address sign extension */
998 sext = (int64_t)addr >> 47;
999 if (sext != 0 && sext != -1) {
1000 env->error_code = 0;
1001 env->exception_index = EXCP0D_GPF;
1002 return 1;
1005 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
1006 env->a20_mask;
1007 pml4e = ldq_phys(pml4e_addr);
1008 if (!(pml4e & PG_PRESENT_MASK)) {
1009 error_code = 0;
1010 goto do_fault;
1012 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
1013 error_code = PG_ERROR_RSVD_MASK;
1014 goto do_fault;
1016 if (!(pml4e & PG_ACCESSED_MASK)) {
1017 pml4e |= PG_ACCESSED_MASK;
1018 stl_phys_notdirty(pml4e_addr, pml4e);
1020 ptep = pml4e ^ PG_NX_MASK;
1021 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
1022 env->a20_mask;
1023 pdpe = ldq_phys(pdpe_addr);
1024 if (!(pdpe & PG_PRESENT_MASK)) {
1025 error_code = 0;
1026 goto do_fault;
1028 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
1029 error_code = PG_ERROR_RSVD_MASK;
1030 goto do_fault;
1032 ptep &= pdpe ^ PG_NX_MASK;
1033 if (!(pdpe & PG_ACCESSED_MASK)) {
1034 pdpe |= PG_ACCESSED_MASK;
1035 stl_phys_notdirty(pdpe_addr, pdpe);
1037 } else
1038 #endif
1040 /* XXX: load them when cr3 is loaded ? */
1041 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1042 env->a20_mask;
1043 pdpe = ldq_phys(pdpe_addr);
1044 if (!(pdpe & PG_PRESENT_MASK)) {
1045 error_code = 0;
1046 goto do_fault;
1048 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
1051 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
1052 env->a20_mask;
1053 pde = ldq_phys(pde_addr);
1054 if (!(pde & PG_PRESENT_MASK)) {
1055 error_code = 0;
1056 goto do_fault;
1058 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
1059 error_code = PG_ERROR_RSVD_MASK;
1060 goto do_fault;
1062 ptep &= pde ^ PG_NX_MASK;
1063 if (pde & PG_PSE_MASK) {
1064 /* 2 MB page */
1065 page_size = 2048 * 1024;
1066 ptep ^= PG_NX_MASK;
1067 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1068 goto do_fault_protect;
1069 if (is_user) {
1070 if (!(ptep & PG_USER_MASK))
1071 goto do_fault_protect;
1072 if (is_write && !(ptep & PG_RW_MASK))
1073 goto do_fault_protect;
1074 } else {
1075 if ((env->cr[0] & CR0_WP_MASK) &&
1076 is_write && !(ptep & PG_RW_MASK))
1077 goto do_fault_protect;
1079 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1080 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1081 pde |= PG_ACCESSED_MASK;
1082 if (is_dirty)
1083 pde |= PG_DIRTY_MASK;
1084 stl_phys_notdirty(pde_addr, pde);
1086 /* align to page_size */
1087 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
1088 virt_addr = addr & ~(page_size - 1);
1089 } else {
1090 /* 4 KB page */
1091 if (!(pde & PG_ACCESSED_MASK)) {
1092 pde |= PG_ACCESSED_MASK;
1093 stl_phys_notdirty(pde_addr, pde);
1095 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
1096 env->a20_mask;
1097 pte = ldq_phys(pte_addr);
1098 if (!(pte & PG_PRESENT_MASK)) {
1099 error_code = 0;
1100 goto do_fault;
1102 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
1103 error_code = PG_ERROR_RSVD_MASK;
1104 goto do_fault;
1106 /* combine pde and pte nx, user and rw protections */
1107 ptep &= pte ^ PG_NX_MASK;
1108 ptep ^= PG_NX_MASK;
1109 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1110 goto do_fault_protect;
1111 if (is_user) {
1112 if (!(ptep & PG_USER_MASK))
1113 goto do_fault_protect;
1114 if (is_write && !(ptep & PG_RW_MASK))
1115 goto do_fault_protect;
1116 } else {
1117 if ((env->cr[0] & CR0_WP_MASK) &&
1118 is_write && !(ptep & PG_RW_MASK))
1119 goto do_fault_protect;
1121 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1122 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1123 pte |= PG_ACCESSED_MASK;
1124 if (is_dirty)
1125 pte |= PG_DIRTY_MASK;
1126 stl_phys_notdirty(pte_addr, pte);
1128 page_size = 4096;
1129 virt_addr = addr & ~0xfff;
1130 pte = pte & (PHYS_ADDR_MASK | 0xfff);
1132 } else {
1133 uint32_t pde;
1135 /* page directory entry */
1136 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
1137 env->a20_mask;
1138 pde = ldl_phys(pde_addr);
1139 if (!(pde & PG_PRESENT_MASK)) {
1140 error_code = 0;
1141 goto do_fault;
1143 /* if PSE bit is set, then we use a 4MB page */
1144 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1145 page_size = 4096 * 1024;
1146 if (is_user) {
1147 if (!(pde & PG_USER_MASK))
1148 goto do_fault_protect;
1149 if (is_write && !(pde & PG_RW_MASK))
1150 goto do_fault_protect;
1151 } else {
1152 if ((env->cr[0] & CR0_WP_MASK) &&
1153 is_write && !(pde & PG_RW_MASK))
1154 goto do_fault_protect;
1156 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1157 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1158 pde |= PG_ACCESSED_MASK;
1159 if (is_dirty)
1160 pde |= PG_DIRTY_MASK;
1161 stl_phys_notdirty(pde_addr, pde);
1164 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1165 ptep = pte;
1166 virt_addr = addr & ~(page_size - 1);
1167 } else {
1168 if (!(pde & PG_ACCESSED_MASK)) {
1169 pde |= PG_ACCESSED_MASK;
1170 stl_phys_notdirty(pde_addr, pde);
1173 /* page directory entry */
1174 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1175 env->a20_mask;
1176 pte = ldl_phys(pte_addr);
1177 if (!(pte & PG_PRESENT_MASK)) {
1178 error_code = 0;
1179 goto do_fault;
1181 /* combine pde and pte user and rw protections */
1182 ptep = pte & pde;
1183 if (is_user) {
1184 if (!(ptep & PG_USER_MASK))
1185 goto do_fault_protect;
1186 if (is_write && !(ptep & PG_RW_MASK))
1187 goto do_fault_protect;
1188 } else {
1189 if ((env->cr[0] & CR0_WP_MASK) &&
1190 is_write && !(ptep & PG_RW_MASK))
1191 goto do_fault_protect;
1193 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1194 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1195 pte |= PG_ACCESSED_MASK;
1196 if (is_dirty)
1197 pte |= PG_DIRTY_MASK;
1198 stl_phys_notdirty(pte_addr, pte);
1200 page_size = 4096;
1201 virt_addr = addr & ~0xfff;
1204 /* the page can be put in the TLB */
1205 prot = PAGE_READ;
1206 if (!(ptep & PG_NX_MASK))
1207 prot |= PAGE_EXEC;
1208 if (pte & PG_DIRTY_MASK) {
1209 /* only set write access if already dirty... otherwise wait
1210 for dirty access */
1211 if (is_user) {
1212 if (ptep & PG_RW_MASK)
1213 prot |= PAGE_WRITE;
1214 } else {
1215 if (!(env->cr[0] & CR0_WP_MASK) ||
1216 (ptep & PG_RW_MASK))
1217 prot |= PAGE_WRITE;
1220 do_mapping:
1221 pte = pte & env->a20_mask;
1223 /* Even if 4MB pages, we map only one 4KB page in the cache to
1224 avoid filling it too fast */
1225 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1226 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1227 vaddr = virt_addr + page_offset;
1229 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1230 return ret;
1231 do_fault_protect:
1232 error_code = PG_ERROR_P_MASK;
1233 do_fault:
1234 error_code |= (is_write << PG_ERROR_W_BIT);
1235 if (is_user)
1236 error_code |= PG_ERROR_U_MASK;
1237 if (is_write1 == 2 &&
1238 (env->efer & MSR_EFER_NXE) &&
1239 (env->cr[4] & CR4_PAE_MASK))
1240 error_code |= PG_ERROR_I_D_MASK;
1241 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
1242 /* cr2 is not modified in case of exceptions */
1243 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
1244 addr);
1245 } else {
1246 env->cr[2] = addr;
1248 env->error_code = error_code;
1249 env->exception_index = EXCP0E_PAGE;
1250 return 1;
1253 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1255 target_ulong pde_addr, pte_addr;
1256 uint64_t pte;
1257 target_phys_addr_t paddr;
1258 uint32_t page_offset;
1259 int page_size;
1261 if (env->cr[4] & CR4_PAE_MASK) {
1262 target_ulong pdpe_addr;
1263 uint64_t pde, pdpe;
1265 #ifdef TARGET_X86_64
1266 if (env->hflags & HF_LMA_MASK) {
1267 uint64_t pml4e_addr, pml4e;
1268 int32_t sext;
1270 /* test virtual address sign extension */
1271 sext = (int64_t)addr >> 47;
1272 if (sext != 0 && sext != -1)
1273 return -1;
1275 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
1276 env->a20_mask;
1277 pml4e = ldq_phys(pml4e_addr);
1278 if (!(pml4e & PG_PRESENT_MASK))
1279 return -1;
1281 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
1282 env->a20_mask;
1283 pdpe = ldq_phys(pdpe_addr);
1284 if (!(pdpe & PG_PRESENT_MASK))
1285 return -1;
1286 } else
1287 #endif
1289 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1290 env->a20_mask;
1291 pdpe = ldq_phys(pdpe_addr);
1292 if (!(pdpe & PG_PRESENT_MASK))
1293 return -1;
1296 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
1297 env->a20_mask;
1298 pde = ldq_phys(pde_addr);
1299 if (!(pde & PG_PRESENT_MASK)) {
1300 return -1;
1302 if (pde & PG_PSE_MASK) {
1303 /* 2 MB page */
1304 page_size = 2048 * 1024;
1305 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1306 } else {
1307 /* 4 KB page */
1308 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
1309 env->a20_mask;
1310 page_size = 4096;
1311 pte = ldq_phys(pte_addr);
1313 if (!(pte & PG_PRESENT_MASK))
1314 return -1;
1315 } else {
1316 uint32_t pde;
1318 if (!(env->cr[0] & CR0_PG_MASK)) {
1319 pte = addr;
1320 page_size = 4096;
1321 } else {
1322 /* page directory entry */
1323 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
1324 pde = ldl_phys(pde_addr);
1325 if (!(pde & PG_PRESENT_MASK))
1326 return -1;
1327 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1328 pte = pde & ~0x003ff000; /* align to 4MB */
1329 page_size = 4096 * 1024;
1330 } else {
1331 /* page directory entry */
1332 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
1333 pte = ldl_phys(pte_addr);
1334 if (!(pte & PG_PRESENT_MASK))
1335 return -1;
1336 page_size = 4096;
1339 pte = pte & env->a20_mask;
1342 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1343 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1344 return paddr;
1347 void hw_breakpoint_insert(CPUState *env, int index)
1349 int type, err = 0;
1351 switch (hw_breakpoint_type(env->dr[7], index)) {
1352 case 0:
1353 if (hw_breakpoint_enabled(env->dr[7], index))
1354 err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
1355 &env->cpu_breakpoint[index]);
1356 break;
1357 case 1:
1358 type = BP_CPU | BP_MEM_WRITE;
1359 goto insert_wp;
1360 case 2:
1361 /* No support for I/O watchpoints yet */
1362 break;
1363 case 3:
1364 type = BP_CPU | BP_MEM_ACCESS;
1365 insert_wp:
1366 err = cpu_watchpoint_insert(env, env->dr[index],
1367 hw_breakpoint_len(env->dr[7], index),
1368 type, &env->cpu_watchpoint[index]);
1369 break;
1371 if (err)
1372 env->cpu_breakpoint[index] = NULL;
1375 void hw_breakpoint_remove(CPUState *env, int index)
1377 if (!env->cpu_breakpoint[index])
1378 return;
1379 switch (hw_breakpoint_type(env->dr[7], index)) {
1380 case 0:
1381 if (hw_breakpoint_enabled(env->dr[7], index))
1382 cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
1383 break;
1384 case 1:
1385 case 3:
1386 cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
1387 break;
1388 case 2:
1389 /* No support for I/O watchpoints yet */
1390 break;
1394 int check_hw_breakpoints(CPUState *env, int force_dr6_update)
1396 target_ulong dr6;
1397 int reg, type;
1398 int hit_enabled = 0;
1400 dr6 = env->dr[6] & ~0xf;
1401 for (reg = 0; reg < 4; reg++) {
1402 type = hw_breakpoint_type(env->dr[7], reg);
1403 if ((type == 0 && env->dr[reg] == env->eip) ||
1404 ((type & 1) && env->cpu_watchpoint[reg] &&
1405 (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
1406 dr6 |= 1 << reg;
1407 if (hw_breakpoint_enabled(env->dr[7], reg))
1408 hit_enabled = 1;
1411 if (hit_enabled || force_dr6_update)
1412 env->dr[6] = dr6;
1413 return hit_enabled;
1416 static CPUDebugExcpHandler *prev_debug_excp_handler;
1418 void raise_exception(int exception_index);
/* Debug exception hook, installed via cpu_set_debug_excp_handler() in
 * cpu_x86_init() below.  Decides whether a debug stop belongs to the
 * guest (BP_CPU, i.e. an x86 hardware breakpoint programmed through
 * DR0-DR7) and, if so, reflects it back into the guest as a #DB
 * exception; in any case it then chains to the previously installed
 * handler (e.g. for gdbstub-owned breakpoints).
 */
static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        /* A data watchpoint fired. */
        if (env->watchpoint_hit->flags & BP_CPU) {
            /* Guest hardware watchpoint: recompute DR6 and raise #DB if
               an enabled breakpoint actually hit; otherwise silently
               resume execution. */
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        /* An instruction breakpoint: look for one at the current EIP. */
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    /* Guest hardware breakpoint: force the DR6 update
                       and deliver #DB. */
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    /* Chain to whatever debug handler was registered before ours. */
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}
1445 #endif /* !CONFIG_USER_ONLY */
/* Execute the CPUID instruction on the *host* CPU and return its raw
 * results.  Without KVM support compiled in the body is empty and the
 * output parameters are left untouched (both call sites below only use
 * it under kvm_enabled()).  NULL output pointers are allowed and are
 * simply skipped.
 */
static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM) || defined(USE_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    /* On 32-bit hosts, save/restore all registers with pusha/popa and
       store the results through %esi rather than using register output
       operands — presumably so EBX (the PIC base register) is never
       clobbered from the compiler's point of view.  NOTE(review):
       confirm the PIC motivation. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    /* Copy out only the registers the caller asked for. */
    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}
1482 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
1483 uint32_t *eax, uint32_t *ebx,
1484 uint32_t *ecx, uint32_t *edx)
1486 /* test if maximum index reached */
1487 if (index & 0x80000000) {
1488 if (index > env->cpuid_xlevel)
1489 index = env->cpuid_level;
1490 } else {
1491 if (index > env->cpuid_level)
1492 index = env->cpuid_level;
1495 switch(index) {
1496 case 0:
1497 *eax = env->cpuid_level;
1498 *ebx = env->cpuid_vendor1;
1499 *edx = env->cpuid_vendor2;
1500 *ecx = env->cpuid_vendor3;
1502 /* sysenter isn't supported on compatibility mode on AMD. and syscall
1503 * isn't supported in compatibility mode on Intel. so advertise the
1504 * actuall cpu, and say goodbye to migration between different vendors
1505 * is you use compatibility mode. */
1506 if (kvm_enabled() && !env->cpuid_vendor_override)
1507 host_cpuid(0, 0, NULL, ebx, ecx, edx);
1508 break;
1509 case 1:
1510 *eax = env->cpuid_version;
1511 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1512 *ecx = env->cpuid_ext_features;
1513 *edx = env->cpuid_features;
1515 /* "Hypervisor present" bit required for Microsoft SVVP */
1516 if (kvm_enabled())
1517 *ecx |= (1 << 31);
1518 break;
1519 case 2:
1520 /* cache info: needed for Pentium Pro compatibility */
1521 *eax = 1;
1522 *ebx = 0;
1523 *ecx = 0;
1524 *edx = 0x2c307d;
1525 break;
1526 case 4:
1527 /* cache info: needed for Core compatibility */
1528 switch (count) {
1529 case 0: /* L1 dcache info */
1530 *eax = 0x0000121;
1531 *ebx = 0x1c0003f;
1532 *ecx = 0x000003f;
1533 *edx = 0x0000001;
1534 break;
1535 case 1: /* L1 icache info */
1536 *eax = 0x0000122;
1537 *ebx = 0x1c0003f;
1538 *ecx = 0x000003f;
1539 *edx = 0x0000001;
1540 break;
1541 case 2: /* L2 cache info */
1542 *eax = 0x0000143;
1543 *ebx = 0x3c0003f;
1544 *ecx = 0x0000fff;
1545 *edx = 0x0000001;
1546 break;
1547 default: /* end of info */
1548 *eax = 0;
1549 *ebx = 0;
1550 *ecx = 0;
1551 *edx = 0;
1552 break;
1554 break;
1555 case 5:
1556 /* mwait info: needed for Core compatibility */
1557 *eax = 0; /* Smallest monitor-line size in bytes */
1558 *ebx = 0; /* Largest monitor-line size in bytes */
1559 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
1560 *edx = 0;
1561 break;
1562 case 6:
1563 /* Thermal and Power Leaf */
1564 *eax = 0;
1565 *ebx = 0;
1566 *ecx = 0;
1567 *edx = 0;
1568 break;
1569 case 9:
1570 /* Direct Cache Access Information Leaf */
1571 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
1572 *ebx = 0;
1573 *ecx = 0;
1574 *edx = 0;
1575 break;
1576 case 0xA:
1577 /* Architectural Performance Monitoring Leaf */
1578 *eax = 0;
1579 *ebx = 0;
1580 *ecx = 0;
1581 *edx = 0;
1582 break;
1583 case 0x80000000:
1584 *eax = env->cpuid_xlevel;
1585 *ebx = env->cpuid_vendor1;
1586 *edx = env->cpuid_vendor2;
1587 *ecx = env->cpuid_vendor3;
1588 break;
1589 case 0x80000001:
1590 *eax = env->cpuid_features;
1591 *ebx = 0;
1592 *ecx = env->cpuid_ext3_features;
1593 *edx = env->cpuid_ext2_features;
1595 if (kvm_enabled()) {
1596 uint32_t h_eax, h_edx;
1598 host_cpuid(index, 0, &h_eax, NULL, NULL, &h_edx);
1600 /* disable CPU features that the host does not support */
1602 /* long mode */
1603 if ((h_edx & 0x20000000) == 0 /* || !lm_capable_kernel */)
1604 *edx &= ~0x20000000;
1605 /* syscall */
1606 if ((h_edx & 0x00000800) == 0)
1607 *edx &= ~0x00000800;
1608 /* nx */
1609 if ((h_edx & 0x00100000) == 0)
1610 *edx &= ~0x00100000;
1612 /* disable CPU features that KVM cannot support */
1614 /* svm */
1615 if (!kvm_nested)
1616 *ecx &= ~4UL;
1617 /* 3dnow */
1618 *edx &= ~0xc0000000;
1620 break;
1621 case 0x80000002:
1622 case 0x80000003:
1623 case 0x80000004:
1624 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1625 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1626 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1627 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1628 break;
1629 case 0x80000005:
1630 /* cache info (L1 cache) */
1631 *eax = 0x01ff01ff;
1632 *ebx = 0x01ff01ff;
1633 *ecx = 0x40020140;
1634 *edx = 0x40020140;
1635 break;
1636 case 0x80000006:
1637 /* cache info (L2 cache) */
1638 *eax = 0;
1639 *ebx = 0x42004200;
1640 *ecx = 0x02008140;
1641 *edx = 0;
1642 break;
1643 case 0x80000008:
1644 /* virtual & phys address size in low 2 bytes. */
1645 /* XXX: This value must match the one used in the MMU code. */
1646 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
1647 /* 64 bit processor */
1648 #if defined(CONFIG_KQEMU)
1649 *eax = 0x00003020; /* 48 bits virtual, 32 bits physical */
1650 #else
1651 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1652 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
1653 #endif
1654 } else {
1655 #if defined(CONFIG_KQEMU)
1656 *eax = 0x00000020; /* 32 bits physical */
1657 #else
1658 if (env->cpuid_features & CPUID_PSE36)
1659 *eax = 0x00000024; /* 36 bits physical */
1660 else
1661 *eax = 0x00000020; /* 32 bits physical */
1662 #endif
1664 *ebx = 0;
1665 *ecx = 0;
1666 *edx = 0;
1667 break;
1668 case 0x8000000A:
1669 *eax = 0x00000001; /* SVM Revision */
1670 *ebx = 0x00000010; /* nr of ASIDs */
1671 *ecx = 0;
1672 *edx = 0; /* optional features */
1673 break;
1674 default:
1675 /* reserved values: zero */
1676 *eax = 0;
1677 *ebx = 0;
1678 *ecx = 0;
1679 *edx = 0;
1680 break;
1684 CPUX86State *cpu_x86_init(const char *cpu_model)
1686 CPUX86State *env;
1687 static int inited;
1689 env = qemu_mallocz(sizeof(CPUX86State));
1690 cpu_exec_init(env);
1691 env->cpu_model_str = cpu_model;
1693 /* init various static tables */
1694 if (!inited) {
1695 inited = 1;
1696 optimize_flags_init();
1697 #ifndef CONFIG_USER_ONLY
1698 prev_debug_excp_handler =
1699 cpu_set_debug_excp_handler(breakpoint_handler);
1700 #endif
1702 if (cpu_x86_register(env, cpu_model) < 0) {
1703 cpu_x86_close(env);
1704 return NULL;
1706 cpu_reset(env);
1707 #ifdef CONFIG_KQEMU
1708 kqemu_init(env);
1709 #endif
1711 return env;