Fix undefined pthread_sigmask warning on OpenBSD
[qemu/mini2440.git] / target-i386 / helper.c
blobc2e1a88ad4213e9c08cd4608d9fae40610dae72b
1 /*
2 * i386 helpers (without register variable usage)
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
26 #include <assert.h>
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "svm.h"
31 #include "qemu-common.h"
33 //#define DEBUG_MMU
35 static int cpu_x86_register (CPUX86State *env, const char *cpu_model);
/* Set the bit corresponding to the CPUID feature named 'flagname' in one of
 * the four feature-word bitmaps (CPUID.1 EDX, CPUID.1 ECX, CPUID.80000001 EDX,
 * CPUID.80000001 ECX).  The tables are searched in that order and the first
 * match wins; if no table contains the name, a diagnostic is printed to
 * stderr and no bitmap is modified.
 */
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification". In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */, NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx", NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        /* "mtrr" was previously misspelled "mttr", so the flag could never
           be requested by its real Linux name */
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */, "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    for ( i = 0 ; i < 32 ; i++ )
        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
            *features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            return;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            return;
        }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
94 CPUX86State *cpu_x86_init(const char *cpu_model)
96 CPUX86State *env;
97 static int inited;
99 env = qemu_mallocz(sizeof(CPUX86State));
100 if (!env)
101 return NULL;
102 cpu_exec_init(env);
103 env->cpu_model_str = cpu_model;
105 /* init various static tables */
106 if (!inited) {
107 inited = 1;
108 optimize_flags_init();
110 if (cpu_x86_register(env, cpu_model) < 0) {
111 cpu_x86_close(env);
112 return NULL;
114 cpu_reset(env);
115 #ifdef USE_KQEMU
116 kqemu_init(env);
117 #endif
118 return env;
/* Static description of one emulated CPU model: CPUID identification
   values plus the four feature-flag words reported to the guest. */
typedef struct x86_def_t {
    const char *name;                    /* model name as used with -cpu */
    uint32_t level;                      /* highest standard CPUID leaf */
    uint32_t vendor1, vendor2, vendor3;  /* vendor string as three little-endian
                                            dwords; all zero selects the Intel
                                            default (see cpu_x86_register) */
    int family;
    int model;
    int stepping;
    /* CPUID.1 EDX / CPUID.1 ECX / CPUID.80000001 EDX / CPUID.80000001 ECX */
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;                     /* highest extended (0x8000xxxx) leaf */
    char model_id[48];                   /* brand string, CPUID 0x80000002..4 */
} x86_def_t;
133 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
134 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
135 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
136 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
137 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
138 CPUID_PSE36 | CPUID_FXSR)
139 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
140 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
141 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
142 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
143 CPUID_PAE | CPUID_SEP | CPUID_APIC)
144 static x86_def_t x86_defs[] = {
145 #ifdef TARGET_X86_64
147 .name = "qemu64",
148 .level = 2,
149 .vendor1 = CPUID_VENDOR_AMD_1,
150 .vendor2 = CPUID_VENDOR_AMD_2,
151 .vendor3 = CPUID_VENDOR_AMD_3,
152 .family = 6,
153 .model = 2,
154 .stepping = 3,
155 .features = PPRO_FEATURES |
156 /* these features are needed for Win64 and aren't fully implemented */
157 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
158 /* this feature is needed for Solaris and isn't fully implemented */
159 CPUID_PSE36,
160 .ext_features = CPUID_EXT_SSE3,
161 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
162 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
163 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
164 .ext3_features = CPUID_EXT3_SVM,
165 .xlevel = 0x8000000A,
166 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
169 .name = "core2duo",
170 .level = 10,
171 .family = 6,
172 .model = 15,
173 .stepping = 11,
174 /* The original CPU also implements these features:
175 CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
176 CPUID_TM, CPUID_PBE */
177 .features = PPRO_FEATURES |
178 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
179 CPUID_PSE36,
180 /* The original CPU also implements these ext features:
181 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
182 CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
183 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
184 .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
185 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
186 .xlevel = 0x80000008,
187 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
189 #endif
191 .name = "qemu32",
192 .level = 2,
193 .family = 6,
194 .model = 3,
195 .stepping = 3,
196 .features = PPRO_FEATURES,
197 .ext_features = CPUID_EXT_SSE3,
198 .xlevel = 0,
199 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
202 .name = "coreduo",
203 .level = 10,
204 .family = 6,
205 .model = 14,
206 .stepping = 8,
207 /* The original CPU also implements these features:
208 CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
209 CPUID_TM, CPUID_PBE */
210 .features = PPRO_FEATURES | CPUID_VME |
211 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
212 /* The original CPU also implements these ext features:
213 CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
214 CPUID_EXT_PDCM */
215 .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
216 .ext2_features = CPUID_EXT2_NX,
217 .xlevel = 0x80000008,
218 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
221 .name = "486",
222 .level = 0,
223 .family = 4,
224 .model = 0,
225 .stepping = 0,
226 .features = I486_FEATURES,
227 .xlevel = 0,
230 .name = "pentium",
231 .level = 1,
232 .family = 5,
233 .model = 4,
234 .stepping = 3,
235 .features = PENTIUM_FEATURES,
236 .xlevel = 0,
239 .name = "pentium2",
240 .level = 2,
241 .family = 6,
242 .model = 5,
243 .stepping = 2,
244 .features = PENTIUM2_FEATURES,
245 .xlevel = 0,
248 .name = "pentium3",
249 .level = 2,
250 .family = 6,
251 .model = 7,
252 .stepping = 3,
253 .features = PENTIUM3_FEATURES,
254 .xlevel = 0,
257 .name = "athlon",
258 .level = 2,
259 .vendor1 = 0x68747541, /* "Auth" */
260 .vendor2 = 0x69746e65, /* "enti" */
261 .vendor3 = 0x444d4163, /* "cAMD" */
262 .family = 6,
263 .model = 2,
264 .stepping = 3,
265 .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
266 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
267 .xlevel = 0x80000008,
268 /* XXX: put another string ? */
269 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
272 .name = "n270",
273 /* original is on level 10 */
274 .level = 5,
275 .family = 6,
276 .model = 28,
277 .stepping = 2,
278 .features = PPRO_FEATURES |
279 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
280 /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
281 * CPUID_HT | CPUID_TM | CPUID_PBE */
282 /* Some CPUs got no CPUID_SEP */
283 .ext_features = CPUID_EXT_MONITOR |
284 CPUID_EXT_SSE3 /* PNI */, CPUID_EXT_SSSE3,
285 /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
286 * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
287 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
288 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
289 .xlevel = 0x8000000A,
290 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
294 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
296 unsigned int i;
297 x86_def_t *def;
299 char *s = strdup(cpu_model);
300 char *featurestr, *name = strtok(s, ",");
301 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
302 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
303 int family = -1, model = -1, stepping = -1;
305 def = NULL;
306 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
307 if (strcmp(name, x86_defs[i].name) == 0) {
308 def = &x86_defs[i];
309 break;
312 if (!def)
313 goto error;
314 memcpy(x86_cpu_def, def, sizeof(*def));
316 featurestr = strtok(NULL, ",");
318 while (featurestr) {
319 char *val;
320 if (featurestr[0] == '+') {
321 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
322 } else if (featurestr[0] == '-') {
323 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
324 } else if ((val = strchr(featurestr, '='))) {
325 *val = 0; val++;
326 if (!strcmp(featurestr, "family")) {
327 char *err;
328 family = strtol(val, &err, 10);
329 if (!*val || *err || family < 0) {
330 fprintf(stderr, "bad numerical value %s\n", val);
331 goto error;
333 x86_cpu_def->family = family;
334 } else if (!strcmp(featurestr, "model")) {
335 char *err;
336 model = strtol(val, &err, 10);
337 if (!*val || *err || model < 0 || model > 0xf) {
338 fprintf(stderr, "bad numerical value %s\n", val);
339 goto error;
341 x86_cpu_def->model = model;
342 } else if (!strcmp(featurestr, "stepping")) {
343 char *err;
344 stepping = strtol(val, &err, 10);
345 if (!*val || *err || stepping < 0 || stepping > 0xf) {
346 fprintf(stderr, "bad numerical value %s\n", val);
347 goto error;
349 x86_cpu_def->stepping = stepping;
350 } else if (!strcmp(featurestr, "vendor")) {
351 if (strlen(val) != 12) {
352 fprintf(stderr, "vendor string must be 12 chars long\n");
353 goto error;
355 x86_cpu_def->vendor1 = 0;
356 x86_cpu_def->vendor2 = 0;
357 x86_cpu_def->vendor3 = 0;
358 for(i = 0; i < 4; i++) {
359 x86_cpu_def->vendor1 |= ((uint8_t)val[i ]) << (8 * i);
360 x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
361 x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
363 } else if (!strcmp(featurestr, "model_id")) {
364 pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
365 val);
366 } else {
367 fprintf(stderr, "unrecognized feature %s\n", featurestr);
368 goto error;
370 } else {
371 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
372 goto error;
374 featurestr = strtok(NULL, ",");
376 x86_cpu_def->features |= plus_features;
377 x86_cpu_def->ext_features |= plus_ext_features;
378 x86_cpu_def->ext2_features |= plus_ext2_features;
379 x86_cpu_def->ext3_features |= plus_ext3_features;
380 x86_cpu_def->features &= ~minus_features;
381 x86_cpu_def->ext_features &= ~minus_ext_features;
382 x86_cpu_def->ext2_features &= ~minus_ext2_features;
383 x86_cpu_def->ext3_features &= ~minus_ext3_features;
384 free(s);
385 return 0;
387 error:
388 free(s);
389 return -1;
392 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
394 unsigned int i;
396 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
397 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
400 static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
402 x86_def_t def1, *def = &def1;
404 if (cpu_x86_find_by_name(def, cpu_model) < 0)
405 return -1;
406 if (def->vendor1) {
407 env->cpuid_vendor1 = def->vendor1;
408 env->cpuid_vendor2 = def->vendor2;
409 env->cpuid_vendor3 = def->vendor3;
410 } else {
411 env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
412 env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
413 env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
415 env->cpuid_level = def->level;
416 env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping;
417 env->cpuid_features = def->features;
418 env->pat = 0x0007040600070406ULL;
419 env->cpuid_ext_features = def->ext_features;
420 env->cpuid_ext2_features = def->ext2_features;
421 env->cpuid_xlevel = def->xlevel;
422 env->cpuid_ext3_features = def->ext3_features;
424 const char *model_id = def->model_id;
425 int c, len, i;
426 if (!model_id)
427 model_id = "";
428 len = strlen(model_id);
429 for(i = 0; i < 48; i++) {
430 if (i >= len)
431 c = '\0';
432 else
433 c = (uint8_t)model_id[i];
434 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
437 return 0;
440 /* NOTE: must be called outside the CPU execute loop */
441 void cpu_reset(CPUX86State *env)
443 int i;
445 memset(env, 0, offsetof(CPUX86State, breakpoints));
447 tlb_flush(env, 1);
449 env->old_exception = -1;
451 /* init to reset state */
453 #ifdef CONFIG_SOFTMMU
454 env->hflags |= HF_SOFTMMU_MASK;
455 #endif
456 env->hflags2 |= HF2_GIF_MASK;
458 cpu_x86_update_cr0(env, 0x60000010);
459 env->a20_mask = ~0x0;
460 env->smbase = 0x30000;
462 env->idt.limit = 0xffff;
463 env->gdt.limit = 0xffff;
464 env->ldt.limit = 0xffff;
465 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
466 env->tr.limit = 0xffff;
467 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
469 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
470 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
471 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
472 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
473 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
474 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
475 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
476 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
477 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
478 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
479 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
480 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
482 env->eip = 0xfff0;
483 env->regs[R_EDX] = env->cpuid_version;
485 env->eflags = 0x2;
487 /* FPU init */
488 for(i = 0;i < 8; i++)
489 env->fptags[i] = 1;
490 env->fpuc = 0x37f;
492 env->mxcsr = 0x1f80;
495 void cpu_x86_close(CPUX86State *env)
497 qemu_free(env);
500 /***********************************************************/
501 /* x86 debug */
/* Textual names for the lazy condition-code states, used by
   cpu_dump_state(); order must match the CC_OP_* enum in cpu.h
   (one entry per operand width B/W/L/Q for each operation). */
static const char *cc_op_str[] = {
    "DYNAMIC", "EFLAGS",
    "MULB",   "MULW",   "MULL",   "MULQ",
    "ADDB",   "ADDW",   "ADDL",   "ADDQ",
    "ADCB",   "ADCW",   "ADCL",   "ADCQ",
    "SUBB",   "SUBW",   "SUBL",   "SUBQ",
    "SBBB",   "SBBW",   "SBBL",   "SBBQ",
    "LOGICB", "LOGICW", "LOGICL", "LOGICQ",
    "INCB",   "INCW",   "INCL",   "INCQ",
    "DECB",   "DECW",   "DECL",   "DECQ",
    "SHLB",   "SHLW",   "SHLL",   "SHLQ",
    "SARB",   "SARW",   "SARL",   "SARQ",
};
558 void cpu_dump_state(CPUState *env, FILE *f,
559 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
560 int flags)
562 int eflags, i, nb;
563 char cc_op_name[32];
564 static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };
566 eflags = env->eflags;
567 #ifdef TARGET_X86_64
568 if (env->hflags & HF_CS64_MASK) {
569 cpu_fprintf(f,
570 "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
571 "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
572 "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
573 "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
574 "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
575 env->regs[R_EAX],
576 env->regs[R_EBX],
577 env->regs[R_ECX],
578 env->regs[R_EDX],
579 env->regs[R_ESI],
580 env->regs[R_EDI],
581 env->regs[R_EBP],
582 env->regs[R_ESP],
583 env->regs[8],
584 env->regs[9],
585 env->regs[10],
586 env->regs[11],
587 env->regs[12],
588 env->regs[13],
589 env->regs[14],
590 env->regs[15],
591 env->eip, eflags,
592 eflags & DF_MASK ? 'D' : '-',
593 eflags & CC_O ? 'O' : '-',
594 eflags & CC_S ? 'S' : '-',
595 eflags & CC_Z ? 'Z' : '-',
596 eflags & CC_A ? 'A' : '-',
597 eflags & CC_P ? 'P' : '-',
598 eflags & CC_C ? 'C' : '-',
599 env->hflags & HF_CPL_MASK,
600 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
601 (int)(env->a20_mask >> 20) & 1,
602 (env->hflags >> HF_SMM_SHIFT) & 1,
603 env->halted);
604 } else
605 #endif
607 cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
608 "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
609 "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
610 (uint32_t)env->regs[R_EAX],
611 (uint32_t)env->regs[R_EBX],
612 (uint32_t)env->regs[R_ECX],
613 (uint32_t)env->regs[R_EDX],
614 (uint32_t)env->regs[R_ESI],
615 (uint32_t)env->regs[R_EDI],
616 (uint32_t)env->regs[R_EBP],
617 (uint32_t)env->regs[R_ESP],
618 (uint32_t)env->eip, eflags,
619 eflags & DF_MASK ? 'D' : '-',
620 eflags & CC_O ? 'O' : '-',
621 eflags & CC_S ? 'S' : '-',
622 eflags & CC_Z ? 'Z' : '-',
623 eflags & CC_A ? 'A' : '-',
624 eflags & CC_P ? 'P' : '-',
625 eflags & CC_C ? 'C' : '-',
626 env->hflags & HF_CPL_MASK,
627 (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
628 (int)(env->a20_mask >> 20) & 1,
629 (env->hflags >> HF_SMM_SHIFT) & 1,
630 env->halted);
633 #ifdef TARGET_X86_64
634 if (env->hflags & HF_LMA_MASK) {
635 for(i = 0; i < 6; i++) {
636 SegmentCache *sc = &env->segs[i];
637 cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
638 seg_name[i],
639 sc->selector,
640 sc->base,
641 sc->limit,
642 sc->flags);
644 cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
645 env->ldt.selector,
646 env->ldt.base,
647 env->ldt.limit,
648 env->ldt.flags);
649 cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
650 env->tr.selector,
651 env->tr.base,
652 env->tr.limit,
653 env->tr.flags);
654 cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
655 env->gdt.base, env->gdt.limit);
656 cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
657 env->idt.base, env->idt.limit);
658 cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
659 (uint32_t)env->cr[0],
660 env->cr[2],
661 env->cr[3],
662 (uint32_t)env->cr[4]);
663 } else
664 #endif
666 for(i = 0; i < 6; i++) {
667 SegmentCache *sc = &env->segs[i];
668 cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
669 seg_name[i],
670 sc->selector,
671 (uint32_t)sc->base,
672 sc->limit,
673 sc->flags);
675 cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
676 env->ldt.selector,
677 (uint32_t)env->ldt.base,
678 env->ldt.limit,
679 env->ldt.flags);
680 cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
681 env->tr.selector,
682 (uint32_t)env->tr.base,
683 env->tr.limit,
684 env->tr.flags);
685 cpu_fprintf(f, "GDT= %08x %08x\n",
686 (uint32_t)env->gdt.base, env->gdt.limit);
687 cpu_fprintf(f, "IDT= %08x %08x\n",
688 (uint32_t)env->idt.base, env->idt.limit);
689 cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
690 (uint32_t)env->cr[0],
691 (uint32_t)env->cr[2],
692 (uint32_t)env->cr[3],
693 (uint32_t)env->cr[4]);
695 if (flags & X86_DUMP_CCOP) {
696 if ((unsigned)env->cc_op < CC_OP_NB)
697 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
698 else
699 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
700 #ifdef TARGET_X86_64
701 if (env->hflags & HF_CS64_MASK) {
702 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
703 env->cc_src, env->cc_dst,
704 cc_op_name);
705 } else
706 #endif
708 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
709 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
710 cc_op_name);
713 if (flags & X86_DUMP_FPU) {
714 int fptag;
715 fptag = 0;
716 for(i = 0; i < 8; i++) {
717 fptag |= ((!env->fptags[i]) << i);
719 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
720 env->fpuc,
721 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
722 env->fpstt,
723 fptag,
724 env->mxcsr);
725 for(i=0;i<8;i++) {
726 #if defined(USE_X86LDOUBLE)
727 union {
728 long double d;
729 struct {
730 uint64_t lower;
731 uint16_t upper;
732 } l;
733 } tmp;
734 tmp.d = env->fpregs[i].d;
735 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
736 i, tmp.l.lower, tmp.l.upper);
737 #else
738 cpu_fprintf(f, "FPR%d=%016" PRIx64,
739 i, env->fpregs[i].mmx.q);
740 #endif
741 if ((i & 1) == 1)
742 cpu_fprintf(f, "\n");
743 else
744 cpu_fprintf(f, " ");
746 if (env->hflags & HF_CS64_MASK)
747 nb = 16;
748 else
749 nb = 8;
750 for(i=0;i<nb;i++) {
751 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
753 env->xmm_regs[i].XMM_L(3),
754 env->xmm_regs[i].XMM_L(2),
755 env->xmm_regs[i].XMM_L(1),
756 env->xmm_regs[i].XMM_L(0));
757 if ((i & 1) == 1)
758 cpu_fprintf(f, "\n");
759 else
760 cpu_fprintf(f, " ");
765 /***********************************************************/
766 /* x86 mmu */
767 /* XXX: add PGE support */
769 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
771 a20_state = (a20_state != 0);
772 if (a20_state != ((env->a20_mask >> 20) & 1)) {
773 #if defined(DEBUG_MMU)
774 printf("A20 update: a20=%d\n", a20_state);
775 #endif
776 /* if the cpu is currently executing code, we must unlink it and
777 all the potentially executing TB */
778 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
780 /* when a20 is changed, all the MMU mappings are invalid, so
781 we must flush everything */
782 tlb_flush(env, 1);
783 env->a20_mask = (~0x100000) | (a20_state << 20);
787 void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
789 int pe_state;
791 #if defined(DEBUG_MMU)
792 printf("CR0 update: CR0=0x%08x\n", new_cr0);
793 #endif
794 if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
795 (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
796 tlb_flush(env, 1);
799 #ifdef TARGET_X86_64
800 if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
801 (env->efer & MSR_EFER_LME)) {
802 /* enter in long mode */
803 /* XXX: generate an exception */
804 if (!(env->cr[4] & CR4_PAE_MASK))
805 return;
806 env->efer |= MSR_EFER_LMA;
807 env->hflags |= HF_LMA_MASK;
808 } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
809 (env->efer & MSR_EFER_LMA)) {
810 /* exit long mode */
811 env->efer &= ~MSR_EFER_LMA;
812 env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
813 env->eip &= 0xffffffff;
815 #endif
816 env->cr[0] = new_cr0 | CR0_ET_MASK;
818 /* update PE flag in hidden flags */
819 pe_state = (env->cr[0] & CR0_PE_MASK);
820 env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
821 /* ensure that ADDSEG is always set in real mode */
822 env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
823 /* update FPU flags */
824 env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
825 ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
828 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
829 the PDPT */
830 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
832 env->cr[3] = new_cr3;
833 if (env->cr[0] & CR0_PG_MASK) {
834 #if defined(DEBUG_MMU)
835 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
836 #endif
837 tlb_flush(env, 0);
841 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
843 #if defined(DEBUG_MMU)
844 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
845 #endif
846 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
847 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
848 tlb_flush(env, 1);
850 /* SSE handling */
851 if (!(env->cpuid_features & CPUID_SSE))
852 new_cr4 &= ~CR4_OSFXSR_MASK;
853 if (new_cr4 & CR4_OSFXSR_MASK)
854 env->hflags |= HF_OSFXSR_MASK;
855 else
856 env->hflags &= ~HF_OSFXSR_MASK;
858 env->cr[4] = new_cr4;
861 /* XXX: also flush 4MB pages */
862 void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
864 tlb_flush_page(env, addr);
867 #if defined(CONFIG_USER_ONLY)
869 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
870 int is_write, int mmu_idx, int is_softmmu)
872 /* user mode only emulation */
873 is_write &= 1;
874 env->cr[2] = addr;
875 env->error_code = (is_write << PG_ERROR_W_BIT);
876 env->error_code |= PG_ERROR_U_MASK;
877 env->exception_index = EXCP0E_PAGE;
878 return 1;
881 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
883 return addr;
886 #else
888 /* XXX: This value should match the one returned by CPUID
889 * and in exec.c */
890 #if defined(USE_KQEMU)
891 #define PHYS_ADDR_MASK 0xfffff000LL
892 #else
893 # if defined(TARGET_X86_64)
894 # define PHYS_ADDR_MASK 0xfffffff000LL
895 # else
896 # define PHYS_ADDR_MASK 0xffffff000LL
897 # endif
898 #endif
900 /* return value:
901 -1 = cannot handle fault
902 0 = nothing more to do
903 1 = generate PF fault
904 2 = soft MMU activation required for this block
906 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
907 int is_write1, int mmu_idx, int is_softmmu)
909 uint64_t ptep, pte;
910 target_ulong pde_addr, pte_addr;
911 int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
912 target_phys_addr_t paddr;
913 uint32_t page_offset;
914 target_ulong vaddr, virt_addr;
916 is_user = mmu_idx == MMU_USER_IDX;
917 #if defined(DEBUG_MMU)
918 printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
919 addr, is_write1, is_user, env->eip);
920 #endif
921 is_write = is_write1 & 1;
923 if (!(env->cr[0] & CR0_PG_MASK)) {
924 pte = addr;
925 virt_addr = addr & TARGET_PAGE_MASK;
926 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
927 page_size = 4096;
928 goto do_mapping;
931 if (env->cr[4] & CR4_PAE_MASK) {
932 uint64_t pde, pdpe;
933 target_ulong pdpe_addr;
935 #ifdef TARGET_X86_64
936 if (env->hflags & HF_LMA_MASK) {
937 uint64_t pml4e_addr, pml4e;
938 int32_t sext;
940 /* test virtual address sign extension */
941 sext = (int64_t)addr >> 47;
942 if (sext != 0 && sext != -1) {
943 env->error_code = 0;
944 env->exception_index = EXCP0D_GPF;
945 return 1;
948 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
949 env->a20_mask;
950 pml4e = ldq_phys(pml4e_addr);
951 if (!(pml4e & PG_PRESENT_MASK)) {
952 error_code = 0;
953 goto do_fault;
955 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
956 error_code = PG_ERROR_RSVD_MASK;
957 goto do_fault;
959 if (!(pml4e & PG_ACCESSED_MASK)) {
960 pml4e |= PG_ACCESSED_MASK;
961 stl_phys_notdirty(pml4e_addr, pml4e);
963 ptep = pml4e ^ PG_NX_MASK;
964 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
965 env->a20_mask;
966 pdpe = ldq_phys(pdpe_addr);
967 if (!(pdpe & PG_PRESENT_MASK)) {
968 error_code = 0;
969 goto do_fault;
971 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
972 error_code = PG_ERROR_RSVD_MASK;
973 goto do_fault;
975 ptep &= pdpe ^ PG_NX_MASK;
976 if (!(pdpe & PG_ACCESSED_MASK)) {
977 pdpe |= PG_ACCESSED_MASK;
978 stl_phys_notdirty(pdpe_addr, pdpe);
980 } else
981 #endif
983 /* XXX: load them when cr3 is loaded ? */
984 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
985 env->a20_mask;
986 pdpe = ldq_phys(pdpe_addr);
987 if (!(pdpe & PG_PRESENT_MASK)) {
988 error_code = 0;
989 goto do_fault;
991 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
994 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
995 env->a20_mask;
996 pde = ldq_phys(pde_addr);
997 if (!(pde & PG_PRESENT_MASK)) {
998 error_code = 0;
999 goto do_fault;
1001 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
1002 error_code = PG_ERROR_RSVD_MASK;
1003 goto do_fault;
1005 ptep &= pde ^ PG_NX_MASK;
1006 if (pde & PG_PSE_MASK) {
1007 /* 2 MB page */
1008 page_size = 2048 * 1024;
1009 ptep ^= PG_NX_MASK;
1010 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1011 goto do_fault_protect;
1012 if (is_user) {
1013 if (!(ptep & PG_USER_MASK))
1014 goto do_fault_protect;
1015 if (is_write && !(ptep & PG_RW_MASK))
1016 goto do_fault_protect;
1017 } else {
1018 if ((env->cr[0] & CR0_WP_MASK) &&
1019 is_write && !(ptep & PG_RW_MASK))
1020 goto do_fault_protect;
1022 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1023 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1024 pde |= PG_ACCESSED_MASK;
1025 if (is_dirty)
1026 pde |= PG_DIRTY_MASK;
1027 stl_phys_notdirty(pde_addr, pde);
1029 /* align to page_size */
1030 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
1031 virt_addr = addr & ~(page_size - 1);
1032 } else {
1033 /* 4 KB page */
1034 if (!(pde & PG_ACCESSED_MASK)) {
1035 pde |= PG_ACCESSED_MASK;
1036 stl_phys_notdirty(pde_addr, pde);
1038 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
1039 env->a20_mask;
1040 pte = ldq_phys(pte_addr);
1041 if (!(pte & PG_PRESENT_MASK)) {
1042 error_code = 0;
1043 goto do_fault;
1045 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
1046 error_code = PG_ERROR_RSVD_MASK;
1047 goto do_fault;
1049 /* combine pde and pte nx, user and rw protections */
1050 ptep &= pte ^ PG_NX_MASK;
1051 ptep ^= PG_NX_MASK;
1052 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1053 goto do_fault_protect;
1054 if (is_user) {
1055 if (!(ptep & PG_USER_MASK))
1056 goto do_fault_protect;
1057 if (is_write && !(ptep & PG_RW_MASK))
1058 goto do_fault_protect;
1059 } else {
1060 if ((env->cr[0] & CR0_WP_MASK) &&
1061 is_write && !(ptep & PG_RW_MASK))
1062 goto do_fault_protect;
1064 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1065 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1066 pte |= PG_ACCESSED_MASK;
1067 if (is_dirty)
1068 pte |= PG_DIRTY_MASK;
1069 stl_phys_notdirty(pte_addr, pte);
1071 page_size = 4096;
1072 virt_addr = addr & ~0xfff;
1073 pte = pte & (PHYS_ADDR_MASK | 0xfff);
1075 } else {
1076 uint32_t pde;
1078 /* page directory entry */
1079 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
1080 env->a20_mask;
1081 pde = ldl_phys(pde_addr);
1082 if (!(pde & PG_PRESENT_MASK)) {
1083 error_code = 0;
1084 goto do_fault;
1086 /* if PSE bit is set, then we use a 4MB page */
1087 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1088 page_size = 4096 * 1024;
1089 if (is_user) {
1090 if (!(pde & PG_USER_MASK))
1091 goto do_fault_protect;
1092 if (is_write && !(pde & PG_RW_MASK))
1093 goto do_fault_protect;
1094 } else {
1095 if ((env->cr[0] & CR0_WP_MASK) &&
1096 is_write && !(pde & PG_RW_MASK))
1097 goto do_fault_protect;
1099 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1100 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1101 pde |= PG_ACCESSED_MASK;
1102 if (is_dirty)
1103 pde |= PG_DIRTY_MASK;
1104 stl_phys_notdirty(pde_addr, pde);
1107 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1108 ptep = pte;
1109 virt_addr = addr & ~(page_size - 1);
1110 } else {
1111 if (!(pde & PG_ACCESSED_MASK)) {
1112 pde |= PG_ACCESSED_MASK;
1113 stl_phys_notdirty(pde_addr, pde);
1116 /* page directory entry */
1117 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
1118 env->a20_mask;
1119 pte = ldl_phys(pte_addr);
1120 if (!(pte & PG_PRESENT_MASK)) {
1121 error_code = 0;
1122 goto do_fault;
1124 /* combine pde and pte user and rw protections */
1125 ptep = pte & pde;
1126 if (is_user) {
1127 if (!(ptep & PG_USER_MASK))
1128 goto do_fault_protect;
1129 if (is_write && !(ptep & PG_RW_MASK))
1130 goto do_fault_protect;
1131 } else {
1132 if ((env->cr[0] & CR0_WP_MASK) &&
1133 is_write && !(ptep & PG_RW_MASK))
1134 goto do_fault_protect;
1136 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1137 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1138 pte |= PG_ACCESSED_MASK;
1139 if (is_dirty)
1140 pte |= PG_DIRTY_MASK;
1141 stl_phys_notdirty(pte_addr, pte);
1143 page_size = 4096;
1144 virt_addr = addr & ~0xfff;
1147 /* the page can be put in the TLB */
1148 prot = PAGE_READ;
1149 if (!(ptep & PG_NX_MASK))
1150 prot |= PAGE_EXEC;
1151 if (pte & PG_DIRTY_MASK) {
1152 /* only set write access if already dirty... otherwise wait
1153 for dirty access */
1154 if (is_user) {
1155 if (ptep & PG_RW_MASK)
1156 prot |= PAGE_WRITE;
1157 } else {
1158 if (!(env->cr[0] & CR0_WP_MASK) ||
1159 (ptep & PG_RW_MASK))
1160 prot |= PAGE_WRITE;
1163 do_mapping:
1164 pte = pte & env->a20_mask;
1166 /* Even if 4MB pages, we map only one 4KB page in the cache to
1167 avoid filling it too fast */
1168 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1169 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1170 vaddr = virt_addr + page_offset;
1172 ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
1173 return ret;
1174 do_fault_protect:
1175 error_code = PG_ERROR_P_MASK;
1176 do_fault:
1177 error_code |= (is_write << PG_ERROR_W_BIT);
1178 if (is_user)
1179 error_code |= PG_ERROR_U_MASK;
1180 if (is_write1 == 2 &&
1181 (env->efer & MSR_EFER_NXE) &&
1182 (env->cr[4] & CR4_PAE_MASK))
1183 error_code |= PG_ERROR_I_D_MASK;
1184 if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
1185 /* cr2 is not modified in case of exceptions */
1186 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
1187 addr);
1188 } else {
1189 env->cr[2] = addr;
1191 env->error_code = error_code;
1192 env->exception_index = EXCP0E_PAGE;
1193 return 1;
1196 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1198 target_ulong pde_addr, pte_addr;
1199 uint64_t pte;
1200 target_phys_addr_t paddr;
1201 uint32_t page_offset;
1202 int page_size;
1204 if (env->cr[4] & CR4_PAE_MASK) {
1205 target_ulong pdpe_addr;
1206 uint64_t pde, pdpe;
1208 #ifdef TARGET_X86_64
1209 if (env->hflags & HF_LMA_MASK) {
1210 uint64_t pml4e_addr, pml4e;
1211 int32_t sext;
1213 /* test virtual address sign extension */
1214 sext = (int64_t)addr >> 47;
1215 if (sext != 0 && sext != -1)
1216 return -1;
1218 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
1219 env->a20_mask;
1220 pml4e = ldq_phys(pml4e_addr);
1221 if (!(pml4e & PG_PRESENT_MASK))
1222 return -1;
1224 pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
1225 env->a20_mask;
1226 pdpe = ldq_phys(pdpe_addr);
1227 if (!(pdpe & PG_PRESENT_MASK))
1228 return -1;
1229 } else
1230 #endif
1232 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
1233 env->a20_mask;
1234 pdpe = ldq_phys(pdpe_addr);
1235 if (!(pdpe & PG_PRESENT_MASK))
1236 return -1;
1239 pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
1240 env->a20_mask;
1241 pde = ldq_phys(pde_addr);
1242 if (!(pde & PG_PRESENT_MASK)) {
1243 return -1;
1245 if (pde & PG_PSE_MASK) {
1246 /* 2 MB page */
1247 page_size = 2048 * 1024;
1248 pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
1249 } else {
1250 /* 4 KB page */
1251 pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
1252 env->a20_mask;
1253 page_size = 4096;
1254 pte = ldq_phys(pte_addr);
1256 if (!(pte & PG_PRESENT_MASK))
1257 return -1;
1258 } else {
1259 uint32_t pde;
1261 if (!(env->cr[0] & CR0_PG_MASK)) {
1262 pte = addr;
1263 page_size = 4096;
1264 } else {
1265 /* page directory entry */
1266 pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
1267 pde = ldl_phys(pde_addr);
1268 if (!(pde & PG_PRESENT_MASK))
1269 return -1;
1270 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
1271 pte = pde & ~0x003ff000; /* align to 4MB */
1272 page_size = 4096 * 1024;
1273 } else {
1274 /* page directory entry */
1275 pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
1276 pte = ldl_phys(pte_addr);
1277 if (!(pte & PG_PRESENT_MASK))
1278 return -1;
1279 page_size = 4096;
1282 pte = pte & env->a20_mask;
1285 page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
1286 paddr = (pte & TARGET_PAGE_MASK) + page_offset;
1287 return paddr;
1289 #endif /* !CONFIG_USER_ONLY */