Add Atom (x86) cpu identification.
[qemu/mini2440/sniper_sniper_test.git] / target-i386 / helper.c
blob 4b0d5431973c28795fd8b8eefbc0160291e86447
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "svm.h"
#include "qemu-common.h"

//#define DEBUG_MMU
static int cpu_x86_register (CPUX86State *env, const char *cpu_model);
static void add_flagname_to_bitmaps(char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    /* feature flags taken from "Intel Processor Identification and the CPUID
     * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
     * about feature names, the Linux name is used. */
    static const char *feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
        NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    static const char *ext_feature_name[] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx",
        NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    static const char *ext2_feature_name[] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall",
        "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */,
        NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */,
        "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    static const char *ext3_feature_name[] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm",
        "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */,
        "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL,
        "skinit", "wdt", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    for (i = 0; i < 32; i++)
        if (feature_name[i] && !strcmp(flagname, feature_name[i])) {
            *features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext_feature_name[i] && !strcmp(flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext2_feature_name[i] && !strcmp(flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext3_feature_name[i] && !strcmp(flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            return;
        }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
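
/* Illustrative sketch (not part of the original file; the helper is real,
 * the caller is hypothetical): resolving the Linux-style flag name "pni"
 * (SSE3) sets bit 0 of the ext_features word, i.e. CPUID_EXT_SSE3. */
#if 0
static void example_resolve_flag(void)
{
    uint32_t features = 0, ext_features = 0, ext2_features = 0, ext3_features = 0;

    add_flagname_to_bitmaps((char *)"pni", &features, &ext_features,
                            &ext2_features, &ext3_features);
    assert(ext_features == 1); /* "pni" is entry 0 of ext_feature_name[] */
}
#endif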
CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}
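
/* Illustrative usage (hypothetical caller, not part of the original file):
 * machine setup code creates the CPU from the "-cpu" model string. */
#if 0
static void example_create_cpu(void)
{
    CPUX86State *env = cpu_x86_init("atom");
    if (!env) {
        fprintf(stderr, "Unable to find x86 CPU definition\n");
        exit(1);
    }
}
#endif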
typedef struct x86_def_t {
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;
    char model_id[48];
} x86_def_t;
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "core2duo",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* the original CPU does have many more features that are
         * not implemented yet */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
#endif
188 .name = "qemu32",
189 .level = 2,
190 .family = 6,
191 .model = 3,
192 .stepping = 3,
193 .features = PPRO_FEATURES,
194 .ext_features = CPUID_EXT_SSE3,
195 .xlevel = 0,
196 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
199 .name = "486",
200 .level = 0,
201 .family = 4,
202 .model = 0,
203 .stepping = 0,
204 .features = I486_FEATURES,
205 .xlevel = 0,
208 .name = "pentium",
209 .level = 1,
210 .family = 5,
211 .model = 4,
212 .stepping = 3,
213 .features = PENTIUM_FEATURES,
214 .xlevel = 0,
217 .name = "pentium2",
218 .level = 2,
219 .family = 6,
220 .model = 5,
221 .stepping = 2,
222 .features = PENTIUM2_FEATURES,
223 .xlevel = 0,
226 .name = "pentium3",
227 .level = 2,
228 .family = 6,
229 .model = 7,
230 .stepping = 3,
231 .features = PENTIUM3_FEATURES,
232 .xlevel = 0,
235 .name = "athlon",
236 .level = 2,
237 .vendor1 = 0x68747541, /* "Auth" */
238 .vendor2 = 0x69746e65, /* "enti" */
239 .vendor3 = 0x444d4163, /* "cAMD" */
240 .family = 6,
241 .model = 2,
242 .stepping = 3,
243 .features = PPRO_FEATURES | PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR | CPUID_MCA,
244 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
245 .xlevel = 0x80000008,
246 /* XXX: put another string ? */
247 .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
250 .name = "atom",
251 /* original is on level 10 */
252 .level = 5,
253 .family = 6,
254 .model = 28,
255 .stepping = 2,
256 .features = PPRO_FEATURES |
257 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
258 /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
259 * CPUID_HT | CPUID_TM | CPUID_PBE */
260 /* Some CPUs got no CPUID_SEP */
261 .ext_features = CPUID_EXT_MONITOR |
262 CPUID_EXT_SSE3 /* PNI */, CPUID_EXT_SSSE3,
263 /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
264 * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
265 .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
266 /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
267 .xlevel = 0x8000000A,
268 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
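
/* Illustrative command lines (assumed syntax based on the parser below, not
 * part of the original file):
 *
 *   -cpu atom                     pick the Atom N270 definition above
 *   -cpu atom,+popcnt,-ssse3      set/clear individual CPUID feature bits
 *   -cpu atom,model_id=MyAtom     override the brand string
 *
 * The "+flag"/"-flag" names come from the feature name tables at the top
 * of this file and are applied in cpu_x86_find_by_name() below. */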
static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    def = NULL;
    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def)
        goto error;
    memcpy(x86_cpu_def, def, sizeof(*def));
    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;
error:
    free(s);
    return -1;
}
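
/* Worked example (illustrative, not part of the original file): the
 * "vendor=" branch above packs 12 ASCII bytes little-endian into three
 * 32-bit words, the layout CPUID leaf 0 uses for EBX/EDX/ECX. */
#if 0
static void example_pack_vendor(void)
{
    const char *val = "GenuineIntel";
    uint32_t v1 = 0, v2 = 0, v3 = 0;
    int i;

    for (i = 0; i < 4; i++) {
        v1 |= ((uint8_t)val[i    ]) << (8 * i); /* "Genu" -> 0x756e6547 */
        v2 |= ((uint8_t)val[i + 4]) << (8 * i); /* "ineI" -> 0x49656e69 */
        v3 |= ((uint8_t)val[i + 8]) << (8 * i); /* "ntel" -> 0x6c65746e */
    }
}
#endif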
void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
}
static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_level = def->level;
    env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
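
/* Worked example (illustrative, not part of the original file): the packing
 * above places stepping in bits 0-3, model in bits 4-7 and family in bits
 * 8-11 of CPUID leaf 1 EAX.  For the core2duo definition:
 *
 *   (6 << 8) | (15 << 4) | 11 = 0x06FB
 *
 * Note that a model number above 15 (such as the atom definition's 28) does
 * not fit the 4-bit field; real hardware reports such models through
 * CPUID's extended-model bits, which this packing does not implement. */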
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK | DESC_R_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0; i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}
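
/* Aside (not part of the original file): the values above reproduce the x86
 * reset vector.  Execution begins at CS.base + EIP = 0xffff0000 + 0xfff0 =
 * 0xfffffff0, the conventional firmware entry point just below 4 GB, and
 * EDX holds the CPU signature (family/model/stepping) after reset. */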
void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}
/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
673 if (flags & X86_DUMP_CCOP) {
674 if ((unsigned)env->cc_op < CC_OP_NB)
675 snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
676 else
677 snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
678 #ifdef TARGET_X86_64
679 if (env->hflags & HF_CS64_MASK) {
680 cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
681 env->cc_src, env->cc_dst,
682 cc_op_name);
683 } else
684 #endif
686 cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
687 (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
688 cc_op_name);
691 if (flags & X86_DUMP_FPU) {
692 int fptag;
693 fptag = 0;
694 for(i = 0; i < 8; i++) {
695 fptag |= ((!env->fptags[i]) << i);
697 cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
698 env->fpuc,
699 (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
700 env->fpstt,
701 fptag,
702 env->mxcsr);
703 for(i=0;i<8;i++) {
704 #if defined(USE_X86LDOUBLE)
705 union {
706 long double d;
707 struct {
708 uint64_t lower;
709 uint16_t upper;
710 } l;
711 } tmp;
712 tmp.d = env->fpregs[i].d;
713 cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
714 i, tmp.l.lower, tmp.l.upper);
715 #else
716 cpu_fprintf(f, "FPR%d=%016" PRIx64,
717 i, env->fpregs[i].mmx.q);
718 #endif
719 if ((i & 1) == 1)
720 cpu_fprintf(f, "\n");
721 else
722 cpu_fprintf(f, " ");
724 if (env->hflags & HF_CS64_MASK)
725 nb = 16;
726 else
727 nb = 8;
728 for(i=0;i<nb;i++) {
729 cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
731 env->xmm_regs[i].XMM_L(3),
732 env->xmm_regs[i].XMM_L(2),
733 env->xmm_regs[i].XMM_L(1),
734 env->xmm_regs[i].XMM_L(0));
735 if ((i & 1) == 1)
736 cpu_fprintf(f, "\n");
737 else
738 cpu_fprintf(f, " ");
/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = (~0x100000) | (a20_state << 20);
    }
}
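
/* Worked example (illustrative, not part of the original file): with the
 * gate disabled (a20_state == 0) the mask is ~0x100000 = 0xffefffff, so bit
 * 20 of every address is forced to zero and 0x100000 aliases to 0x000000,
 * reproducing 8086-style wrap-around; with the gate enabled the mask is
 * 0xffffffff and addresses pass through unchanged. */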
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
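
/* Aside (illustrative, not part of the original file): the last statement
 * works because CR0.MP, CR0.EM and CR0.TS occupy bits 1-3 of CR0, while
 * HF_MP_MASK, HF_EM_MASK and HF_TS_MASK are three consecutive hflag bits
 * starting at HF_MP_SHIFT; shifting new_cr0 left by HF_MP_SHIFT - 1 lines
 * the two groups up so a single AND copies all three flags. */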
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
#if defined(USE_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
#  define PHYS_ADDR_MASK 0xfffffff000LL
# else
#  define PHYS_ADDR_MASK 0xffffff000LL
# endif
#endif
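
/* Aside (illustrative, not part of the original file): these masks keep the
 * page-frame bits of a page-table entry.  0xffffff000LL covers bits 12-35
 * (36-bit physical addresses, the classic PAE limit); 0xfffffff000LL covers
 * bits 12-39 (40-bit physical addresses for the 64-bit target). */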
/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;
901 if (!(env->cr[0] & CR0_PG_MASK)) {
902 pte = addr;
903 virt_addr = addr & TARGET_PAGE_MASK;
904 prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
905 page_size = 4096;
906 goto do_mapping;
909 if (env->cr[4] & CR4_PAE_MASK) {
910 uint64_t pde, pdpe;
911 target_ulong pdpe_addr;
913 #ifdef TARGET_X86_64
914 if (env->hflags & HF_LMA_MASK) {
915 uint64_t pml4e_addr, pml4e;
916 int32_t sext;
918 /* test virtual address sign extension */
919 sext = (int64_t)addr >> 47;
920 if (sext != 0 && sext != -1) {
921 env->error_code = 0;
922 env->exception_index = EXCP0D_GPF;
923 return 1;
926 pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
927 env->a20_mask;
928 pml4e = ldq_phys(pml4e_addr);
929 if (!(pml4e & PG_PRESENT_MASK)) {
930 error_code = 0;
931 goto do_fault;
933 if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
934 error_code = PG_ERROR_RSVD_MASK;
935 goto do_fault;
937 if (!(pml4e & PG_ACCESSED_MASK)) {
938 pml4e |= PG_ACCESSED_MASK;
939 stl_phys_notdirty(pml4e_addr, pml4e);
941 ptep = pml4e ^ PG_NX_MASK;
942 pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
943 env->a20_mask;
944 pdpe = ldq_phys(pdpe_addr);
945 if (!(pdpe & PG_PRESENT_MASK)) {
946 error_code = 0;
947 goto do_fault;
949 if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
950 error_code = PG_ERROR_RSVD_MASK;
951 goto do_fault;
953 ptep &= pdpe ^ PG_NX_MASK;
954 if (!(pdpe & PG_ACCESSED_MASK)) {
955 pdpe |= PG_ACCESSED_MASK;
956 stl_phys_notdirty(pdpe_addr, pdpe);
958 } else
959 #endif
961 /* XXX: load them when cr3 is loaded ? */
962 pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
963 env->a20_mask;
964 pdpe = ldq_phys(pdpe_addr);
965 if (!(pdpe & PG_PRESENT_MASK)) {
966 error_code = 0;
967 goto do_fault;
969 ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
972 pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
973 env->a20_mask;
974 pde = ldq_phys(pde_addr);
975 if (!(pde & PG_PRESENT_MASK)) {
976 error_code = 0;
977 goto do_fault;
979 if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
980 error_code = PG_ERROR_RSVD_MASK;
981 goto do_fault;
983 ptep &= pde ^ PG_NX_MASK;
984 if (pde & PG_PSE_MASK) {
985 /* 2 MB page */
986 page_size = 2048 * 1024;
987 ptep ^= PG_NX_MASK;
988 if ((ptep & PG_NX_MASK) && is_write1 == 2)
989 goto do_fault_protect;
990 if (is_user) {
991 if (!(ptep & PG_USER_MASK))
992 goto do_fault_protect;
993 if (is_write && !(ptep & PG_RW_MASK))
994 goto do_fault_protect;
995 } else {
996 if ((env->cr[0] & CR0_WP_MASK) &&
997 is_write && !(ptep & PG_RW_MASK))
998 goto do_fault_protect;
1000 is_dirty = is_write && !(pde & PG_DIRTY_MASK);
1001 if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
1002 pde |= PG_ACCESSED_MASK;
1003 if (is_dirty)
1004 pde |= PG_DIRTY_MASK;
1005 stl_phys_notdirty(pde_addr, pde);
1007 /* align to page_size */
1008 pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
1009 virt_addr = addr & ~(page_size - 1);
1010 } else {
1011 /* 4 KB page */
1012 if (!(pde & PG_ACCESSED_MASK)) {
1013 pde |= PG_ACCESSED_MASK;
1014 stl_phys_notdirty(pde_addr, pde);
1016 pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
1017 env->a20_mask;
1018 pte = ldq_phys(pte_addr);
1019 if (!(pte & PG_PRESENT_MASK)) {
1020 error_code = 0;
1021 goto do_fault;
1023 if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
1024 error_code = PG_ERROR_RSVD_MASK;
1025 goto do_fault;
1027 /* combine pde and pte nx, user and rw protections */
1028 ptep &= pte ^ PG_NX_MASK;
1029 ptep ^= PG_NX_MASK;
1030 if ((ptep & PG_NX_MASK) && is_write1 == 2)
1031 goto do_fault_protect;
1032 if (is_user) {
1033 if (!(ptep & PG_USER_MASK))
1034 goto do_fault_protect;
1035 if (is_write && !(ptep & PG_RW_MASK))
1036 goto do_fault_protect;
1037 } else {
1038 if ((env->cr[0] & CR0_WP_MASK) &&
1039 is_write && !(ptep & PG_RW_MASK))
1040 goto do_fault_protect;
1042 is_dirty = is_write && !(pte & PG_DIRTY_MASK);
1043 if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
1044 pte |= PG_ACCESSED_MASK;
1045 if (is_dirty)
1046 pte |= PG_DIRTY_MASK;
1047 stl_phys_notdirty(pte_addr, pte);
1049 page_size = 4096;
1050 virt_addr = addr & ~0xfff;
1051 pte = pte & (PHYS_ADDR_MASK | 0xfff);
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
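
/* Worked example (illustrative, not part of the original file): in the
 * legacy two-level walk above, a linear address such as 0x08049123 splits
 * into PDE index 0x020 (top 10 bits), PTE index 0x049 (middle 10 bits) and
 * offset 0x123 (low 12 bits).  The byte offsets used when indexing the
 * 4-byte entries match: (addr >> 20) & 0xffc = 0x080 = 0x020 * 4 and
 * (addr >> 10) & 0xffc = 0x124 = 0x049 * 4. */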
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;
        }
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */