/*
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
#include "hw/i386/pc.h"
#include "sysemu/kvm.h"
#include "hmp.h"
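
/*
 * HMP (human monitor) helpers for x86: "info tlb", "info mem", "mce",
 * "info lapic" and "info ioapic", plus the register table used when the
 * monitor evaluates expressions such as $eip.
 *
 * print_pte() below prints a single virtual -> physical mapping followed
 * by a flag string; each character reports one PG_*_MASK bit of the
 * entry: X (no-execute), G (global), P (large page/PSE), D (dirty),
 * A (accessed), C (cache disable), T (write-through), U (user),
 * W (writable).  '-' means the bit is clear.
 */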
static void print_pte(Monitor *mon, hwaddr addr,
                      hwaddr pte, hwaddr mask)
{
#ifdef TARGET_X86_64
    if (addr & (1ULL << 47)) {
        /* Sign-extend to a canonical 64-bit virtual address */
        addr |= -1LL << 48;
    }
#endif
    monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
                   " %c%c%c%c%c%c%c%c%c\n",
                   addr,
                   pte & mask,
                   pte & PG_NX_MASK ? 'X' : '-',
                   pte & PG_GLOBAL_MASK ? 'G' : '-',
                   pte & PG_PSE_MASK ? 'P' : '-',
                   pte & PG_DIRTY_MASK ? 'D' : '-',
                   pte & PG_ACCESSED_MASK ? 'A' : '-',
                   pte & PG_PCD_MASK ? 'C' : '-',
                   pte & PG_PWT_MASK ? 'T' : '-',
                   pte & PG_USER_MASK ? 'U' : '-',
                   pte & PG_RW_MASK ? 'W' : '-');
}
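
/*
 * Walk the legacy 32-bit, non-PAE tables: CR3 holds a 1024-entry page
 * directory; a present PDE either maps a 4 MiB page (PSE set in the PDE
 * and enabled in CR4) or points to a 1024-entry page table of 4 KiB pages.
 */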
static void tlb_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    uint32_t pgd, pde, pte;

    pgd = env->cr[3] & ~0xfff;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4M pages */
                print_pte(mon, (l1 << 22), pde, ~((1 << 21) - 1));
            } else {
                for (l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, (l1 << 22) + (l2 << 12),
                                  pte & ~PG_PSE_MASK,
                                  ~0xfff);
                    }
                }
            }
        }
    }
}
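
/*
 * Walk the PAE tables: CR3 points to a 4-entry page-directory-pointer
 * table; each present PDPTE selects a 512-entry page directory whose
 * entries map either a 2 MiB page or a 512-entry page table of 4 KiB pages.
 */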
static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    pdp_addr = env->cr[3] & ~0x1f;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        /* 2M pages with PAE, CR4.PSE is ignored */
                        print_pte(mon, (l1 << 30) + (l2 << 21), pde,
                                  ~((hwaddr)(1 << 20) - 1));
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            if (pte & PG_PRESENT_MASK) {
                                print_pte(mon, (l1 << 30) + (l2 << 21)
                                          + (l3 << 12),
                                          pte & ~PG_PSE_MASK,
                                          ~(hwaddr)0xfff);
                            }
                        }
                    }
                }
            }
        }
    }
}
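
/*
 * Walk the 4-level IA-32e tables: PML4 -> PDPT -> PD -> PT, 512 entries
 * per level, with 1 GiB pages possible at the PDPT level and 2 MiB pages
 * at the PD level.  CR4.PSE is ignored in long mode.
 */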
#ifdef TARGET_X86_64
static void tlb_info_64(Monitor *mon, CPUArchState *env)
{
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        /* 1G pages, CR4.PSE is ignored */
                        print_pte(mon, (l1 << 39) + (l2 << 30), pdpe,
                                  0x3ffffc0000000ULL);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    /* 2M pages, CR4.PSE is ignored */
                                    print_pte(mon, (l1 << 39) + (l2 << 30) +
                                              (l3 << 21), pde,
                                              0x3ffffffe00000ULL);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        if (pte & PG_PRESENT_MASK) {
                                            print_pte(mon, (l1 << 39) +
                                                      (l2 << 30) +
                                                      (l3 << 21) + (l4 << 12),
                                                      pte & ~PG_PSE_MASK,
                                                      0x3fffffffff000ULL);
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
#endif /* TARGET_X86_64 */
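
/*
 * "info tlb": dump every mapping reachable from the current CR3.  The
 * walker is picked from the paging mode (CR0.PG, CR4.PAE, long-mode LMA),
 * the same dispatch used by "info mem" below.  From the monitor prompt:
 *
 *   (qemu) info tlb
 */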
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            tlb_info_64(mon, env);
        } else
#endif
        {
            tlb_info_pae32(mon, env);
        }
    } else {
        tlb_info_32(mon, env);
    }
}
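
/*
 * mem_print() coalesces consecutive pages that share the same protection
 * bits and emits one "start-end size u r w" line per range.  *pstart == -1
 * means no range is currently open; callers flush the final range by
 * passing the end of the address space with prot == 0.
 */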
static void mem_print(Monitor *mon, hwaddr *pstart,
                      int *plast_prot,
                      hwaddr end, int prot)
{
    int prot1;
    prot1 = *plast_prot;
    if (prot != prot1) {
        if (*pstart != -1) {
            monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
                           TARGET_FMT_plx " %c%c%c\n",
                           *pstart, end, end - *pstart,
                           prot1 & PG_USER_MASK ? 'u' : '-',
                           'r',
                           prot1 & PG_RW_MASK ? 'w' : '-');
        }
        if (prot != 0)
            *pstart = end;
        else
            *pstart = -1;
        *plast_prot = prot;
    }
}
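
/*
 * "info mem" walker for legacy 32-bit, non-PAE paging: the effective
 * protection of a 4 KiB page is the intersection (pte & pde) of the
 * USER/RW/PRESENT bits at both levels.
 */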
static void mem_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    int prot, last_prot;
    uint32_t pgd, pde, pte;
    hwaddr start, end;

    pgd = env->cr[3] & ~0xfff;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        end = l1 << 22;
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                mem_print(mon, &start, &last_prot, end, prot);
            } else {
                for (l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    end = (l1 << 22) + (l2 << 12);
                    if (pte & PG_PRESENT_MASK) {
                        prot = pte & pde &
                            (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                    } else {
                        prot = 0;
                    }
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
}
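
/*
 * "info mem" walker for PAE paging: same range coalescing, over the
 * 3-level PDPT/PD/PT hierarchy, again intersecting a page's protection
 * bits with those of its page-directory entry.
 */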
static void mem_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    int prot, last_prot;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;
    hwaddr start, end;

    pdp_addr = env->cr[3] & ~0x1f;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        end = l1 << 30;
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                end = (l1 << 30) + (l2 << 21);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        mem_print(mon, &start, &last_prot, end, prot);
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            end = (l1 << 30) + (l2 << 21) + (l3 << 12);
                            if (pte & PG_PRESENT_MASK) {
                                prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
                                                    PG_PRESENT_MASK);
                            } else {
                                prot = 0;
                            }
                            mem_print(mon, &start, &last_prot, end, prot);
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
}
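
/*
 * "info mem" walker for 64-bit (IA-32e) paging: protections are ANDed
 * across all levels that have them (PML4E, PDPTE, PDE, PTE), and the
 * final flush covers the 48-bit address space.
 */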
#ifdef TARGET_X86_64
static void mem_info_64(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        end = l1 << 39;
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l1 << 39) + (l2 << 30);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                       PG_PRESENT_MASK);
                        prot &= pml4e;
                        mem_print(mon, &start, &last_prot, end, prot);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                                  PG_PRESENT_MASK);
                                    prot &= pml4e & pdpe;
                                    mem_print(mon, &start, &last_prot, end, prot);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        end = (l1 << 39) + (l2 << 30) +
                                            (l3 << 21) + (l4 << 12);
                                        if (pte & PG_PRESENT_MASK) {
                                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                                          PG_PRESENT_MASK);
                                            prot &= pml4e & pdpe & pde;
                                        } else {
                                            prot = 0;
                                        }
                                        mem_print(mon, &start, &last_prot, end, prot);
                                    }
                                }
                            } else {
                                prot = 0;
                                mem_print(mon, &start, &last_prot, end, prot);
                            }
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 48, 0);
}
#endif /* TARGET_X86_64 */
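
/*
 * "info mem": print the guest-virtual ranges that are currently mapped,
 * with their coalesced u/r/w protection flags, using the same paging-mode
 * dispatch as "info tlb".  From the monitor prompt:
 *
 *   (qemu) info mem
 */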
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            mem_info_64(mon, env);
        } else
#endif
        {
            mem_info_pae32(mon, env);
        }
    } else {
        mem_info_32(mon, env);
    }
}
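
/*
 * "mce": inject a machine-check error into the chosen vCPU's MCE bank.
 * The bank, status, MCG status, address and misc values are taken from
 * the command's arguments; the optional broadcast flag raises the event
 * on every vCPU (MCE_INJECT_BROADCAST) as well.
 */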
void hmp_mce(Monitor *mon, const QDict *qdict)
{
    X86CPU *cpu;
    CPUState *cs;
    int cpu_index = qdict_get_int(qdict, "cpu_index");
    int bank = qdict_get_int(qdict, "bank");
    uint64_t status = qdict_get_int(qdict, "status");
    uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
    uint64_t addr = qdict_get_int(qdict, "addr");
    uint64_t misc = qdict_get_int(qdict, "misc");
    int flags = MCE_INJECT_UNCOND_AO;

    if (qdict_get_try_bool(qdict, "broadcast", false)) {
        flags |= MCE_INJECT_BROADCAST;
    }
    cs = qemu_get_cpu(cpu_index);
    if (cs != NULL) {
        cpu = X86_CPU(cs);
        cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
                           flags);
    }
}
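
/*
 * Pseudo-register handler: "pc" has no single CPUX86State field, so it is
 * computed on demand as CS.base + EIP for the currently selected CPU.
 */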
static target_long monitor_get_pc(const struct MonitorDef *md, int val)
{
    CPUArchState *env = mon_get_cpu_env();
    return env->eip + env->segs[R_CS].base;
}
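
/*
 * Name -> CPUX86State offset table consumed by the monitor's expression
 * evaluator ($eax, $esp, ...).  The SEG() macro expands to three entries
 * per segment register: selector, base and limit.
 */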
const MonitorDef monitor_defs[] = {
#define SEG(name, seg) \
    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
    { name ".base", offsetof(CPUX86State, segs[seg].base) },\
    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },

    { "eax", offsetof(CPUX86State, regs[0]) },
    { "ecx", offsetof(CPUX86State, regs[1]) },
    { "edx", offsetof(CPUX86State, regs[2]) },
    { "ebx", offsetof(CPUX86State, regs[3]) },
    { "esp|sp", offsetof(CPUX86State, regs[4]) },
    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
    { "esi", offsetof(CPUX86State, regs[6]) },
    { "edi", offsetof(CPUX86State, regs[7]) },
#ifdef TARGET_X86_64
    { "r8", offsetof(CPUX86State, regs[8]) },
    { "r9", offsetof(CPUX86State, regs[9]) },
    { "r10", offsetof(CPUX86State, regs[10]) },
    { "r11", offsetof(CPUX86State, regs[11]) },
    { "r12", offsetof(CPUX86State, regs[12]) },
    { "r13", offsetof(CPUX86State, regs[13]) },
    { "r14", offsetof(CPUX86State, regs[14]) },
    { "r15", offsetof(CPUX86State, regs[15]) },
#endif
    { "eflags", offsetof(CPUX86State, eflags) },
    { "eip", offsetof(CPUX86State, eip) },
    SEG("cs", R_CS)
    SEG("ds", R_DS)
    SEG("es", R_ES)
    SEG("ss", R_SS)
    SEG("fs", R_FS)
    SEG("gs", R_GS)
    { "pc", 0, monitor_get_pc, },
    { NULL },
};
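
/*
 * target_monitor_defs() hands the table above to the generic monitor
 * code.  The two APIC dumps route "info lapic" through the CPU helper
 * and "info ioapic" to either the in-kernel (KVM) or the userspace
 * IOAPIC model, depending on where the IRQ chip is emulated.
 */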
const MonitorDef *target_monitor_defs(void)
{
    return monitor_defs;
}

void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
{
    x86_cpu_dump_local_apic_state(mon_get_cpu(), (FILE *)mon, monitor_fprintf,
                                  CPU_DUMP_FPU);
}

void hmp_info_io_apic(Monitor *mon, const QDict *qdict)
{
    if (kvm_irqchip_in_kernel()) {
        kvm_ioapic_dump_state(mon, qdict);
    } else {
        ioapic_dump_state(mon, qdict);
    }
}