/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
#include "hw/i386/pc.h"
#include "sysemu/kvm.h"
#include "hmp.h"

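/*
 * Print one page-table entry: virtual address, physical address selected
 * by the given mask, and the decoded attribute bits (NX, Global, PSE,
 * Dirty, Accessed, PCD, PWT, User, R/W).
 */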
static void print_pte(Monitor *mon, hwaddr addr,
                      hwaddr pte, hwaddr mask)
{
#ifdef TARGET_X86_64
    if (addr & (1ULL << 47)) {
        /* Sign-extend canonical 48-bit virtual addresses */
        addr |= -1LL << 48;
    }
#endif

    monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
                   " %c%c%c%c%c%c%c%c%c\n",
                   addr,
                   pte & mask,
                   pte & PG_NX_MASK ? 'X' : '-',
                   pte & PG_GLOBAL_MASK ? 'G' : '-',
                   pte & PG_PSE_MASK ? 'P' : '-',
                   pte & PG_DIRTY_MASK ? 'D' : '-',
                   pte & PG_ACCESSED_MASK ? 'A' : '-',
                   pte & PG_PCD_MASK ? 'C' : '-',
                   pte & PG_PWT_MASK ? 'T' : '-',
                   pte & PG_USER_MASK ? 'U' : '-',
                   pte & PG_RW_MASK ? 'W' : '-');
}

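/* Dump the present mappings of a legacy 32-bit (non-PAE) two-level page table. */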
static void tlb_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    uint32_t pgd, pde, pte;

    pgd = env->cr[3] & ~0xfff;
    for(l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4M pages */
                print_pte(mon, (l1 << 22), pde, ~((1 << 21) - 1));
            } else {
                for(l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, (l1 << 22) + (l2 << 12),
                                  pte & ~PG_PSE_MASK,
                                  ~0xfff);
                    }
                }
            }
        }
    }
}

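/* Dump the present mappings of a 32-bit PAE three-level page table. */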
static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    pdp_addr = env->cr[3] & ~0x1f;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        /* 2M pages with PAE, CR4.PSE is ignored */
                        print_pte(mon, (l1 << 30) + (l2 << 21), pde,
                                  ~((hwaddr)(1 << 20) - 1));
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            if (pte & PG_PRESENT_MASK) {
                                print_pte(mon, (l1 << 30) + (l2 << 21)
                                          + (l3 << 12),
                                          pte & ~PG_PSE_MASK,
                                          ~(hwaddr)0xfff);
                            }
                        }
                    }
                }
            }
        }
    }
}

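/* Dump the present mappings of a 64-bit four-level page table (long mode only). */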
#ifdef TARGET_X86_64
static void tlb_info_64(Monitor *mon, CPUArchState *env)
{
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        /* 1G pages, CR4.PSE is ignored */
                        print_pte(mon, (l1 << 39) + (l2 << 30), pdpe,
                                  0x3ffffc0000000ULL);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    /* 2M pages, CR4.PSE is ignored */
                                    print_pte(mon, (l1 << 39) + (l2 << 30) +
                                              (l3 << 21), pde,
                                              0x3ffffffe00000ULL);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        if (pte & PG_PRESENT_MASK) {
                                            print_pte(mon, (l1 << 39) +
                                                      (l2 << 30) +
                                                      (l3 << 21) + (l4 << 12),
                                                      pte & ~PG_PSE_MASK,
                                                      0x3fffffffff000ULL);
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
#endif /* TARGET_X86_64 */

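/*
 * HMP "info tlb" handler: pick the page-table walker that matches the
 * current paging mode (long mode, PAE or legacy 32-bit).
 */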
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            tlb_info_64(mon, env);
        } else
#endif
        {
            tlb_info_pae32(mon, env);
        }
    } else {
        tlb_info_32(mon, env);
    }
}

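/*
 * Print an accumulated virtual-address range once its protection bits
 * change: start, end, length and the u/r/w flags of the previous range.
 */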
static void mem_print(Monitor *mon, hwaddr *pstart,
                      int *plast_prot,
                      hwaddr end, int prot)
{
    int prot1;

    prot1 = *plast_prot;
    if (prot != prot1) {
        if (*pstart != -1) {
            monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
                           TARGET_FMT_plx " %c%c%c\n",
                           *pstart, end, end - *pstart,
                           prot1 & PG_USER_MASK ? 'u' : '-',
                           'r',
                           prot1 & PG_RW_MASK ? 'w' : '-');
        }
        if (prot != 0) {
            *pstart = end;
        } else {
            *pstart = -1;
        }
        *plast_prot = prot;
    }
}

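/* Coalesce and print the mapped ranges of a legacy 32-bit page table. */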
static void mem_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    int prot, last_prot;
    uint32_t pgd, pde, pte;
    hwaddr start, end;

    pgd = env->cr[3] & ~0xfff;
    last_prot = 0;
    start = -1;
    for(l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        end = l1 << 22;
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                mem_print(mon, &start, &last_prot, end, prot);
            } else {
                for(l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    end = (l1 << 22) + (l2 << 12);
                    if (pte & PG_PRESENT_MASK) {
                        prot = pte & pde &
                            (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                    } else {
                        prot = 0;
                    }
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
}

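/* Coalesce and print the mapped ranges of a 32-bit PAE page table. */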
static void mem_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    int prot, last_prot;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;
    hwaddr start, end;

    pdp_addr = env->cr[3] & ~0x1f;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        end = l1 << 30;
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                end = (l1 << 30) + (l2 << 21);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        mem_print(mon, &start, &last_prot, end, prot);
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            end = (l1 << 30) + (l2 << 21) + (l3 << 12);
                            if (pte & PG_PRESENT_MASK) {
                                prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
                                                    PG_PRESENT_MASK);
                            } else {
                                prot = 0;
                            }
                            mem_print(mon, &start, &last_prot, end, prot);
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
}

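/* Coalesce and print the mapped ranges of a 64-bit four-level page table. */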
#ifdef TARGET_X86_64
static void mem_info_64(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        end = l1 << 39;
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l1 << 39) + (l2 << 30);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                       PG_PRESENT_MASK);
                        prot &= pml4e;
                        mem_print(mon, &start, &last_prot, end, prot);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                                  PG_PRESENT_MASK);
                                    prot &= pml4e & pdpe;
                                    mem_print(mon, &start, &last_prot, end, prot);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        end = (l1 << 39) + (l2 << 30) +
                                              (l3 << 21) + (l4 << 12);
                                        if (pte & PG_PRESENT_MASK) {
                                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                                          PG_PRESENT_MASK);
                                            prot &= pml4e & pdpe & pde;
                                        } else {
                                            prot = 0;
                                        }
                                        mem_print(mon, &start, &last_prot, end, prot);
                                    }
                                }
                            } else {
                                prot = 0;
                                mem_print(mon, &start, &last_prot, end, prot);
                            }
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 48, 0);
}
#endif /* TARGET_X86_64 */

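/*
 * HMP "info mem" handler: print the guest's virtual memory map for the
 * current paging mode.
 */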
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            mem_info_64(mon, env);
        } else
#endif
        {
            mem_info_pae32(mon, env);
        }
    } else {
        mem_info_32(mon, env);
    }
}

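/* HMP "mce" handler: inject a machine check exception into the given vCPU. */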
void hmp_mce(Monitor *mon, const QDict *qdict)
{
    X86CPU *cpu;
    CPUState *cs;
    int cpu_index = qdict_get_int(qdict, "cpu_index");
    int bank = qdict_get_int(qdict, "bank");
    uint64_t status = qdict_get_int(qdict, "status");
    uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
    uint64_t addr = qdict_get_int(qdict, "addr");
    uint64_t misc = qdict_get_int(qdict, "misc");
    int flags = MCE_INJECT_UNCOND_AO;

    if (qdict_get_try_bool(qdict, "broadcast", false)) {
        flags |= MCE_INJECT_BROADCAST;
    }
    cs = qemu_get_cpu(cpu_index);
    if (cs != NULL) {
        cpu = X86_CPU(cs);
        cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
                           flags);
    }
}

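/* Return the current program counter as a linear address (CS base + EIP). */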
static target_long monitor_get_pc(const struct MonitorDef *md, int val)
{
    CPUArchState *env = mon_get_cpu_env();
    return env->eip + env->segs[R_CS].base;
}

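/*
 * Registers exposed to monitor expressions ($eax, $pc, ...).  SEG() expands
 * to the selector, base and limit entries of one segment register.
 */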
const MonitorDef monitor_defs[] = {
#define SEG(name, seg) \
    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
    { name ".base", offsetof(CPUX86State, segs[seg].base) },\
    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },

    { "eax", offsetof(CPUX86State, regs[0]) },
    { "ecx", offsetof(CPUX86State, regs[1]) },
    { "edx", offsetof(CPUX86State, regs[2]) },
    { "ebx", offsetof(CPUX86State, regs[3]) },
    { "esp|sp", offsetof(CPUX86State, regs[4]) },
    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
    { "esi", offsetof(CPUX86State, regs[6]) },
    { "edi", offsetof(CPUX86State, regs[7]) },
#ifdef TARGET_X86_64
    { "r8", offsetof(CPUX86State, regs[8]) },
    { "r9", offsetof(CPUX86State, regs[9]) },
    { "r10", offsetof(CPUX86State, regs[10]) },
    { "r11", offsetof(CPUX86State, regs[11]) },
    { "r12", offsetof(CPUX86State, regs[12]) },
    { "r13", offsetof(CPUX86State, regs[13]) },
    { "r14", offsetof(CPUX86State, regs[14]) },
    { "r15", offsetof(CPUX86State, regs[15]) },
#endif
    { "eflags", offsetof(CPUX86State, eflags) },
    { "eip", offsetof(CPUX86State, eip) },
    SEG("cs", R_CS)
    SEG("ds", R_DS)
    SEG("es", R_ES)
    SEG("ss", R_SS)
    SEG("fs", R_FS)
    SEG("gs", R_GS)
    { "pc", 0, monitor_get_pc, },
    { NULL },
};

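/* Export the register table above to the generic monitor code. */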
const MonitorDef *target_monitor_defs(void)
{
    return monitor_defs;
}

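/* HMP "info lapic" handler: dump the local APIC state of the current CPU. */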
void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
{
    x86_cpu_dump_local_apic_state(mon_get_cpu(), (FILE *)mon, monitor_fprintf,
                                  CPU_DUMP_FPU);
}

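/*
 * HMP "info ioapic" handler: dump the IOAPIC state, from the kernel when
 * the in-kernel irqchip is used, otherwise from the userspace model.
 */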
void hmp_info_io_apic(Monitor *mon, const QDict *qdict)
{
    if (kvm_irqchip_in_kernel()) {
        kvm_ioapic_dump_state(mon, qdict);
    } else {
        ioapic_dump_state(mon, qdict);
    }
}