/*
 * Provenance: qemu/ar7.git, target-i386/monitor.c
 * (blob fccfe40ab7b3d6523de4d190cc3828e716af501a,
 *  commit "vnc: make sure we finish disconnect")
 */
/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
#include "hw/i386/pc.h"
#include "sysemu/kvm.h"
#include "hmp.h"
33 static void print_pte(Monitor *mon, hwaddr addr,
34 hwaddr pte,
35 hwaddr mask)
37 #ifdef TARGET_X86_64
38 if (addr & (1ULL << 47)) {
39 addr |= -1LL << 48;
41 #endif
42 monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
43 " %c%c%c%c%c%c%c%c%c\n",
44 addr,
45 pte & mask,
46 pte & PG_NX_MASK ? 'X' : '-',
47 pte & PG_GLOBAL_MASK ? 'G' : '-',
48 pte & PG_PSE_MASK ? 'P' : '-',
49 pte & PG_DIRTY_MASK ? 'D' : '-',
50 pte & PG_ACCESSED_MASK ? 'A' : '-',
51 pte & PG_PCD_MASK ? 'C' : '-',
52 pte & PG_PWT_MASK ? 'T' : '-',
53 pte & PG_USER_MASK ? 'U' : '-',
54 pte & PG_RW_MASK ? 'W' : '-');
57 static void tlb_info_32(Monitor *mon, CPUArchState *env)
59 unsigned int l1, l2;
60 uint32_t pgd, pde, pte;
62 pgd = env->cr[3] & ~0xfff;
63 for(l1 = 0; l1 < 1024; l1++) {
64 cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
65 pde = le32_to_cpu(pde);
66 if (pde & PG_PRESENT_MASK) {
67 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
68 /* 4M pages */
69 print_pte(mon, (l1 << 22), pde, ~((1 << 21) - 1));
70 } else {
71 for(l2 = 0; l2 < 1024; l2++) {
72 cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
73 pte = le32_to_cpu(pte);
74 if (pte & PG_PRESENT_MASK) {
75 print_pte(mon, (l1 << 22) + (l2 << 12),
76 pte & ~PG_PSE_MASK,
77 ~0xfff);
85 static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
87 unsigned int l1, l2, l3;
88 uint64_t pdpe, pde, pte;
89 uint64_t pdp_addr, pd_addr, pt_addr;
91 pdp_addr = env->cr[3] & ~0x1f;
92 for (l1 = 0; l1 < 4; l1++) {
93 cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
94 pdpe = le64_to_cpu(pdpe);
95 if (pdpe & PG_PRESENT_MASK) {
96 pd_addr = pdpe & 0x3fffffffff000ULL;
97 for (l2 = 0; l2 < 512; l2++) {
98 cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
99 pde = le64_to_cpu(pde);
100 if (pde & PG_PRESENT_MASK) {
101 if (pde & PG_PSE_MASK) {
102 /* 2M pages with PAE, CR4.PSE is ignored */
103 print_pte(mon, (l1 << 30 ) + (l2 << 21), pde,
104 ~((hwaddr)(1 << 20) - 1));
105 } else {
106 pt_addr = pde & 0x3fffffffff000ULL;
107 for (l3 = 0; l3 < 512; l3++) {
108 cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
109 pte = le64_to_cpu(pte);
110 if (pte & PG_PRESENT_MASK) {
111 print_pte(mon, (l1 << 30 ) + (l2 << 21)
112 + (l3 << 12),
113 pte & ~PG_PSE_MASK,
114 ~(hwaddr)0xfff);
#ifdef TARGET_X86_64
/*
 * Dump all present translations for 64-bit (IA-32e) 4-level paging:
 * PML4 -> PDPT -> PD -> PT, printing 1G pages at the PDPT level and
 * 2M pages at the PD level (PSE bit; CR4.PSE is ignored in long mode).
 */
static void tlb_info_64(Monitor *mon, CPUArchState *env)
{
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        /* 1G pages, CR4.PSE is ignored */
                        print_pte(mon, (l1 << 39) + (l2 << 30), pdpe,
                                  0x3ffffc0000000ULL);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    /* 2M pages, CR4.PSE is ignored */
                                    print_pte(mon, (l1 << 39) + (l2 << 30) +
                                              (l3 << 21), pde,
                                              0x3ffffffe00000ULL);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        if (pte & PG_PRESENT_MASK) {
                                            print_pte(mon, (l1 << 39) +
                                                      (l2 << 30) +
                                                      (l3 << 21) + (l4 << 12),
                                                      pte & ~PG_PSE_MASK,
                                                      0x3fffffffff000ULL);
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
#endif /* TARGET_X86_64 */
182 void hmp_info_tlb(Monitor *mon, const QDict *qdict)
184 CPUArchState *env;
186 env = mon_get_cpu_env();
188 if (!(env->cr[0] & CR0_PG_MASK)) {
189 monitor_printf(mon, "PG disabled\n");
190 return;
192 if (env->cr[4] & CR4_PAE_MASK) {
193 #ifdef TARGET_X86_64
194 if (env->hflags & HF_LMA_MASK) {
195 tlb_info_64(mon, env);
196 } else
197 #endif
199 tlb_info_pae32(mon, env);
201 } else {
202 tlb_info_32(mon, env);
206 static void mem_print(Monitor *mon, hwaddr *pstart,
207 int *plast_prot,
208 hwaddr end, int prot)
210 int prot1;
211 prot1 = *plast_prot;
212 if (prot != prot1) {
213 if (*pstart != -1) {
214 monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
215 TARGET_FMT_plx " %c%c%c\n",
216 *pstart, end, end - *pstart,
217 prot1 & PG_USER_MASK ? 'u' : '-',
218 'r',
219 prot1 & PG_RW_MASK ? 'w' : '-');
221 if (prot != 0)
222 *pstart = end;
223 else
224 *pstart = -1;
225 *plast_prot = prot;
229 static void mem_info_32(Monitor *mon, CPUArchState *env)
231 unsigned int l1, l2;
232 int prot, last_prot;
233 uint32_t pgd, pde, pte;
234 hwaddr start, end;
236 pgd = env->cr[3] & ~0xfff;
237 last_prot = 0;
238 start = -1;
239 for(l1 = 0; l1 < 1024; l1++) {
240 cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
241 pde = le32_to_cpu(pde);
242 end = l1 << 22;
243 if (pde & PG_PRESENT_MASK) {
244 if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
245 prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
246 mem_print(mon, &start, &last_prot, end, prot);
247 } else {
248 for(l2 = 0; l2 < 1024; l2++) {
249 cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
250 pte = le32_to_cpu(pte);
251 end = (l1 << 22) + (l2 << 12);
252 if (pte & PG_PRESENT_MASK) {
253 prot = pte & pde &
254 (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
255 } else {
256 prot = 0;
258 mem_print(mon, &start, &last_prot, end, prot);
261 } else {
262 prot = 0;
263 mem_print(mon, &start, &last_prot, end, prot);
266 /* Flush last range */
267 mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
270 static void mem_info_pae32(Monitor *mon, CPUArchState *env)
272 unsigned int l1, l2, l3;
273 int prot, last_prot;
274 uint64_t pdpe, pde, pte;
275 uint64_t pdp_addr, pd_addr, pt_addr;
276 hwaddr start, end;
278 pdp_addr = env->cr[3] & ~0x1f;
279 last_prot = 0;
280 start = -1;
281 for (l1 = 0; l1 < 4; l1++) {
282 cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
283 pdpe = le64_to_cpu(pdpe);
284 end = l1 << 30;
285 if (pdpe & PG_PRESENT_MASK) {
286 pd_addr = pdpe & 0x3fffffffff000ULL;
287 for (l2 = 0; l2 < 512; l2++) {
288 cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
289 pde = le64_to_cpu(pde);
290 end = (l1 << 30) + (l2 << 21);
291 if (pde & PG_PRESENT_MASK) {
292 if (pde & PG_PSE_MASK) {
293 prot = pde & (PG_USER_MASK | PG_RW_MASK |
294 PG_PRESENT_MASK);
295 mem_print(mon, &start, &last_prot, end, prot);
296 } else {
297 pt_addr = pde & 0x3fffffffff000ULL;
298 for (l3 = 0; l3 < 512; l3++) {
299 cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
300 pte = le64_to_cpu(pte);
301 end = (l1 << 30) + (l2 << 21) + (l3 << 12);
302 if (pte & PG_PRESENT_MASK) {
303 prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
304 PG_PRESENT_MASK);
305 } else {
306 prot = 0;
308 mem_print(mon, &start, &last_prot, end, prot);
311 } else {
312 prot = 0;
313 mem_print(mon, &start, &last_prot, end, prot);
316 } else {
317 prot = 0;
318 mem_print(mon, &start, &last_prot, end, prot);
321 /* Flush last range */
322 mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
#ifdef TARGET_X86_64
/*
 * "info mem" for 64-bit 4-level paging: walk PML4 -> PDPT -> PD -> PT,
 * handing the effective protection of every region (1G, 2M or 4K pages)
 * to mem_print(). The leaf protection is ANDed with every upper-level
 * entry so only bits permitted along the whole walk survive.
 */
static void mem_info_64(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        end = l1 << 39;
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l1 << 39) + (l2 << 30);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                       PG_PRESENT_MASK);
                        prot &= pml4e;
                        mem_print(mon, &start, &last_prot, end, prot);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                                  PG_PRESENT_MASK);
                                    prot &= pml4e & pdpe;
                                    mem_print(mon, &start, &last_prot, end, prot);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        end = (l1 << 39) + (l2 << 30) +
                                            (l3 << 21) + (l4 << 12);
                                        if (pte & PG_PRESENT_MASK) {
                                            prot = pte & (PG_USER_MASK | PG_RW_MASK |
                                                          PG_PRESENT_MASK);
                                            prot &= pml4e & pdpe & pde;
                                        } else {
                                            prot = 0;
                                        }
                                        mem_print(mon, &start, &last_prot, end, prot);
                                    }
                                }
                            } else {
                                prot = 0;
                                mem_print(mon, &start, &last_prot, end, prot);
                            }
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 48, 0);
}
#endif /* TARGET_X86_64 */
405 void hmp_info_mem(Monitor *mon, const QDict *qdict)
407 CPUArchState *env;
409 env = mon_get_cpu_env();
411 if (!(env->cr[0] & CR0_PG_MASK)) {
412 monitor_printf(mon, "PG disabled\n");
413 return;
415 if (env->cr[4] & CR4_PAE_MASK) {
416 #ifdef TARGET_X86_64
417 if (env->hflags & HF_LMA_MASK) {
418 mem_info_64(mon, env);
419 } else
420 #endif
422 mem_info_pae32(mon, env);
424 } else {
425 mem_info_32(mon, env);
429 void hmp_mce(Monitor *mon, const QDict *qdict)
431 X86CPU *cpu;
432 CPUState *cs;
433 int cpu_index = qdict_get_int(qdict, "cpu_index");
434 int bank = qdict_get_int(qdict, "bank");
435 uint64_t status = qdict_get_int(qdict, "status");
436 uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
437 uint64_t addr = qdict_get_int(qdict, "addr");
438 uint64_t misc = qdict_get_int(qdict, "misc");
439 int flags = MCE_INJECT_UNCOND_AO;
441 if (qdict_get_try_bool(qdict, "broadcast", false)) {
442 flags |= MCE_INJECT_BROADCAST;
444 cs = qemu_get_cpu(cpu_index);
445 if (cs != NULL) {
446 cpu = X86_CPU(cs);
447 cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
448 flags);
452 static target_long monitor_get_pc(const struct MonitorDef *md, int val)
454 CPUArchState *env = mon_get_cpu_env();
455 return env->eip + env->segs[R_CS].base;
458 const MonitorDef monitor_defs[] = {
459 #define SEG(name, seg) \
460 { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
461 { name ".base", offsetof(CPUX86State, segs[seg].base) },\
462 { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },
464 { "eax", offsetof(CPUX86State, regs[0]) },
465 { "ecx", offsetof(CPUX86State, regs[1]) },
466 { "edx", offsetof(CPUX86State, regs[2]) },
467 { "ebx", offsetof(CPUX86State, regs[3]) },
468 { "esp|sp", offsetof(CPUX86State, regs[4]) },
469 { "ebp|fp", offsetof(CPUX86State, regs[5]) },
470 { "esi", offsetof(CPUX86State, regs[6]) },
471 { "edi", offsetof(CPUX86State, regs[7]) },
472 #ifdef TARGET_X86_64
473 { "r8", offsetof(CPUX86State, regs[8]) },
474 { "r9", offsetof(CPUX86State, regs[9]) },
475 { "r10", offsetof(CPUX86State, regs[10]) },
476 { "r11", offsetof(CPUX86State, regs[11]) },
477 { "r12", offsetof(CPUX86State, regs[12]) },
478 { "r13", offsetof(CPUX86State, regs[13]) },
479 { "r14", offsetof(CPUX86State, regs[14]) },
480 { "r15", offsetof(CPUX86State, regs[15]) },
481 #endif
482 { "eflags", offsetof(CPUX86State, eflags) },
483 { "eip", offsetof(CPUX86State, eip) },
484 SEG("cs", R_CS)
485 SEG("ds", R_DS)
486 SEG("es", R_ES)
487 SEG("ss", R_SS)
488 SEG("fs", R_FS)
489 SEG("gs", R_GS)
490 { "pc", 0, monitor_get_pc, },
491 { NULL },
494 const MonitorDef *target_monitor_defs(void)
496 return monitor_defs;
499 void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
501 x86_cpu_dump_local_apic_state(mon_get_cpu(), (FILE *)mon, monitor_fprintf,
502 CPU_DUMP_FPU);
505 void hmp_info_io_apic(Monitor *mon, const QDict *qdict)
507 if (kvm_irqchip_in_kernel()) {
508 kvm_ioapic_dump_state(mon, qdict);
509 } else {
510 ioapic_dump_state(mon, qdict);