target-i386/monitor.c
/*
 * QEMU monitor
 *
 * Copyright (c) 2003-2004 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "cpu.h"
#include "monitor/monitor.h"
#include "monitor/hmp-target.h"
#include "hw/i386/pc.h"
#include "sysemu/kvm.h"
#include "hmp.h"

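/*
 * Print a single translation: the virtual address (sign-extended on x86-64),
 * the physical address selected by 'mask', and one letter per attribute bit
 * (NX, Global, PSE, Dirty, Accessed, PCD, PWT, User, R/W).
 */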
static void print_pte(Monitor *mon, hwaddr addr,
                      hwaddr pte,
                      hwaddr mask)
{
#ifdef TARGET_X86_64
    if (addr & (1ULL << 47)) {
        addr |= -1LL << 48;
    }
#endif
    monitor_printf(mon, TARGET_FMT_plx ": " TARGET_FMT_plx
                   " %c%c%c%c%c%c%c%c%c\n",
                   addr,
                   pte & mask,
                   pte & PG_NX_MASK ? 'X' : '-',
                   pte & PG_GLOBAL_MASK ? 'G' : '-',
                   pte & PG_PSE_MASK ? 'P' : '-',
                   pte & PG_DIRTY_MASK ? 'D' : '-',
                   pte & PG_ACCESSED_MASK ? 'A' : '-',
                   pte & PG_PCD_MASK ? 'C' : '-',
                   pte & PG_PWT_MASK ? 'T' : '-',
                   pte & PG_USER_MASK ? 'U' : '-',
                   pte & PG_RW_MASK ? 'W' : '-');
}

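/*
 * "info tlb" for 32-bit non-PAE paging: walk the two-level page table at
 * CR3 and print every present 4K (and, with CR4.PSE set, 4M) mapping.
 */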
static void tlb_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    uint32_t pgd, pde, pte;

    pgd = env->cr[3] & ~0xfff;
    for(l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                /* 4M pages */
                print_pte(mon, (l1 << 22), pde, ~((1 << 21) - 1));
            } else {
                for(l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    if (pte & PG_PRESENT_MASK) {
                        print_pte(mon, (l1 << 22) + (l2 << 12),
                                  pte & ~PG_PSE_MASK,
                                  ~0xfff);
                    }
                }
            }
        }
    }
}

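/*
 * "info tlb" for 32-bit PAE paging: walk the three-level table
 * (PDPT -> page directory -> page table) and print present 4K and 2M pages.
 */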
static void tlb_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;

    pdp_addr = env->cr[3] & ~0x1f;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        /* 2M pages with PAE, CR4.PSE is ignored */
                        print_pte(mon, (l1 << 30) + (l2 << 21), pde,
                                  ~((hwaddr)(1 << 20) - 1));
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            if (pte & PG_PRESENT_MASK) {
                                print_pte(mon, (l1 << 30) + (l2 << 21)
                                          + (l3 << 12),
                                          pte & ~PG_PSE_MASK,
                                          ~(hwaddr)0xfff);
                            }
                        }
                    }
                }
            }
        }
    }
}

#ifdef TARGET_X86_64
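/*
 * "info tlb" for 64-bit long mode: walk the four-level table
 * (PML4 -> PDPT -> page directory -> page table) and print present
 * 4K, 2M and 1G pages.
 */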
static void tlb_info_64(Monitor *mon, CPUArchState *env)
{
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        /* 1G pages, CR4.PSE is ignored */
                        print_pte(mon, (l1 << 39) + (l2 << 30), pdpe,
                                  0x3ffffc0000000ULL);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    /* 2M pages, CR4.PSE is ignored */
                                    print_pte(mon, (l1 << 39) + (l2 << 30) +
                                              (l3 << 21), pde,
                                              0x3ffffffe00000ULL);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        if (pte & PG_PRESENT_MASK) {
                                            print_pte(mon, (l1 << 39) +
                                                      (l2 << 30) +
                                                      (l3 << 21) + (l4 << 12),
                                                      pte & ~PG_PSE_MASK,
                                                      0x3fffffffff000ULL);
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
#endif /* TARGET_X86_64 */

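/* HMP "info tlb": choose the page-table walker matching the current paging mode. */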
void hmp_info_tlb(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            tlb_info_64(mon, env);
        } else
#endif
        {
            tlb_info_pae32(mon, env);
        }
    } else {
        tlb_info_32(mon, env);
    }
}

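/*
 * Helper for "info mem": merge consecutive pages that share the same
 * protection into a single range and print it (start, end, size and
 * user/read/write flags) whenever the protection changes.
 */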
static void mem_print(Monitor *mon, hwaddr *pstart,
                      int *plast_prot,
                      hwaddr end, int prot)
{
    int prot1;
    prot1 = *plast_prot;
    if (prot != prot1) {
        if (*pstart != -1) {
            monitor_printf(mon, TARGET_FMT_plx "-" TARGET_FMT_plx " "
                           TARGET_FMT_plx " %c%c%c\n",
                           *pstart, end, end - *pstart,
                           prot1 & PG_USER_MASK ? 'u' : '-',
                           'r',
                           prot1 & PG_RW_MASK ? 'w' : '-');
        }
        if (prot != 0)
            *pstart = end;
        else
            *pstart = -1;
        *plast_prot = prot;
    }
}

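/* "info mem" for 32-bit non-PAE paging: print virtual ranges and their protections. */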
static void mem_info_32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2;
    int prot, last_prot;
    uint32_t pgd, pde, pte;
    hwaddr start, end;

    pgd = env->cr[3] & ~0xfff;
    last_prot = 0;
    start = -1;
    for(l1 = 0; l1 < 1024; l1++) {
        cpu_physical_memory_read(pgd + l1 * 4, &pde, 4);
        pde = le32_to_cpu(pde);
        end = l1 << 22;
        if (pde & PG_PRESENT_MASK) {
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                prot = pde & (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                mem_print(mon, &start, &last_prot, end, prot);
            } else {
                for(l2 = 0; l2 < 1024; l2++) {
                    cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4);
                    pte = le32_to_cpu(pte);
                    end = (l1 << 22) + (l2 << 12);
                    if (pte & PG_PRESENT_MASK) {
                        prot = pte & pde &
                            (PG_USER_MASK | PG_RW_MASK | PG_PRESENT_MASK);
                    } else {
                        prot = 0;
                    }
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
}

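/* "info mem" for 32-bit PAE paging. */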
static void mem_info_pae32(Monitor *mon, CPUArchState *env)
{
    unsigned int l1, l2, l3;
    int prot, last_prot;
    uint64_t pdpe, pde, pte;
    uint64_t pdp_addr, pd_addr, pt_addr;
    hwaddr start, end;

    pdp_addr = env->cr[3] & ~0x1f;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 4; l1++) {
        cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8);
        pdpe = le64_to_cpu(pdpe);
        end = l1 << 30;
        if (pdpe & PG_PRESENT_MASK) {
            pd_addr = pdpe & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8);
                pde = le64_to_cpu(pde);
                end = (l1 << 30) + (l2 << 21);
                if (pde & PG_PRESENT_MASK) {
                    if (pde & PG_PSE_MASK) {
                        prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                      PG_PRESENT_MASK);
                        mem_print(mon, &start, &last_prot, end, prot);
                    } else {
                        pt_addr = pde & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8);
                            pte = le64_to_cpu(pte);
                            end = (l1 << 30) + (l2 << 21) + (l3 << 12);
                            if (pte & PG_PRESENT_MASK) {
                                prot = pte & pde & (PG_USER_MASK | PG_RW_MASK |
                                                    PG_PRESENT_MASK);
                            } else {
                                prot = 0;
                            }
                            mem_print(mon, &start, &last_prot, end, prot);
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 32, 0);
}

#ifdef TARGET_X86_64
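/* "info mem" for 64-bit long mode (four-level page tables). */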
static void mem_info_64(Monitor *mon, CPUArchState *env)
{
    int prot, last_prot;
    uint64_t l1, l2, l3, l4;
    uint64_t pml4e, pdpe, pde, pte;
    uint64_t pml4_addr, pdp_addr, pd_addr, pt_addr, start, end;

    pml4_addr = env->cr[3] & 0x3fffffffff000ULL;
    last_prot = 0;
    start = -1;
    for (l1 = 0; l1 < 512; l1++) {
        cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8);
        pml4e = le64_to_cpu(pml4e);
        end = l1 << 39;
        if (pml4e & PG_PRESENT_MASK) {
            pdp_addr = pml4e & 0x3fffffffff000ULL;
            for (l2 = 0; l2 < 512; l2++) {
                cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8);
                pdpe = le64_to_cpu(pdpe);
                end = (l1 << 39) + (l2 << 30);
                if (pdpe & PG_PRESENT_MASK) {
                    if (pdpe & PG_PSE_MASK) {
                        prot = pdpe & (PG_USER_MASK | PG_RW_MASK |
                                       PG_PRESENT_MASK);
                        prot &= pml4e;
                        mem_print(mon, &start, &last_prot, end, prot);
                    } else {
                        pd_addr = pdpe & 0x3fffffffff000ULL;
                        for (l3 = 0; l3 < 512; l3++) {
                            cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8);
                            pde = le64_to_cpu(pde);
                            end = (l1 << 39) + (l2 << 30) + (l3 << 21);
                            if (pde & PG_PRESENT_MASK) {
                                if (pde & PG_PSE_MASK) {
                                    prot = pde & (PG_USER_MASK | PG_RW_MASK |
                                                  PG_PRESENT_MASK);
                                    prot &= pml4e & pdpe;
                                    mem_print(mon, &start, &last_prot, end, prot);
                                } else {
                                    pt_addr = pde & 0x3fffffffff000ULL;
                                    for (l4 = 0; l4 < 512; l4++) {
                                        cpu_physical_memory_read(pt_addr
                                                                 + l4 * 8,
                                                                 &pte, 8);
                                        pte = le64_to_cpu(pte);
                                        end = (l1 << 39) + (l2 << 30) +
                                              (l3 << 21) + (l4 << 12);
                                        if (pte & PG_PRESENT_MASK) {
                                            prot = pte & (PG_USER_MASK |
                                                          PG_RW_MASK |
                                                          PG_PRESENT_MASK);
                                            prot &= pml4e & pdpe & pde;
                                        } else {
                                            prot = 0;
                                        }
                                        mem_print(mon, &start, &last_prot, end, prot);
                                    }
                                }
                            } else {
                                prot = 0;
                                mem_print(mon, &start, &last_prot, end, prot);
                            }
                        }
                    }
                } else {
                    prot = 0;
                    mem_print(mon, &start, &last_prot, end, prot);
                }
            }
        } else {
            prot = 0;
            mem_print(mon, &start, &last_prot, end, prot);
        }
    }
    /* Flush last range */
    mem_print(mon, &start, &last_prot, (hwaddr)1 << 48, 0);
}
#endif /* TARGET_X86_64 */

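/* HMP "info mem": choose the page-table walker matching the current paging mode. */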
void hmp_info_mem(Monitor *mon, const QDict *qdict)
{
    CPUArchState *env;

    env = mon_get_cpu_env();

    if (!(env->cr[0] & CR0_PG_MASK)) {
        monitor_printf(mon, "PG disabled\n");
        return;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            mem_info_64(mon, env);
        } else
#endif
        {
            mem_info_pae32(mon, env);
        }
    } else {
        mem_info_32(mon, env);
    }
}

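/*
 * HMP "mce" command: inject a machine check event with the given bank,
 * status, MCG status, address and misc values into the selected vCPU,
 * optionally broadcasting it to all vCPUs.
 */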
void hmp_mce(Monitor *mon, const QDict *qdict)
{
    X86CPU *cpu;
    CPUState *cs;
    int cpu_index = qdict_get_int(qdict, "cpu_index");
    int bank = qdict_get_int(qdict, "bank");
    uint64_t status = qdict_get_int(qdict, "status");
    uint64_t mcg_status = qdict_get_int(qdict, "mcg_status");
    uint64_t addr = qdict_get_int(qdict, "addr");
    uint64_t misc = qdict_get_int(qdict, "misc");
    int flags = MCE_INJECT_UNCOND_AO;

    if (qdict_get_try_bool(qdict, "broadcast", false)) {
        flags |= MCE_INJECT_BROADCAST;
    }
    cs = qemu_get_cpu(cpu_index);
    if (cs != NULL) {
        cpu = X86_CPU(cs);
        cpu_x86_inject_mce(mon, cpu, bank, status, mcg_status, addr, misc,
                           flags);
    }
}

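/* Linear program counter (CS base + EIP) for the monitor's "pc" pseudo-register. */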
static target_long monitor_get_pc(const struct MonitorDef *md, int val)
{
    CPUArchState *env = mon_get_cpu_env();
    return env->eip + env->segs[R_CS].base;
}

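/*
 * Registers that the monitor expression evaluator can resolve by name
 * ($eax, $cs.base, $pc, ...), given as offsets into CPUX86State or as
 * getter functions.
 */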
const MonitorDef monitor_defs[] = {
#define SEG(name, seg) \
    { name, offsetof(CPUX86State, segs[seg].selector), NULL, MD_I32 },\
    { name ".base", offsetof(CPUX86State, segs[seg].base) },\
    { name ".limit", offsetof(CPUX86State, segs[seg].limit), NULL, MD_I32 },

    { "eax", offsetof(CPUX86State, regs[0]) },
    { "ecx", offsetof(CPUX86State, regs[1]) },
    { "edx", offsetof(CPUX86State, regs[2]) },
    { "ebx", offsetof(CPUX86State, regs[3]) },
    { "esp|sp", offsetof(CPUX86State, regs[4]) },
    { "ebp|fp", offsetof(CPUX86State, regs[5]) },
    { "esi", offsetof(CPUX86State, regs[6]) },
    { "edi", offsetof(CPUX86State, regs[7]) },
#ifdef TARGET_X86_64
    { "r8", offsetof(CPUX86State, regs[8]) },
    { "r9", offsetof(CPUX86State, regs[9]) },
    { "r10", offsetof(CPUX86State, regs[10]) },
    { "r11", offsetof(CPUX86State, regs[11]) },
    { "r12", offsetof(CPUX86State, regs[12]) },
    { "r13", offsetof(CPUX86State, regs[13]) },
    { "r14", offsetof(CPUX86State, regs[14]) },
    { "r15", offsetof(CPUX86State, regs[15]) },
#endif
    { "eflags", offsetof(CPUX86State, eflags) },
    { "eip", offsetof(CPUX86State, eip) },
    SEG("cs", R_CS)
    SEG("ds", R_DS)
    SEG("es", R_ES)
    SEG("ss", R_SS)
    SEG("fs", R_FS)
    SEG("gs", R_GS)
    { "pc", 0, monitor_get_pc, },
    { NULL },
};

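/* Hand the x86 register table to the common monitor code. */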
const MonitorDef *target_monitor_defs(void)
{
    return monitor_defs;
}

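/* HMP handler: dump the local APIC state of the current monitor CPU. */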
void hmp_info_local_apic(Monitor *mon, const QDict *qdict)
{
    x86_cpu_dump_local_apic_state(mon_get_cpu(), (FILE *)mon, monitor_fprintf,
                                  CPU_DUMP_FPU);
}

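/*
 * HMP handler: dump IOAPIC state, from KVM when the irqchip is in-kernel,
 * otherwise from the emulated device.
 */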
void hmp_info_io_apic(Monitor *mon, const QDict *qdict)
{
    if (kvm_irqchip_in_kernel()) {
        kvm_ioapic_dump_state(mon, qdict);
    } else {
        ioapic_dump_state(mon, qdict);
    }
}