[qemu/ar7.git] / gdbstub.c
1 /*
2 * gdb server stub
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #include "qemu-common.h"
21 #ifdef CONFIG_USER_ONLY
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <stdarg.h>
25 #include <string.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
30 #include "qemu.h"
31 #else
32 #include "monitor/monitor.h"
33 #include "sysemu/char.h"
34 #include "sysemu/sysemu.h"
35 #include "exec/gdbstub.h"
36 #endif
38 #define MAX_PACKET_LENGTH 4096
40 #include "cpu.h"
41 #include "qemu/sockets.h"
42 #include "sysemu/kvm.h"
43 #include "qemu/bitops.h"
45 #ifndef TARGET_CPU_MEMORY_RW_DEBUG
46 static inline int target_memory_rw_debug(CPUArchState *env, target_ulong addr,
47 uint8_t *buf, int len, int is_write)
49 return cpu_memory_rw_debug(env, addr, buf, len, is_write);
51 #else
52 /* target_memory_rw_debug() defined in cpu.h */
53 #endif
55 enum {
56 GDB_SIGNAL_0 = 0,
57 GDB_SIGNAL_INT = 2,
58 GDB_SIGNAL_QUIT = 3,
59 GDB_SIGNAL_TRAP = 5,
60 GDB_SIGNAL_ABRT = 6,
61 GDB_SIGNAL_ALRM = 14,
62 GDB_SIGNAL_IO = 23,
63 GDB_SIGNAL_XCPU = 24,
64 GDB_SIGNAL_UNKNOWN = 143
67 #ifdef CONFIG_USER_ONLY
69 /* Map target signal numbers to GDB protocol signal numbers and vice
70 * versa. For user emulation's currently supported systems, we can
71 * assume most signals are defined.
74 static int gdb_signal_table[] = {
76 TARGET_SIGHUP,
77 TARGET_SIGINT,
78 TARGET_SIGQUIT,
79 TARGET_SIGILL,
80 TARGET_SIGTRAP,
81 TARGET_SIGABRT,
82 -1, /* SIGEMT */
83 TARGET_SIGFPE,
84 TARGET_SIGKILL,
85 TARGET_SIGBUS,
86 TARGET_SIGSEGV,
87 TARGET_SIGSYS,
88 TARGET_SIGPIPE,
89 TARGET_SIGALRM,
90 TARGET_SIGTERM,
91 TARGET_SIGURG,
92 TARGET_SIGSTOP,
93 TARGET_SIGTSTP,
94 TARGET_SIGCONT,
95 TARGET_SIGCHLD,
96 TARGET_SIGTTIN,
97 TARGET_SIGTTOU,
98 TARGET_SIGIO,
99 TARGET_SIGXCPU,
100 TARGET_SIGXFSZ,
101 TARGET_SIGVTALRM,
102 TARGET_SIGPROF,
103 TARGET_SIGWINCH,
104 -1, /* SIGLOST */
105 TARGET_SIGUSR1,
106 TARGET_SIGUSR2,
107 #ifdef TARGET_SIGPWR
108 TARGET_SIGPWR,
109 #else
110 -1, /* SIGPWR */
111 #endif
112 -1, /* SIGPOLL */
124 #ifdef __SIGRTMIN
125 __SIGRTMIN + 1,
126 __SIGRTMIN + 2,
127 __SIGRTMIN + 3,
128 __SIGRTMIN + 4,
129 __SIGRTMIN + 5,
130 __SIGRTMIN + 6,
131 __SIGRTMIN + 7,
132 __SIGRTMIN + 8,
133 __SIGRTMIN + 9,
134 __SIGRTMIN + 10,
135 __SIGRTMIN + 11,
136 __SIGRTMIN + 12,
137 __SIGRTMIN + 13,
138 __SIGRTMIN + 14,
139 __SIGRTMIN + 15,
140 __SIGRTMIN + 16,
141 __SIGRTMIN + 17,
142 __SIGRTMIN + 18,
143 __SIGRTMIN + 19,
144 __SIGRTMIN + 20,
145 __SIGRTMIN + 21,
146 __SIGRTMIN + 22,
147 __SIGRTMIN + 23,
148 __SIGRTMIN + 24,
149 __SIGRTMIN + 25,
150 __SIGRTMIN + 26,
151 __SIGRTMIN + 27,
152 __SIGRTMIN + 28,
153 __SIGRTMIN + 29,
154 __SIGRTMIN + 30,
155 __SIGRTMIN + 31,
156 -1, /* SIGCANCEL */
157 __SIGRTMIN,
158 __SIGRTMIN + 32,
159 __SIGRTMIN + 33,
160 __SIGRTMIN + 34,
161 __SIGRTMIN + 35,
162 __SIGRTMIN + 36,
163 __SIGRTMIN + 37,
164 __SIGRTMIN + 38,
165 __SIGRTMIN + 39,
166 __SIGRTMIN + 40,
167 __SIGRTMIN + 41,
168 __SIGRTMIN + 42,
169 __SIGRTMIN + 43,
170 __SIGRTMIN + 44,
171 __SIGRTMIN + 45,
172 __SIGRTMIN + 46,
173 __SIGRTMIN + 47,
174 __SIGRTMIN + 48,
175 __SIGRTMIN + 49,
176 __SIGRTMIN + 50,
177 __SIGRTMIN + 51,
178 __SIGRTMIN + 52,
179 __SIGRTMIN + 53,
180 __SIGRTMIN + 54,
181 __SIGRTMIN + 55,
182 __SIGRTMIN + 56,
183 __SIGRTMIN + 57,
184 __SIGRTMIN + 58,
185 __SIGRTMIN + 59,
186 __SIGRTMIN + 60,
187 __SIGRTMIN + 61,
188 __SIGRTMIN + 62,
189 __SIGRTMIN + 63,
190 __SIGRTMIN + 64,
191 __SIGRTMIN + 65,
192 __SIGRTMIN + 66,
193 __SIGRTMIN + 67,
194 __SIGRTMIN + 68,
195 __SIGRTMIN + 69,
196 __SIGRTMIN + 70,
197 __SIGRTMIN + 71,
198 __SIGRTMIN + 72,
199 __SIGRTMIN + 73,
200 __SIGRTMIN + 74,
201 __SIGRTMIN + 75,
202 __SIGRTMIN + 76,
203 __SIGRTMIN + 77,
204 __SIGRTMIN + 78,
205 __SIGRTMIN + 79,
206 __SIGRTMIN + 80,
207 __SIGRTMIN + 81,
208 __SIGRTMIN + 82,
209 __SIGRTMIN + 83,
210 __SIGRTMIN + 84,
211 __SIGRTMIN + 85,
212 __SIGRTMIN + 86,
213 __SIGRTMIN + 87,
214 __SIGRTMIN + 88,
215 __SIGRTMIN + 89,
216 __SIGRTMIN + 90,
217 __SIGRTMIN + 91,
218 __SIGRTMIN + 92,
219 __SIGRTMIN + 93,
220 __SIGRTMIN + 94,
221 __SIGRTMIN + 95,
222 -1, /* SIGINFO */
223 -1, /* UNKNOWN */
224 -1, /* DEFAULT */
231 #endif
233 #else
234 /* In system mode we only need SIGINT and SIGTRAP; other signals
235 are not yet supported. */
237 enum {
238 TARGET_SIGINT = 2,
239 TARGET_SIGTRAP = 5
242 static int gdb_signal_table[] = {
245 TARGET_SIGINT,
248 TARGET_SIGTRAP
250 #endif
252 #ifdef CONFIG_USER_ONLY
253 static int target_signal_to_gdb (int sig)
255 int i;
256 for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++)
257 if (gdb_signal_table[i] == sig)
258 return i;
259 return GDB_SIGNAL_UNKNOWN;
261 #endif
263 static int gdb_signal_to_target (int sig)
265 if (sig < ARRAY_SIZE (gdb_signal_table))
266 return gdb_signal_table[sig];
267 else
268 return -1;
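/* Worked example (editorial note, not in the original source): with the
 * usual Linux numbering TARGET_SIGTRAP == 5 and GDB_SIGNAL_TRAP == 5, so
 * gdb_signal_to_target(GDB_SIGNAL_TRAP) returns TARGET_SIGTRAP, while
 * target_signal_to_gdb(TARGET_SIGTRAP) finds the same table entry and
 * returns its index. Signals without a table entry map to -1 or
 * GDB_SIGNAL_UNKNOWN respectively. */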
271 //#define DEBUG_GDB
273 typedef struct GDBRegisterState {
274 int base_reg;
275 int num_regs;
276 gdb_reg_cb get_reg;
277 gdb_reg_cb set_reg;
278 const char *xml;
279 struct GDBRegisterState *next;
280 } GDBRegisterState;
282 enum RSState {
283 RS_INACTIVE,
284 RS_IDLE,
285 RS_GETLINE,
286 RS_CHKSUM1,
287 RS_CHKSUM2,
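/* Descriptive note: these states drive the per-byte packet parser.
 * RS_IDLE waits for the leading '$', RS_GETLINE accumulates the payload
 * into line_buf until the '#' terminator, and RS_CHKSUM1/RS_CHKSUM2
 * consume the two hex checksum digits before the packet is dispatched
 * (or a '-' NAK is sent on a checksum mismatch). */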
289 typedef struct GDBState {
290 CPUArchState *c_cpu; /* current CPU for step/continue ops */
291 CPUArchState *g_cpu; /* current CPU for other ops */
292 CPUArchState *query_cpu; /* for q{f|s}ThreadInfo */
293 enum RSState state; /* parsing state */
294 char line_buf[MAX_PACKET_LENGTH];
295 int line_buf_index;
296 int line_csum;
297 uint8_t last_packet[MAX_PACKET_LENGTH + 4];
298 int last_packet_len;
299 int signal;
300 #ifdef CONFIG_USER_ONLY
301 int fd;
302 int running_state;
303 #else
304 CharDriverState *chr;
305 CharDriverState *mon_chr;
306 #endif
307 char syscall_buf[256];
308 gdb_syscall_complete_cb current_syscall_cb;
309 } GDBState;
311 /* By default use no IRQs and no timers while single stepping so as to
312 * make single stepping like an ICE HW step.
314 static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER;
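/* Editorial note: these flags can be inspected or changed at run time via
 * the "qqemu.sstep" packets handled in gdb_handle_packet() below, e.g. from
 * a connected gdb with "maintenance packet qqemu.sstepbits" and
 * "maintenance packet qqemu.sstep=0x1" (gdb command names shown for
 * illustration only). */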
316 static GDBState *gdbserver_state;
318 /* This is an ugly hack to cope with both new and old gdb.
319 If gdb sends qXfer:features:read then assume we're talking to a newish
320 gdb that understands target descriptions. */
321 static int gdb_has_xml;
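/* Example of the trigger (illustrative): a target-description-aware gdb
 * opens the session with a query such as
 *     qXfer:features:read:target.xml:0,ffb
 * and handling that packet below is what turns gdb_has_xml on. */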
323 #ifdef CONFIG_USER_ONLY
324 /* XXX: This is not thread safe. Do we care? */
325 static int gdbserver_fd = -1;
327 static int get_char(GDBState *s)
329 uint8_t ch;
330 int ret;
332 for(;;) {
333 ret = qemu_recv(s->fd, &ch, 1, 0);
334 if (ret < 0) {
335 if (errno == ECONNRESET)
336 s->fd = -1;
337 if (errno != EINTR && errno != EAGAIN)
338 return -1;
339 } else if (ret == 0) {
340 close(s->fd);
341 s->fd = -1;
342 return -1;
343 } else {
344 break;
347 return ch;
349 #endif
351 static enum {
352 GDB_SYS_UNKNOWN,
353 GDB_SYS_ENABLED,
354 GDB_SYS_DISABLED,
355 } gdb_syscall_mode;
357 /* If gdb is connected when the first semihosting syscall occurs then use
358 remote gdb syscalls. Otherwise use native file IO. */
359 int use_gdb_syscalls(void)
361 if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
362 gdb_syscall_mode = (gdbserver_state ? GDB_SYS_ENABLED
363 : GDB_SYS_DISABLED);
365 return gdb_syscall_mode == GDB_SYS_ENABLED;
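/* Illustrative caller sketch (helper names are hypothetical, not from this
 * file): semihosting code typically picks the back end like
 *
 *     if (use_gdb_syscalls()) {
 *         gdb_do_syscall(done_cb, "write,%x,%x,%x", fd, addr, len);
 *     } else {
 *         do_native_write(fd, addr, len);
 *     }
 */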
368 /* Resume execution. */
369 static inline void gdb_continue(GDBState *s)
371 #ifdef CONFIG_USER_ONLY
372 s->running_state = 1;
373 #else
374 if (runstate_check(RUN_STATE_GUEST_PANICKED)) {
375 runstate_set(RUN_STATE_DEBUG);
377 if (!runstate_needs_reset()) {
378 vm_start();
380 #endif
383 static void put_buffer(GDBState *s, const uint8_t *buf, int len)
385 #ifdef CONFIG_USER_ONLY
386 int ret;
388 while (len > 0) {
389 ret = send(s->fd, buf, len, 0);
390 if (ret < 0) {
391 if (errno != EINTR && errno != EAGAIN)
392 return;
393 } else {
394 buf += ret;
395 len -= ret;
398 #else
399 qemu_chr_fe_write(s->chr, buf, len);
400 #endif
403 static inline int fromhex(int v)
405 if (v >= '0' && v <= '9')
406 return v - '0';
407 else if (v >= 'A' && v <= 'F')
408 return v - 'A' + 10;
409 else if (v >= 'a' && v <= 'f')
410 return v - 'a' + 10;
411 else
412 return 0;
415 static inline int tohex(int v)
417 if (v < 10)
418 return v + '0';
419 else
420 return v - 10 + 'a';
423 static void memtohex(char *buf, const uint8_t *mem, int len)
425 int i, c;
426 char *q;
427 q = buf;
428 for(i = 0; i < len; i++) {
429 c = mem[i];
430 *q++ = tohex(c >> 4);
431 *q++ = tohex(c & 0xf);
433 *q = '\0';
436 static void hextomem(uint8_t *mem, const char *buf, int len)
438 int i;
440 for(i = 0; i < len; i++) {
441 mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
442 buf += 2;
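/* Worked example: memtohex() turns the two bytes {0x4f, 0x4b} into the
 * NUL-terminated string "4f4b", and hextomem(mem, "4f4b", 2) restores the
 * original bytes; fromhex()/tohex() handle the per-nibble conversion. */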
446 /* return -1 if error, 0 if OK */
447 static int put_packet_binary(GDBState *s, const char *buf, int len)
449 int csum, i;
450 uint8_t *p;
452 for(;;) {
453 p = s->last_packet;
454 *(p++) = '$';
455 memcpy(p, buf, len);
456 p += len;
457 csum = 0;
458 for(i = 0; i < len; i++) {
459 csum += buf[i];
461 *(p++) = '#';
462 *(p++) = tohex((csum >> 4) & 0xf);
463 *(p++) = tohex((csum) & 0xf);
465 s->last_packet_len = p - s->last_packet;
466 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
468 #ifdef CONFIG_USER_ONLY
469 i = get_char(s);
470 if (i < 0)
471 return -1;
472 if (i == '+')
473 break;
474 #else
475 break;
476 #endif
478 return 0;
481 /* return -1 if error, 0 if OK */
482 static int put_packet(GDBState *s, const char *buf)
484 #ifdef DEBUG_GDB
485 printf("reply='%s'\n", buf);
486 #endif
488 return put_packet_binary(s, buf, strlen(buf));
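/* Worked example: put_packet(s, "OK") frames the reply as "$OK#9a" on the
 * wire, since the modulo-256 checksum of 'O' (0x4f) + 'K' (0x4b) is 0x9a. */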
491 /* The GDB remote protocol transfers values in target byte order. This means
492 we can use the raw memory access routines to access the value buffer.
493 Conveniently, these also handle the case where the buffer is mis-aligned.
495 #define GET_REG8(val) do { \
496 stb_p(mem_buf, val); \
497 return 1; \
498 } while(0)
499 #define GET_REG16(val) do { \
500 stw_p(mem_buf, val); \
501 return 2; \
502 } while(0)
503 #define GET_REG32(val) do { \
504 stl_p(mem_buf, val); \
505 return 4; \
506 } while(0)
507 #define GET_REG64(val) do { \
508 stq_p(mem_buf, val); \
509 return 8; \
510 } while(0)
512 #if TARGET_LONG_BITS == 64
513 #define GET_REGL(val) GET_REG64(val)
514 #define ldtul_p(addr) ldq_p(addr)
515 #else
516 #define GET_REGL(val) GET_REG32(val)
517 #define ldtul_p(addr) ldl_p(addr)
518 #endif
520 #if defined(TARGET_I386)
522 #ifdef TARGET_X86_64
523 static const int gpr_map[16] = {
524 R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
525 8, 9, 10, 11, 12, 13, 14, 15
527 #else
528 #define gpr_map gpr_map32
529 #endif
530 static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
532 #define NUM_CORE_REGS (CPU_NB_REGS * 2 + 25)
534 #define IDX_IP_REG CPU_NB_REGS
535 #define IDX_FLAGS_REG (IDX_IP_REG + 1)
536 #define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
537 #define IDX_FP_REGS (IDX_SEG_REGS + 6)
538 #define IDX_XMM_REGS (IDX_FP_REGS + 16)
539 #define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
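/* Worked layout example (editorial note): on a 64-bit build where
 * CPU_NB_REGS is 16, these indices resolve to IP=16, FLAGS=17, segment
 * registers 18-23, the x87/FPU block at 24-39, XMM0-XMM15 at 40-55 and
 * MXCSR at 56, giving NUM_CORE_REGS = 16 * 2 + 25 = 57. */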
541 static int cpu_gdb_read_register(CPUX86State *env, uint8_t *mem_buf, int n)
543 if (n < CPU_NB_REGS) {
544 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
545 GET_REG64(env->regs[gpr_map[n]]);
546 } else if (n < CPU_NB_REGS32) {
547 GET_REG32(env->regs[gpr_map32[n]]);
549 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
550 #ifdef USE_X86LDOUBLE
551 /* FIXME: byteswap float values - after fixing fpregs layout. */
552 memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
553 #else
554 memset(mem_buf, 0, 10);
555 #endif
556 return 10;
557 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
558 n -= IDX_XMM_REGS;
559 if (n < CPU_NB_REGS32 ||
560 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
561 stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
562 stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
563 return 16;
565 } else {
566 switch (n) {
567 case IDX_IP_REG:
568 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
569 GET_REG64(env->eip);
570 } else {
571 GET_REG32(env->eip);
573 case IDX_FLAGS_REG: GET_REG32(env->eflags);
575 case IDX_SEG_REGS: GET_REG32(env->segs[R_CS].selector);
576 case IDX_SEG_REGS + 1: GET_REG32(env->segs[R_SS].selector);
577 case IDX_SEG_REGS + 2: GET_REG32(env->segs[R_DS].selector);
578 case IDX_SEG_REGS + 3: GET_REG32(env->segs[R_ES].selector);
579 case IDX_SEG_REGS + 4: GET_REG32(env->segs[R_FS].selector);
580 case IDX_SEG_REGS + 5: GET_REG32(env->segs[R_GS].selector);
582 case IDX_FP_REGS + 8: GET_REG32(env->fpuc);
583 case IDX_FP_REGS + 9: GET_REG32((env->fpus & ~0x3800) |
584 (env->fpstt & 0x7) << 11);
585 case IDX_FP_REGS + 10: GET_REG32(0); /* ftag */
586 case IDX_FP_REGS + 11: GET_REG32(0); /* fiseg */
587 case IDX_FP_REGS + 12: GET_REG32(0); /* fioff */
588 case IDX_FP_REGS + 13: GET_REG32(0); /* foseg */
589 case IDX_FP_REGS + 14: GET_REG32(0); /* fooff */
590 case IDX_FP_REGS + 15: GET_REG32(0); /* fop */
592 case IDX_MXCSR_REG: GET_REG32(env->mxcsr);
595 return 0;
598 static int cpu_x86_gdb_load_seg(CPUX86State *env, int sreg, uint8_t *mem_buf)
600 uint16_t selector = ldl_p(mem_buf);
602 if (selector != env->segs[sreg].selector) {
603 #if defined(CONFIG_USER_ONLY)
604 cpu_x86_load_seg(env, sreg, selector);
605 #else
606 unsigned int limit, flags;
607 target_ulong base;
609 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
610 base = selector << 4;
611 limit = 0xffff;
612 flags = 0;
613 } else {
614 if (!cpu_x86_get_descr_debug(env, selector, &base, &limit, &flags))
615 return 4;
617 cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
618 #endif
620 return 4;
623 static int cpu_gdb_write_register(CPUX86State *env, uint8_t *mem_buf, int n)
625 uint32_t tmp;
627 if (n < CPU_NB_REGS) {
628 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
629 env->regs[gpr_map[n]] = ldtul_p(mem_buf);
630 return sizeof(target_ulong);
631 } else if (n < CPU_NB_REGS32) {
632 n = gpr_map32[n];
633 env->regs[n] &= ~0xffffffffUL;
634 env->regs[n] |= (uint32_t)ldl_p(mem_buf);
635 return 4;
637 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
638 #ifdef USE_X86LDOUBLE
639 /* FIXME: byteswap float values - after fixing fpregs layout. */
640 memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
641 #endif
642 return 10;
643 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
644 n -= IDX_XMM_REGS;
645 if (n < CPU_NB_REGS32 ||
646 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
647 env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
648 env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
649 return 16;
651 } else {
652 switch (n) {
653 case IDX_IP_REG:
654 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
655 env->eip = ldq_p(mem_buf);
656 return 8;
657 } else {
658 env->eip &= ~0xffffffffUL;
659 env->eip |= (uint32_t)ldl_p(mem_buf);
660 return 4;
662 case IDX_FLAGS_REG:
663 env->eflags = ldl_p(mem_buf);
664 return 4;
666 case IDX_SEG_REGS: return cpu_x86_gdb_load_seg(env, R_CS, mem_buf);
667 case IDX_SEG_REGS + 1: return cpu_x86_gdb_load_seg(env, R_SS, mem_buf);
668 case IDX_SEG_REGS + 2: return cpu_x86_gdb_load_seg(env, R_DS, mem_buf);
669 case IDX_SEG_REGS + 3: return cpu_x86_gdb_load_seg(env, R_ES, mem_buf);
670 case IDX_SEG_REGS + 4: return cpu_x86_gdb_load_seg(env, R_FS, mem_buf);
671 case IDX_SEG_REGS + 5: return cpu_x86_gdb_load_seg(env, R_GS, mem_buf);
673 case IDX_FP_REGS + 8:
674 env->fpuc = ldl_p(mem_buf);
675 return 4;
676 case IDX_FP_REGS + 9:
677 tmp = ldl_p(mem_buf);
678 env->fpstt = (tmp >> 11) & 7;
679 env->fpus = tmp & ~0x3800;
680 return 4;
681 case IDX_FP_REGS + 10: /* ftag */ return 4;
682 case IDX_FP_REGS + 11: /* fiseg */ return 4;
683 case IDX_FP_REGS + 12: /* fioff */ return 4;
684 case IDX_FP_REGS + 13: /* foseg */ return 4;
685 case IDX_FP_REGS + 14: /* fooff */ return 4;
686 case IDX_FP_REGS + 15: /* fop */ return 4;
688 case IDX_MXCSR_REG:
689 env->mxcsr = ldl_p(mem_buf);
690 return 4;
693 /* Unrecognised register. */
694 return 0;
697 #elif defined (TARGET_PPC)
699 /* Old gdb always expects FP registers. Newer (xml-aware) gdb only
700 expects whatever the target description contains. Due to a
701 historical mishap the FP registers appear in between core integer
702 regs and PC, MSR, CR, and so forth. We hack round this by giving the
703 FP regs zero size when talking to a newer gdb. */
704 #define NUM_CORE_REGS 71
705 #if defined (TARGET_PPC64)
706 #define GDB_CORE_XML "power64-core.xml"
707 #else
708 #define GDB_CORE_XML "power-core.xml"
709 #endif
711 static int cpu_gdb_read_register(CPUPPCState *env, uint8_t *mem_buf, int n)
713 if (n < 32) {
714 /* gprs */
715 GET_REGL(env->gpr[n]);
716 } else if (n < 64) {
717 /* fprs */
718 if (gdb_has_xml)
719 return 0;
720 stfq_p(mem_buf, env->fpr[n-32]);
721 return 8;
722 } else {
723 switch (n) {
724 case 64: GET_REGL(env->nip);
725 case 65: GET_REGL(env->msr);
726 case 66:
728 uint32_t cr = 0;
729 int i;
730 for (i = 0; i < 8; i++)
731 cr |= env->crf[i] << (32 - ((i + 1) * 4));
732 GET_REG32(cr);
734 case 67: GET_REGL(env->lr);
735 case 68: GET_REGL(env->ctr);
736 case 69: GET_REGL(env->xer);
737 case 70:
739 if (gdb_has_xml)
740 return 0;
741 GET_REG32(env->fpscr);
745 return 0;
748 static int cpu_gdb_write_register(CPUPPCState *env, uint8_t *mem_buf, int n)
750 if (n < 32) {
751 /* gprs */
752 env->gpr[n] = ldtul_p(mem_buf);
753 return sizeof(target_ulong);
754 } else if (n < 64) {
755 /* fprs */
756 if (gdb_has_xml)
757 return 0;
758 env->fpr[n-32] = ldfq_p(mem_buf);
759 return 8;
760 } else {
761 switch (n) {
762 case 64:
763 env->nip = ldtul_p(mem_buf);
764 return sizeof(target_ulong);
765 case 65:
766 ppc_store_msr(env, ldtul_p(mem_buf));
767 return sizeof(target_ulong);
768 case 66:
770 uint32_t cr = ldl_p(mem_buf);
771 int i;
772 for (i = 0; i < 8; i++)
773 env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
774 return 4;
776 case 67:
777 env->lr = ldtul_p(mem_buf);
778 return sizeof(target_ulong);
779 case 68:
780 env->ctr = ldtul_p(mem_buf);
781 return sizeof(target_ulong);
782 case 69:
783 env->xer = ldtul_p(mem_buf);
784 return sizeof(target_ulong);
785 case 70:
786 /* fpscr */
787 if (gdb_has_xml)
788 return 0;
789 store_fpscr(env, ldtul_p(mem_buf), 0xffffffff);
790 return sizeof(target_ulong);
793 return 0;
796 #elif defined (TARGET_SPARC)
798 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
799 #define NUM_CORE_REGS 86
800 #else
801 #define NUM_CORE_REGS 72
802 #endif
804 #ifdef TARGET_ABI32
805 #define GET_REGA(val) GET_REG32(val)
806 #else
807 #define GET_REGA(val) GET_REGL(val)
808 #endif
810 static int cpu_gdb_read_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
812 if (n < 8) {
813 /* g0..g7 */
814 GET_REGA(env->gregs[n]);
816 if (n < 32) {
817 /* register window */
818 GET_REGA(env->regwptr[n - 8]);
820 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
821 if (n < 64) {
822 /* fprs */
823 if (n & 1) {
824 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
825 } else {
826 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
829 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
830 switch (n) {
831 case 64: GET_REGA(env->y);
832 case 65: GET_REGA(cpu_get_psr(env));
833 case 66: GET_REGA(env->wim);
834 case 67: GET_REGA(env->tbr);
835 case 68: GET_REGA(env->pc);
836 case 69: GET_REGA(env->npc);
837 case 70: GET_REGA(env->fsr);
838 case 71: GET_REGA(0); /* csr */
839 default: GET_REGA(0);
841 #else
842 if (n < 64) {
843 /* f0-f31 */
844 if (n & 1) {
845 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
846 } else {
847 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
850 if (n < 80) {
851 /* f32-f62 (double width, even numbers only) */
852 GET_REG64(env->fpr[(n - 32) / 2].ll);
854 switch (n) {
855 case 80: GET_REGL(env->pc);
856 case 81: GET_REGL(env->npc);
857 case 82: GET_REGL((cpu_get_ccr(env) << 32) |
858 ((env->asi & 0xff) << 24) |
859 ((env->pstate & 0xfff) << 8) |
860 cpu_get_cwp64(env));
861 case 83: GET_REGL(env->fsr);
862 case 84: GET_REGL(env->fprs);
863 case 85: GET_REGL(env->y);
865 #endif
866 return 0;
869 static int cpu_gdb_write_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
871 #if defined(TARGET_ABI32)
872 abi_ulong tmp;
874 tmp = ldl_p(mem_buf);
875 #else
876 target_ulong tmp;
878 tmp = ldtul_p(mem_buf);
879 #endif
881 if (n < 8) {
882 /* g0..g7 */
883 env->gregs[n] = tmp;
884 } else if (n < 32) {
885 /* register window */
886 env->regwptr[n - 8] = tmp;
888 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
889 else if (n < 64) {
890 /* fprs */
891 /* f0-f31 */
892 if (n & 1) {
893 env->fpr[(n - 32) / 2].l.lower = tmp;
894 } else {
895 env->fpr[(n - 32) / 2].l.upper = tmp;
897 } else {
898 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
899 switch (n) {
900 case 64: env->y = tmp; break;
901 case 65: cpu_put_psr(env, tmp); break;
902 case 66: env->wim = tmp; break;
903 case 67: env->tbr = tmp; break;
904 case 68: env->pc = tmp; break;
905 case 69: env->npc = tmp; break;
906 case 70: env->fsr = tmp; break;
907 default: return 0;
910 return 4;
911 #else
912 else if (n < 64) {
913 /* f0-f31 */
914 tmp = ldl_p(mem_buf);
915 if (n & 1) {
916 env->fpr[(n - 32) / 2].l.lower = tmp;
917 } else {
918 env->fpr[(n - 32) / 2].l.upper = tmp;
920 return 4;
921 } else if (n < 80) {
922 /* f32-f62 (double width, even numbers only) */
923 env->fpr[(n - 32) / 2].ll = tmp;
924 } else {
925 switch (n) {
926 case 80: env->pc = tmp; break;
927 case 81: env->npc = tmp; break;
928 case 82:
929 cpu_put_ccr(env, tmp >> 32);
930 env->asi = (tmp >> 24) & 0xff;
931 env->pstate = (tmp >> 8) & 0xfff;
932 cpu_put_cwp64(env, tmp & 0xff);
933 break;
934 case 83: env->fsr = tmp; break;
935 case 84: env->fprs = tmp; break;
936 case 85: env->y = tmp; break;
937 default: return 0;
940 return 8;
941 #endif
943 #elif defined (TARGET_ARM)
945 /* Old gdb always expects FPA registers. Newer (xml-aware) gdb only expects
946 whatever the target description contains. Due to a historical mishap
947 the FPA registers appear in between core integer regs and the CPSR.
948 We hack round this by giving the FPA regs zero size when talking to a
949 newer gdb. */
950 #define NUM_CORE_REGS 26
951 #define GDB_CORE_XML "arm-core.xml"
953 static int cpu_gdb_read_register(CPUARMState *env, uint8_t *mem_buf, int n)
955 if (n < 16) {
956 /* Core integer register. */
957 GET_REG32(env->regs[n]);
959 if (n < 24) {
960 /* FPA registers. */
961 if (gdb_has_xml)
962 return 0;
963 memset(mem_buf, 0, 12);
964 return 12;
966 switch (n) {
967 case 24:
968 /* FPA status register. */
969 if (gdb_has_xml)
970 return 0;
971 GET_REG32(0);
972 case 25:
973 /* CPSR */
974 GET_REG32(cpsr_read(env));
976 /* Unknown register. */
977 return 0;
980 static int cpu_gdb_write_register(CPUARMState *env, uint8_t *mem_buf, int n)
982 uint32_t tmp;
984 tmp = ldl_p(mem_buf);
986 /* Mask out low bit of PC to work around gdb bugs. This will probably
987 cause problems if we ever implement the Jazelle DBX extensions. */
988 if (n == 15)
989 tmp &= ~1;
991 if (n < 16) {
992 /* Core integer register. */
993 env->regs[n] = tmp;
994 return 4;
996 if (n < 24) { /* 16-23 */
997 /* FPA registers (ignored). */
998 if (gdb_has_xml)
999 return 0;
1000 return 12;
1002 switch (n) {
1003 case 24:
1004 /* FPA status register (ignored). */
1005 if (gdb_has_xml)
1006 return 0;
1007 return 4;
1008 case 25:
1009 /* CPSR */
1010 cpsr_write (env, tmp, 0xffffffff);
1011 return 4;
1013 /* Unknown register. */
1014 return 0;
1017 #elif defined (TARGET_M68K)
1019 #define NUM_CORE_REGS 18
1021 #define GDB_CORE_XML "cf-core.xml"
1023 static int cpu_gdb_read_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1025 if (n < 8) {
1026 /* D0-D7 */
1027 GET_REG32(env->dregs[n]);
1028 } else if (n < 16) {
1029 /* A0-A7 */
1030 GET_REG32(env->aregs[n - 8]);
1031 } else {
1032 switch (n) {
1033 case 16: GET_REG32(env->sr);
1034 case 17: GET_REG32(env->pc);
1037 /* FP registers not included here because they vary between
1038 ColdFire and m68k. Use XML bits for these. */
1039 return 0;
1042 static int cpu_gdb_write_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1044 uint32_t tmp;
1046 tmp = ldl_p(mem_buf);
1048 if (n < 8) {
1049 /* D0-D7 */
1050 env->dregs[n] = tmp;
1051 } else if (n < 16) {
1052 /* A0-A7 */
1053 env->aregs[n - 8] = tmp;
1054 } else {
1055 switch (n) {
1056 case 16: env->sr = tmp; break;
1057 case 17: env->pc = tmp; break;
1058 default: return 0;
1061 return 4;
1063 #elif defined (TARGET_MIPS)
1065 #define NUM_CORE_REGS 73
1067 static int cpu_gdb_read_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1069 if (n < 32) {
1070 GET_REGL(env->active_tc.gpr[n]);
1072 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
1073 if (n >= 38 && n < 70) {
1074 if (env->CP0_Status & (1 << CP0St_FR))
1075 GET_REGL(env->active_fpu.fpr[n - 38].d);
1076 else
1077 GET_REGL(env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
1079 switch (n) {
1080 case 70: GET_REGL((int32_t)env->active_fpu.fcr31);
1081 case 71: GET_REGL((int32_t)env->active_fpu.fcr0);
1084 switch (n) {
1085 case 32: GET_REGL((int32_t)env->CP0_Status);
1086 case 33: GET_REGL(env->active_tc.LO[0]);
1087 case 34: GET_REGL(env->active_tc.HI[0]);
1088 case 35: GET_REGL(env->CP0_BadVAddr);
1089 case 36: GET_REGL((int32_t)env->CP0_Cause);
1090 case 37: GET_REGL(env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16));
1091 case 72: GET_REGL(0); /* fp */
1092 case 89: GET_REGL((int32_t)env->CP0_PRid);
1094 if (n >= 73 && n <= 88) {
1095 /* 16 embedded regs. */
1096 GET_REGL(0);
1099 return 0;
1102 /* convert MIPS rounding mode in FCR31 to IEEE library */
1103 static unsigned int ieee_rm[] =
1105 float_round_nearest_even,
1106 float_round_to_zero,
1107 float_round_up,
1108 float_round_down
1110 #define RESTORE_ROUNDING_MODE \
1111 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
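/* Worked example: if (fcr31 & 3) == 1, i.e. the MIPS "round toward zero"
 * mode, RESTORE_ROUNDING_MODE selects ieee_rm[1] == float_round_to_zero
 * for the softfloat status. */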
1113 static int cpu_gdb_write_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1115 target_ulong tmp;
1117 tmp = ldtul_p(mem_buf);
1119 if (n < 32) {
1120 env->active_tc.gpr[n] = tmp;
1121 return sizeof(target_ulong);
1123 if (env->CP0_Config1 & (1 << CP0C1_FP)
1124 && n >= 38 && n < 73) {
1125 if (n < 70) {
1126 if (env->CP0_Status & (1 << CP0St_FR))
1127 env->active_fpu.fpr[n - 38].d = tmp;
1128 else
1129 env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
1131 switch (n) {
1132 case 70:
1133 env->active_fpu.fcr31 = tmp & 0xFF83FFFF;
1134 /* set rounding mode */
1135 RESTORE_ROUNDING_MODE;
1136 break;
1137 case 71: env->active_fpu.fcr0 = tmp; break;
1139 return sizeof(target_ulong);
1141 switch (n) {
1142 case 32: env->CP0_Status = tmp; break;
1143 case 33: env->active_tc.LO[0] = tmp; break;
1144 case 34: env->active_tc.HI[0] = tmp; break;
1145 case 35: env->CP0_BadVAddr = tmp; break;
1146 case 36: env->CP0_Cause = tmp; break;
1147 case 37:
1148 env->active_tc.PC = tmp & ~(target_ulong)1;
1149 if (tmp & 1) {
1150 env->hflags |= MIPS_HFLAG_M16;
1151 } else {
1152 env->hflags &= ~(MIPS_HFLAG_M16);
1154 break;
1155 case 72: /* fp, ignored */ break;
1156 default:
1157 if (n > 89)
1158 return 0;
1159 /* Other registers are readonly. Ignore writes. */
1160 break;
1163 return sizeof(target_ulong);
1165 #elif defined(TARGET_OPENRISC)
1167 #define NUM_CORE_REGS (32 + 3)
1169 static int cpu_gdb_read_register(CPUOpenRISCState *env, uint8_t *mem_buf, int n)
1171 if (n < 32) {
1172 GET_REG32(env->gpr[n]);
1173 } else {
1174 switch (n) {
1175 case 32: /* PPC */
1176 GET_REG32(env->ppc);
1177 break;
1179 case 33: /* NPC */
1180 GET_REG32(env->npc);
1181 break;
1183 case 34: /* SR */
1184 GET_REG32(env->sr);
1185 break;
1187 default:
1188 break;
1191 return 0;
1194 static int cpu_gdb_write_register(CPUOpenRISCState *env,
1195 uint8_t *mem_buf, int n)
1197 uint32_t tmp;
1199 if (n > NUM_CORE_REGS) {
1200 return 0;
1203 tmp = ldl_p(mem_buf);
1205 if (n < 32) {
1206 env->gpr[n] = tmp;
1207 } else {
1208 switch (n) {
1209 case 32: /* PPC */
1210 env->ppc = tmp;
1211 break;
1213 case 33: /* NPC */
1214 env->npc = tmp;
1215 break;
1217 case 34: /* SR */
1218 env->sr = tmp;
1219 break;
1221 default:
1222 break;
1225 return 4;
1227 #elif defined (TARGET_SH4)
1229 /* Hint: Use "set architecture sh4" in GDB to see fpu registers */
1230 /* FIXME: We should use XML for this. */
1232 #define NUM_CORE_REGS 59
1234 static int cpu_gdb_read_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1236 switch (n) {
1237 case 0 ... 7:
1238 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1239 GET_REGL(env->gregs[n + 16]);
1240 } else {
1241 GET_REGL(env->gregs[n]);
1243 case 8 ... 15:
1244 GET_REGL(env->gregs[n]);
1245 case 16:
1246 GET_REGL(env->pc);
1247 case 17:
1248 GET_REGL(env->pr);
1249 case 18:
1250 GET_REGL(env->gbr);
1251 case 19:
1252 GET_REGL(env->vbr);
1253 case 20:
1254 GET_REGL(env->mach);
1255 case 21:
1256 GET_REGL(env->macl);
1257 case 22:
1258 GET_REGL(env->sr);
1259 case 23:
1260 GET_REGL(env->fpul);
1261 case 24:
1262 GET_REGL(env->fpscr);
1263 case 25 ... 40:
1264 if (env->fpscr & FPSCR_FR) {
1265 stfl_p(mem_buf, env->fregs[n - 9]);
1266 } else {
1267 stfl_p(mem_buf, env->fregs[n - 25]);
1269 return 4;
1270 case 41:
1271 GET_REGL(env->ssr);
1272 case 42:
1273 GET_REGL(env->spc);
1274 case 43 ... 50:
1275 GET_REGL(env->gregs[n - 43]);
1276 case 51 ... 58:
1277 GET_REGL(env->gregs[n - (51 - 16)]);
1280 return 0;
1283 static int cpu_gdb_write_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1285 switch (n) {
1286 case 0 ... 7:
1287 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1288 env->gregs[n + 16] = ldl_p(mem_buf);
1289 } else {
1290 env->gregs[n] = ldl_p(mem_buf);
1292 break;
1293 case 8 ... 15:
1294 env->gregs[n] = ldl_p(mem_buf);
1295 break;
1296 case 16:
1297 env->pc = ldl_p(mem_buf);
1298 break;
1299 case 17:
1300 env->pr = ldl_p(mem_buf);
1301 break;
1302 case 18:
1303 env->gbr = ldl_p(mem_buf);
1304 break;
1305 case 19:
1306 env->vbr = ldl_p(mem_buf);
1307 break;
1308 case 20:
1309 env->mach = ldl_p(mem_buf);
1310 break;
1311 case 21:
1312 env->macl = ldl_p(mem_buf);
1313 break;
1314 case 22:
1315 env->sr = ldl_p(mem_buf);
1316 break;
1317 case 23:
1318 env->fpul = ldl_p(mem_buf);
1319 break;
1320 case 24:
1321 env->fpscr = ldl_p(mem_buf);
1322 break;
1323 case 25 ... 40:
1324 if (env->fpscr & FPSCR_FR) {
1325 env->fregs[n - 9] = ldfl_p(mem_buf);
1326 } else {
1327 env->fregs[n - 25] = ldfl_p(mem_buf);
1329 break;
1330 case 41:
1331 env->ssr = ldl_p(mem_buf);
1332 break;
1333 case 42:
1334 env->spc = ldl_p(mem_buf);
1335 break;
1336 case 43 ... 50:
1337 env->gregs[n - 43] = ldl_p(mem_buf);
1338 break;
1339 case 51 ... 58:
1340 env->gregs[n - (51 - 16)] = ldl_p(mem_buf);
1341 break;
1342 default: return 0;
1345 return 4;
1347 #elif defined (TARGET_MICROBLAZE)
1349 #define NUM_CORE_REGS (32 + 5)
1351 static int cpu_gdb_read_register(CPUMBState *env, uint8_t *mem_buf, int n)
1353 if (n < 32) {
1354 GET_REG32(env->regs[n]);
1355 } else {
1356 GET_REG32(env->sregs[n - 32]);
1358 return 0;
1361 static int cpu_gdb_write_register(CPUMBState *env, uint8_t *mem_buf, int n)
1363 uint32_t tmp;
1365 if (n > NUM_CORE_REGS)
1366 return 0;
1368 tmp = ldl_p(mem_buf);
1370 if (n < 32) {
1371 env->regs[n] = tmp;
1372 } else {
1373 env->sregs[n - 32] = tmp;
1375 return 4;
1377 #elif defined (TARGET_CRIS)
1379 #define NUM_CORE_REGS 49
1381 static int
1382 read_register_crisv10(CPUCRISState *env, uint8_t *mem_buf, int n)
1384 if (n < 15) {
1385 GET_REG32(env->regs[n]);
1388 if (n == 15) {
1389 GET_REG32(env->pc);
1392 if (n < 32) {
1393 switch (n) {
1394 case 16:
1395 GET_REG8(env->pregs[n - 16]);
1396 break;
1397 case 17:
1398 GET_REG8(env->pregs[n - 16]);
1399 break;
1400 case 20:
1401 case 21:
1402 GET_REG16(env->pregs[n - 16]);
1403 break;
1404 default:
1405 if (n >= 23) {
1406 GET_REG32(env->pregs[n - 16]);
1408 break;
1411 return 0;
1414 static int cpu_gdb_read_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1416 uint8_t srs;
1418 if (env->pregs[PR_VR] < 32)
1419 return read_register_crisv10(env, mem_buf, n);
1421 srs = env->pregs[PR_SRS];
1422 if (n < 16) {
1423 GET_REG32(env->regs[n]);
1426 if (n >= 21 && n < 32) {
1427 GET_REG32(env->pregs[n - 16]);
1429 if (n >= 33 && n < 49) {
1430 GET_REG32(env->sregs[srs][n - 33]);
1432 switch (n) {
1433 case 16: GET_REG8(env->pregs[0]);
1434 case 17: GET_REG8(env->pregs[1]);
1435 case 18: GET_REG32(env->pregs[2]);
1436 case 19: GET_REG8(srs);
1437 case 20: GET_REG16(env->pregs[4]);
1438 case 32: GET_REG32(env->pc);
1441 return 0;
1444 static int cpu_gdb_write_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1446 uint32_t tmp;
1448 if (n > 49)
1449 return 0;
1451 tmp = ldl_p(mem_buf);
1453 if (n < 16) {
1454 env->regs[n] = tmp;
1457 if (n >= 21 && n < 32) {
1458 env->pregs[n - 16] = tmp;
1461 /* FIXME: Should support function regs be writable? */
1462 switch (n) {
1463 case 16: return 1;
1464 case 17: return 1;
1465 case 18: env->pregs[PR_PID] = tmp; break;
1466 case 19: return 1;
1467 case 20: return 2;
1468 case 32: env->pc = tmp; break;
1471 return 4;
1473 #elif defined (TARGET_ALPHA)
1475 #define NUM_CORE_REGS 67
1477 static int cpu_gdb_read_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1479 uint64_t val;
1480 CPU_DoubleU d;
1482 switch (n) {
1483 case 0 ... 30:
1484 val = env->ir[n];
1485 break;
1486 case 32 ... 62:
1487 d.d = env->fir[n - 32];
1488 val = d.ll;
1489 break;
1490 case 63:
1491 val = cpu_alpha_load_fpcr(env);
1492 break;
1493 case 64:
1494 val = env->pc;
1495 break;
1496 case 66:
1497 val = env->unique;
1498 break;
1499 case 31:
1500 case 65:
1501 /* 31 really is the zero register; 65 is unassigned in the
1502 gdb protocol, but is still required to occupy 8 bytes. */
1503 val = 0;
1504 break;
1505 default:
1506 return 0;
1508 GET_REGL(val);
1511 static int cpu_gdb_write_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1513 target_ulong tmp = ldtul_p(mem_buf);
1514 CPU_DoubleU d;
1516 switch (n) {
1517 case 0 ... 30:
1518 env->ir[n] = tmp;
1519 break;
1520 case 32 ... 62:
1521 d.ll = tmp;
1522 env->fir[n - 32] = d.d;
1523 break;
1524 case 63:
1525 cpu_alpha_store_fpcr(env, tmp);
1526 break;
1527 case 64:
1528 env->pc = tmp;
1529 break;
1530 case 66:
1531 env->unique = tmp;
1532 break;
1533 case 31:
1534 case 65:
1535 /* 31 really is the zero register; 65 is unassigned in the
1536 gdb protocol, but is still required to occupy 8 bytes. */
1537 break;
1538 default:
1539 return 0;
1541 return 8;
1543 #elif defined (TARGET_S390X)
1545 #define NUM_CORE_REGS S390_NUM_REGS
1547 static int cpu_gdb_read_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1549 uint64_t val;
1550 int cc_op;
1552 switch (n) {
1553 case S390_PSWM_REGNUM:
1554 cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr);
1555 val = deposit64(env->psw.mask, 44, 2, cc_op);
1556 GET_REGL(val);
1557 break;
1558 case S390_PSWA_REGNUM:
1559 GET_REGL(env->psw.addr);
1560 break;
1561 case S390_R0_REGNUM ... S390_R15_REGNUM:
1562 GET_REGL(env->regs[n-S390_R0_REGNUM]);
1563 break;
1564 case S390_A0_REGNUM ... S390_A15_REGNUM:
1565 GET_REG32(env->aregs[n-S390_A0_REGNUM]);
1566 break;
1567 case S390_FPC_REGNUM:
1568 GET_REG32(env->fpc);
1569 break;
1570 case S390_F0_REGNUM ... S390_F15_REGNUM:
1571 GET_REG64(env->fregs[n-S390_F0_REGNUM].ll);
1572 break;
1575 return 0;
1578 static int cpu_gdb_write_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1580 target_ulong tmpl;
1581 uint32_t tmp32;
1582 int r = 8;
1583 tmpl = ldtul_p(mem_buf);
1584 tmp32 = ldl_p(mem_buf);
1586 switch (n) {
1587 case S390_PSWM_REGNUM:
1588 env->psw.mask = tmpl;
1589 env->cc_op = extract64(tmpl, 44, 2);
1590 break;
1591 case S390_PSWA_REGNUM:
1592 env->psw.addr = tmpl;
1593 break;
1594 case S390_R0_REGNUM ... S390_R15_REGNUM:
1595 env->regs[n-S390_R0_REGNUM] = tmpl;
1596 break;
1597 case S390_A0_REGNUM ... S390_A15_REGNUM:
1598 env->aregs[n-S390_A0_REGNUM] = tmp32;
1599 r = 4;
1600 break;
1601 case S390_FPC_REGNUM:
1602 env->fpc = tmp32;
1603 r = 4;
1604 break;
1605 case S390_F0_REGNUM ... S390_F15_REGNUM:
1606 env->fregs[n-S390_F0_REGNUM].ll = tmpl;
1607 break;
1608 default:
1609 return 0;
1611 return r;
1613 #elif defined (TARGET_LM32)
1615 #include "hw/lm32/lm32_pic.h"
1616 #define NUM_CORE_REGS (32 + 7)
1618 static int cpu_gdb_read_register(CPULM32State *env, uint8_t *mem_buf, int n)
1620 if (n < 32) {
1621 GET_REG32(env->regs[n]);
1622 } else {
1623 switch (n) {
1624 case 32:
1625 GET_REG32(env->pc);
1626 break;
1627 /* FIXME: put in right exception ID */
1628 case 33:
1629 GET_REG32(0);
1630 break;
1631 case 34:
1632 GET_REG32(env->eba);
1633 break;
1634 case 35:
1635 GET_REG32(env->deba);
1636 break;
1637 case 36:
1638 GET_REG32(env->ie);
1639 break;
1640 case 37:
1641 GET_REG32(lm32_pic_get_im(env->pic_state));
1642 break;
1643 case 38:
1644 GET_REG32(lm32_pic_get_ip(env->pic_state));
1645 break;
1648 return 0;
1651 static int cpu_gdb_write_register(CPULM32State *env, uint8_t *mem_buf, int n)
1653 uint32_t tmp;
1655 if (n > NUM_CORE_REGS) {
1656 return 0;
1659 tmp = ldl_p(mem_buf);
1661 if (n < 32) {
1662 env->regs[n] = tmp;
1663 } else {
1664 switch (n) {
1665 case 32:
1666 env->pc = tmp;
1667 break;
1668 case 34:
1669 env->eba = tmp;
1670 break;
1671 case 35:
1672 env->deba = tmp;
1673 break;
1674 case 36:
1675 env->ie = tmp;
1676 break;
1677 case 37:
1678 lm32_pic_set_im(env->pic_state, tmp);
1679 break;
1680 case 38:
1681 lm32_pic_set_ip(env->pic_state, tmp);
1682 break;
1685 return 4;
1687 #elif defined(TARGET_XTENSA)
1689 /* Use num_core_regs to see only non-privileged registers in an unmodified gdb.
1690 * Use num_regs to see all registers. gdb modification is required for that:
1691 * reset bit 0 in the 'flags' field of the registers definitions in the
1692 * gdb/xtensa-config.c inside gdb source tree or inside gdb overlay.
1694 #define NUM_CORE_REGS (env->config->gdb_regmap.num_regs)
1695 #define num_g_regs NUM_CORE_REGS
1697 static int cpu_gdb_read_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1699 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1701 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1702 return 0;
1705 switch (reg->type) {
1706 case 9: /*pc*/
1707 GET_REG32(env->pc);
1708 break;
1710 case 1: /*ar*/
1711 xtensa_sync_phys_from_window(env);
1712 GET_REG32(env->phys_regs[(reg->targno & 0xff) % env->config->nareg]);
1713 break;
1715 case 2: /*SR*/
1716 GET_REG32(env->sregs[reg->targno & 0xff]);
1717 break;
1719 case 3: /*UR*/
1720 GET_REG32(env->uregs[reg->targno & 0xff]);
1721 break;
1723 case 4: /*f*/
1724 GET_REG32(float32_val(env->fregs[reg->targno & 0x0f]));
1725 break;
1727 case 8: /*a*/
1728 GET_REG32(env->regs[reg->targno & 0x0f]);
1729 break;
1731 default:
1732 qemu_log("%s from reg %d of unsupported type %d\n",
1733 __func__, n, reg->type);
1734 return 0;
1738 static int cpu_gdb_write_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1740 uint32_t tmp;
1741 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1743 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1744 return 0;
1747 tmp = ldl_p(mem_buf);
1749 switch (reg->type) {
1750 case 9: /*pc*/
1751 env->pc = tmp;
1752 break;
1754 case 1: /*ar*/
1755 env->phys_regs[(reg->targno & 0xff) % env->config->nareg] = tmp;
1756 xtensa_sync_window_from_phys(env);
1757 break;
1759 case 2: /*SR*/
1760 env->sregs[reg->targno & 0xff] = tmp;
1761 break;
1763 case 3: /*UR*/
1764 env->uregs[reg->targno & 0xff] = tmp;
1765 break;
1767 case 4: /*f*/
1768 env->fregs[reg->targno & 0x0f] = make_float32(tmp);
1769 break;
1771 case 8: /*a*/
1772 env->regs[reg->targno & 0x0f] = tmp;
1773 break;
1775 default:
1776 qemu_log("%s to reg %d of unsupported type %d\n",
1777 __func__, n, reg->type);
1778 return 0;
1781 return 4;
1783 #else
1785 #define NUM_CORE_REGS 0
1787 static int cpu_gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int n)
1789 return 0;
1792 static int cpu_gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int n)
1794 return 0;
1797 #endif
1799 #if !defined(TARGET_XTENSA)
1800 static int num_g_regs = NUM_CORE_REGS;
1801 #endif
1803 #ifdef GDB_CORE_XML
1804 /* Encode data using the encoding for 'x' packets. */
1805 static int memtox(char *buf, const char *mem, int len)
1807 char *p = buf;
1808 char c;
1810 while (len--) {
1811 c = *(mem++);
1812 switch (c) {
1813 case '#': case '$': case '*': case '}':
1814 *(p++) = '}';
1815 *(p++) = c ^ 0x20;
1816 break;
1817 default:
1818 *(p++) = c;
1819 break;
1822 return p - buf;
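/* Worked example: the escape is "XOR with 0x20 after a '}' prefix", so a
 * literal '#' (0x23) is emitted as '}' followed by 0x03, and a literal '}'
 * (0x7d) becomes '}' followed by ']' (0x5d); all other bytes pass through
 * unchanged. */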
1825 static const char *get_feature_xml(const char *p, const char **newp)
1827 size_t len;
1828 int i;
1829 const char *name;
1830 static char target_xml[1024];
1832 len = 0;
1833 while (p[len] && p[len] != ':')
1834 len++;
1835 *newp = p + len;
1837 name = NULL;
1838 if (strncmp(p, "target.xml", len) == 0) {
1839 /* Generate the XML description for this CPU. */
1840 if (!target_xml[0]) {
1841 GDBRegisterState *r;
1842 CPUArchState *env = first_cpu->env_ptr;
1844 snprintf(target_xml, sizeof(target_xml),
1845 "<?xml version=\"1.0\"?>"
1846 "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
1847 "<target>"
1848 "<xi:include href=\"%s\"/>",
1849 GDB_CORE_XML);
1851 for (r = env->gdb_regs; r; r = r->next) {
1852 pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
1853 pstrcat(target_xml, sizeof(target_xml), r->xml);
1854 pstrcat(target_xml, sizeof(target_xml), "\"/>");
1856 pstrcat(target_xml, sizeof(target_xml), "</target>");
1858 return target_xml;
1860 for (i = 0; ; i++) {
1861 name = xml_builtin[i][0];
1862 if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len))
1863 break;
1865 return name ? xml_builtin[i][1] : NULL;
1867 #endif
1869 static int gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int reg)
1871 GDBRegisterState *r;
1873 if (reg < NUM_CORE_REGS)
1874 return cpu_gdb_read_register(env, mem_buf, reg);
1876 for (r = env->gdb_regs; r; r = r->next) {
1877 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1878 return r->get_reg(env, mem_buf, reg - r->base_reg);
1881 return 0;
1884 static int gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int reg)
1886 GDBRegisterState *r;
1888 if (reg < NUM_CORE_REGS)
1889 return cpu_gdb_write_register(env, mem_buf, reg);
1891 for (r = env->gdb_regs; r; r = r->next) {
1892 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1893 return r->set_reg(env, mem_buf, reg - r->base_reg);
1896 return 0;
1899 #if !defined(TARGET_XTENSA)
1900 /* Register a supplemental set of CPU registers. If g_pos is nonzero it
1901 specifies the first register number and these registers are included in
1902 a standard "g" packet. Direction is relative to gdb, i.e. get_reg is
1903 gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
1906 void gdb_register_coprocessor(CPUArchState * env,
1907 gdb_reg_cb get_reg, gdb_reg_cb set_reg,
1908 int num_regs, const char *xml, int g_pos)
1910 GDBRegisterState *s;
1911 GDBRegisterState **p;
1912 static int last_reg = NUM_CORE_REGS;
1914 p = &env->gdb_regs;
1915 while (*p) {
1916 /* Check for duplicates. */
1917 if (strcmp((*p)->xml, xml) == 0)
1918 return;
1919 p = &(*p)->next;
1922 s = g_new0(GDBRegisterState, 1);
1923 s->base_reg = last_reg;
1924 s->num_regs = num_regs;
1925 s->get_reg = get_reg;
1926 s->set_reg = set_reg;
1927 s->xml = xml;
1929 /* Add to end of list. */
1930 last_reg += num_regs;
1931 *p = s;
1932 if (g_pos) {
1933 if (g_pos != s->base_reg) {
1934 fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n"
1935 "Expected %d got %d\n", xml, g_pos, s->base_reg);
1936 } else {
1937 num_g_regs = last_reg;
1941 #endif
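/* Illustrative usage sketch (hypothetical names, not from this file): a
 * target's CPU init code would typically expose an extra register bank as
 *
 *     gdb_register_coprocessor(env, foo_gdb_get_reg, foo_gdb_set_reg,
 *                              NUM_FOO_REGS, "foo-registers.xml", 0);
 *
 * with g_pos left at 0 so the registers are reachable through 'p'/'P' and
 * the XML description rather than being appended to the plain "g" packet. */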
1943 #ifndef CONFIG_USER_ONLY
1944 static const int xlat_gdb_type[] = {
1945 [GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE,
1946 [GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ,
1947 [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
1949 #endif
1951 static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
1953 CPUState *cpu;
1954 CPUArchState *env;
1955 int err = 0;
1957 if (kvm_enabled())
1958 return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1960 switch (type) {
1961 case GDB_BREAKPOINT_SW:
1962 case GDB_BREAKPOINT_HW:
1963 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
1964 env = cpu->env_ptr;
1965 err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
1966 if (err)
1967 break;
1969 return err;
1970 #ifndef CONFIG_USER_ONLY
1971 case GDB_WATCHPOINT_WRITE:
1972 case GDB_WATCHPOINT_READ:
1973 case GDB_WATCHPOINT_ACCESS:
1974 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
1975 env = cpu->env_ptr;
1976 err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
1977 NULL);
1978 if (err)
1979 break;
1981 return err;
1982 #endif
1983 default:
1984 return -ENOSYS;
1988 static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
1990 CPUState *cpu;
1991 CPUArchState *env;
1992 int err = 0;
1994 if (kvm_enabled())
1995 return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1997 switch (type) {
1998 case GDB_BREAKPOINT_SW:
1999 case GDB_BREAKPOINT_HW:
2000 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2001 env = cpu->env_ptr;
2002 err = cpu_breakpoint_remove(env, addr, BP_GDB);
2003 if (err)
2004 break;
2006 return err;
2007 #ifndef CONFIG_USER_ONLY
2008 case GDB_WATCHPOINT_WRITE:
2009 case GDB_WATCHPOINT_READ:
2010 case GDB_WATCHPOINT_ACCESS:
2011 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2012 env = cpu->env_ptr;
2013 err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
2014 if (err)
2015 break;
2017 return err;
2018 #endif
2019 default:
2020 return -ENOSYS;
2024 static void gdb_breakpoint_remove_all(void)
2026 CPUState *cpu;
2027 CPUArchState *env;
2029 if (kvm_enabled()) {
2030 kvm_remove_all_breakpoints(ENV_GET_CPU(gdbserver_state->c_cpu));
2031 return;
2034 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2035 env = cpu->env_ptr;
2036 cpu_breakpoint_remove_all(env, BP_GDB);
2037 #ifndef CONFIG_USER_ONLY
2038 cpu_watchpoint_remove_all(env, BP_GDB);
2039 #endif
2043 static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
2045 cpu_synchronize_state(ENV_GET_CPU(s->c_cpu));
2046 #if defined(TARGET_I386)
2047 s->c_cpu->eip = pc;
2048 #elif defined (TARGET_PPC)
2049 s->c_cpu->nip = pc;
2050 #elif defined (TARGET_SPARC)
2051 s->c_cpu->pc = pc;
2052 s->c_cpu->npc = pc + 4;
2053 #elif defined (TARGET_ARM)
2054 s->c_cpu->regs[15] = pc;
2055 #elif defined (TARGET_SH4)
2056 s->c_cpu->pc = pc;
2057 #elif defined (TARGET_MIPS)
2058 s->c_cpu->active_tc.PC = pc & ~(target_ulong)1;
2059 if (pc & 1) {
2060 s->c_cpu->hflags |= MIPS_HFLAG_M16;
2061 } else {
2062 s->c_cpu->hflags &= ~(MIPS_HFLAG_M16);
2064 #elif defined (TARGET_MICROBLAZE)
2065 s->c_cpu->sregs[SR_PC] = pc;
2066 #elif defined(TARGET_OPENRISC)
2067 s->c_cpu->pc = pc;
2068 #elif defined (TARGET_CRIS)
2069 s->c_cpu->pc = pc;
2070 #elif defined (TARGET_ALPHA)
2071 s->c_cpu->pc = pc;
2072 #elif defined (TARGET_S390X)
2073 s->c_cpu->psw.addr = pc;
2074 #elif defined (TARGET_LM32)
2075 s->c_cpu->pc = pc;
2076 #elif defined(TARGET_XTENSA)
2077 s->c_cpu->pc = pc;
2078 #endif
2081 static CPUArchState *find_cpu(uint32_t thread_id)
2083 CPUState *cpu;
2085 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2086 if (cpu_index(cpu) == thread_id) {
2087 return cpu->env_ptr;
2091 return NULL;
2094 static int gdb_handle_packet(GDBState *s, const char *line_buf)
2096 CPUArchState *env;
2097 const char *p;
2098 uint32_t thread;
2099 int ch, reg_size, type, res;
2100 char buf[MAX_PACKET_LENGTH];
2101 uint8_t mem_buf[MAX_PACKET_LENGTH];
2102 uint8_t *registers;
2103 target_ulong addr, len;
2105 #ifdef DEBUG_GDB
2106 printf("command='%s'\n", line_buf);
2107 #endif
2108 p = line_buf;
2109 ch = *p++;
2110 switch(ch) {
2111 case '?':
2112 /* TODO: Make this return the correct value for user-mode. */
2113 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", GDB_SIGNAL_TRAP,
2114 cpu_index(ENV_GET_CPU(s->c_cpu)));
2115 put_packet(s, buf);
2116 /* Remove all the breakpoints when this query is issued,
2117 * because gdb is doing an initial connect and the state
2118 * should be cleaned up.
2120 gdb_breakpoint_remove_all();
2121 break;
2122 case 'c':
2123 if (*p != '\0') {
2124 addr = strtoull(p, (char **)&p, 16);
2125 gdb_set_cpu_pc(s, addr);
2127 s->signal = 0;
2128 gdb_continue(s);
2129 return RS_IDLE;
2130 case 'C':
2131 s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16));
2132 if (s->signal == -1)
2133 s->signal = 0;
2134 gdb_continue(s);
2135 return RS_IDLE;
2136 case 'v':
2137 if (strncmp(p, "Cont", 4) == 0) {
2138 int res_signal, res_thread;
2140 p += 4;
2141 if (*p == '?') {
2142 put_packet(s, "vCont;c;C;s;S");
2143 break;
2145 res = 0;
2146 res_signal = 0;
2147 res_thread = 0;
2148 while (*p) {
2149 int action, signal;
2151 if (*p++ != ';') {
2152 res = 0;
2153 break;
2155 action = *p++;
2156 signal = 0;
2157 if (action == 'C' || action == 'S') {
2158 signal = strtoul(p, (char **)&p, 16);
2159 } else if (action != 'c' && action != 's') {
2160 res = 0;
2161 break;
2163 thread = 0;
2164 if (*p == ':') {
2165 thread = strtoull(p+1, (char **)&p, 16);
2167 action = tolower(action);
2168 if (res == 0 || (res == 'c' && action == 's')) {
2169 res = action;
2170 res_signal = signal;
2171 res_thread = thread;
2174 if (res) {
2175 if (res_thread != -1 && res_thread != 0) {
2176 env = find_cpu(res_thread);
2177 if (env == NULL) {
2178 put_packet(s, "E22");
2179 break;
2181 s->c_cpu = env;
2183 if (res == 's') {
2184 cpu_single_step(s->c_cpu, sstep_flags);
2186 s->signal = res_signal;
2187 gdb_continue(s);
2188 return RS_IDLE;
2190 break;
2191 } else {
2192 goto unknown_command;
2194 case 'k':
2195 #ifdef CONFIG_USER_ONLY
2196 /* Kill the target */
2197 fprintf(stderr, "\nQEMU: Terminated via GDBstub\n");
2198 exit(0);
2199 #endif
2200 case 'D':
2201 /* Detach packet */
2202 gdb_breakpoint_remove_all();
2203 gdb_syscall_mode = GDB_SYS_DISABLED;
2204 gdb_continue(s);
2205 put_packet(s, "OK");
2206 break;
2207 case 's':
2208 if (*p != '\0') {
2209 addr = strtoull(p, (char **)&p, 16);
2210 gdb_set_cpu_pc(s, addr);
2212 cpu_single_step(s->c_cpu, sstep_flags);
2213 gdb_continue(s);
2214 return RS_IDLE;
2215 case 'F':
2217 target_ulong ret;
2218 target_ulong err;
2220 ret = strtoull(p, (char **)&p, 16);
2221 if (*p == ',') {
2222 p++;
2223 err = strtoull(p, (char **)&p, 16);
2224 } else {
2225 err = 0;
2227 if (*p == ',')
2228 p++;
2229 type = *p;
2230 if (s->current_syscall_cb) {
2231 s->current_syscall_cb(s->c_cpu, ret, err);
2232 s->current_syscall_cb = NULL;
2234 if (type == 'C') {
2235 put_packet(s, "T02");
2236 } else {
2237 gdb_continue(s);
2240 break;
2241 case 'g':
2242 cpu_synchronize_state(ENV_GET_CPU(s->g_cpu));
2243 env = s->g_cpu;
2244 len = 0;
2245 for (addr = 0; addr < num_g_regs; addr++) {
2246 reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr);
2247 len += reg_size;
2249 memtohex(buf, mem_buf, len);
2250 put_packet(s, buf);
2251 break;
2252 case 'G':
2253 cpu_synchronize_state(ENV_GET_CPU(s->g_cpu));
2254 env = s->g_cpu;
2255 registers = mem_buf;
2256 len = strlen(p) / 2;
2257 hextomem((uint8_t *)registers, p, len);
2258 for (addr = 0; addr < num_g_regs && len > 0; addr++) {
2259 reg_size = gdb_write_register(s->g_cpu, registers, addr);
2260 len -= reg_size;
2261 registers += reg_size;
2263 put_packet(s, "OK");
2264 break;
2265 case 'm':
2266 addr = strtoull(p, (char **)&p, 16);
2267 if (*p == ',')
2268 p++;
2269 len = strtoull(p, NULL, 16);
2270 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 0) != 0) {
2271 put_packet (s, "E14");
2272 } else {
2273 memtohex(buf, mem_buf, len);
2274 put_packet(s, buf);
2276 break;
2277 case 'M':
2278 addr = strtoull(p, (char **)&p, 16);
2279 if (*p == ',')
2280 p++;
2281 len = strtoull(p, (char **)&p, 16);
2282 if (*p == ':')
2283 p++;
2284 hextomem(mem_buf, p, len);
2285 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 1) != 0) {
2286 put_packet(s, "E14");
2287 } else {
2288 put_packet(s, "OK");
2290 break;
2291 case 'p':
2292 /* Older gdbs are really dumb, and don't use 'g' if 'p' is available.
2293 This works, but can be very slow. Anything new enough to
2294 understand XML also knows how to use this properly. */
2295 if (!gdb_has_xml)
2296 goto unknown_command;
2297 addr = strtoull(p, (char **)&p, 16);
2298 reg_size = gdb_read_register(s->g_cpu, mem_buf, addr);
2299 if (reg_size) {
2300 memtohex(buf, mem_buf, reg_size);
2301 put_packet(s, buf);
2302 } else {
2303 put_packet(s, "E14");
2305 break;
2306 case 'P':
2307 if (!gdb_has_xml)
2308 goto unknown_command;
2309 addr = strtoull(p, (char **)&p, 16);
2310 if (*p == '=')
2311 p++;
2312 reg_size = strlen(p) / 2;
2313 hextomem(mem_buf, p, reg_size);
2314 gdb_write_register(s->g_cpu, mem_buf, addr);
2315 put_packet(s, "OK");
2316 break;
2317 case 'Z':
2318 case 'z':
2319 type = strtoul(p, (char **)&p, 16);
2320 if (*p == ',')
2321 p++;
2322 addr = strtoull(p, (char **)&p, 16);
2323 if (*p == ',')
2324 p++;
2325 len = strtoull(p, (char **)&p, 16);
2326 if (ch == 'Z')
2327 res = gdb_breakpoint_insert(addr, len, type);
2328 else
2329 res = gdb_breakpoint_remove(addr, len, type);
2330 if (res >= 0)
2331 put_packet(s, "OK");
2332 else if (res == -ENOSYS)
2333 put_packet(s, "");
2334 else
2335 put_packet(s, "E22");
2336 break;
2337 case 'H':
2338 type = *p++;
2339 thread = strtoull(p, (char **)&p, 16);
2340 if (thread == -1 || thread == 0) {
2341 put_packet(s, "OK");
2342 break;
2344 env = find_cpu(thread);
2345 if (env == NULL) {
2346 put_packet(s, "E22");
2347 break;
2349 switch (type) {
2350 case 'c':
2351 s->c_cpu = env;
2352 put_packet(s, "OK");
2353 break;
2354 case 'g':
2355 s->g_cpu = env;
2356 put_packet(s, "OK");
2357 break;
2358 default:
2359 put_packet(s, "E22");
2360 break;
2362 break;
2363 case 'T':
2364 thread = strtoull(p, (char **)&p, 16);
2365 env = find_cpu(thread);
2367 if (env != NULL) {
2368 put_packet(s, "OK");
2369 } else {
2370 put_packet(s, "E22");
2372 break;
2373 case 'q':
2374 case 'Q':
2375 /* parse any 'q' packets here */
2376 if (!strcmp(p,"qemu.sstepbits")) {
2377 /* Query Breakpoint bit definitions */
2378 snprintf(buf, sizeof(buf), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
2379 SSTEP_ENABLE,
2380 SSTEP_NOIRQ,
2381 SSTEP_NOTIMER);
2382 put_packet(s, buf);
2383 break;
2384 } else if (strncmp(p,"qemu.sstep",10) == 0) {
2385 /* Display or change the sstep_flags */
2386 p += 10;
2387 if (*p != '=') {
2388 /* Display current setting */
2389 snprintf(buf, sizeof(buf), "0x%x", sstep_flags);
2390 put_packet(s, buf);
2391 break;
2393 p++;
2394 type = strtoul(p, (char **)&p, 16);
2395 sstep_flags = type;
2396 put_packet(s, "OK");
2397 break;
2398 } else if (strcmp(p,"C") == 0) {
2399 /* "Current thread" remains vague in the spec, so always return
2400 * the first CPU (gdb returns the first thread). */
2401 put_packet(s, "QC1");
2402 break;
2403 } else if (strcmp(p,"fThreadInfo") == 0) {
2404 s->query_cpu = first_cpu->env_ptr;
2405 goto report_cpuinfo;
2406 } else if (strcmp(p,"sThreadInfo") == 0) {
2407 report_cpuinfo:
2408 if (s->query_cpu) {
2409 snprintf(buf, sizeof(buf), "m%x",
2410 cpu_index(ENV_GET_CPU(s->query_cpu)));
2411 put_packet(s, buf);
2412 s->query_cpu = ENV_GET_CPU(s->query_cpu)->next_cpu->env_ptr;
2413 } else
2414 put_packet(s, "l");
2415 break;
2416 } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
2417 thread = strtoull(p+16, (char **)&p, 16);
2418 env = find_cpu(thread);
2419 if (env != NULL) {
2420 CPUState *cpu = ENV_GET_CPU(env);
2421 cpu_synchronize_state(cpu);
2422 len = snprintf((char *)mem_buf, sizeof(mem_buf),
2423 "CPU#%d [%s]", cpu->cpu_index,
2424 cpu->halted ? "halted " : "running");
2425 memtohex(buf, mem_buf, len);
2426 put_packet(s, buf);
2428 break;
2430 #ifdef CONFIG_USER_ONLY
2431 else if (strncmp(p, "Offsets", 7) == 0) {
2432 TaskState *ts = s->c_cpu->opaque;
2434 snprintf(buf, sizeof(buf),
2435 "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
2436 ";Bss=" TARGET_ABI_FMT_lx,
2437 ts->info->code_offset,
2438 ts->info->data_offset,
2439 ts->info->data_offset);
2440 put_packet(s, buf);
2441 break;
2443 #else /* !CONFIG_USER_ONLY */
2444 else if (strncmp(p, "Rcmd,", 5) == 0) {
2445 int len = strlen(p + 5);
2447 if ((len % 2) != 0) {
2448 put_packet(s, "E01");
2449 break;
2451 hextomem(mem_buf, p + 5, len);
2452 len = len / 2;
2453 mem_buf[len++] = 0;
2454 qemu_chr_be_write(s->mon_chr, mem_buf, len);
2455 put_packet(s, "OK");
2456 break;
2458 #endif /* !CONFIG_USER_ONLY */
2459 if (strncmp(p, "Supported", 9) == 0) {
2460 snprintf(buf, sizeof(buf), "PacketSize=%x", MAX_PACKET_LENGTH);
2461 #ifdef GDB_CORE_XML
2462 pstrcat(buf, sizeof(buf), ";qXfer:features:read+");
2463 #endif
2464 put_packet(s, buf);
2465 break;
2466 }
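/* With MAX_PACKET_LENGTH of 4096 the qSupported reply above is literally
   "PacketSize=1000" (hex), plus ";qXfer:features:read+" when a core XML
   description is built in.  */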
2467 #ifdef GDB_CORE_XML
2468 if (strncmp(p, "Xfer:features:read:", 19) == 0) {
2469 const char *xml;
2470 target_ulong total_len;
2472 gdb_has_xml = 1;
2473 p += 19;
2474 xml = get_feature_xml(p, &p);
2475 if (!xml) {
2476 snprintf(buf, sizeof(buf), "E00");
2477 put_packet(s, buf);
2478 break;
2479 }
2481 if (*p == ':')
2482 p++;
2483 addr = strtoul(p, (char **)&p, 16);
2484 if (*p == ',')
2485 p++;
2486 len = strtoul(p, (char **)&p, 16);
2488 total_len = strlen(xml);
2489 if (addr > total_len) {
2490 snprintf(buf, sizeof(buf), "E00");
2491 put_packet(s, buf);
2492 break;
2493 }
2494 if (len > (MAX_PACKET_LENGTH - 5) / 2)
2495 len = (MAX_PACKET_LENGTH - 5) / 2;
2496 if (len < total_len - addr) {
2497 buf[0] = 'm';
2498 len = memtox(buf + 1, xml + addr, len);
2499 } else {
2500 buf[0] = 'l';
2501 len = memtox(buf + 1, xml + addr, total_len - addr);
2502 }
2503 put_packet_binary(s, buf, len + 1);
2504 break;
2505 }
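/* Requests look like "qXfer:features:read:<annex>:<offset>,<length>", e.g.
   "qXfer:features:read:target.xml:0,ffb" (offsets illustrative).  The 'm'
   or 'l' prefix chosen above tells gdb whether more data remains or this
   was the final chunk.  */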
2506 #endif
2507 /* Unrecognised 'q' command. */
2508 goto unknown_command;
2510 default:
2511 unknown_command:
2512 /* put empty packet */
2513 buf[0] = '\0';
2514 put_packet(s, buf);
2515 break;
2516 }
2517 return RS_IDLE;
2518 }
2520 void gdb_set_stop_cpu(CPUState *cpu)
2521 {
2522 CPUArchState *env = cpu->env_ptr;
2524 gdbserver_state->c_cpu = env;
2525 gdbserver_state->g_cpu = env;
2526 }
2528 #ifndef CONFIG_USER_ONLY
2529 static void gdb_vm_state_change(void *opaque, int running, RunState state)
2530 {
2531 GDBState *s = gdbserver_state;
2532 CPUArchState *env = s->c_cpu;
2533 CPUState *cpu = ENV_GET_CPU(env);
2534 char buf[256];
2535 const char *type;
2536 int ret;
2538 if (running || s->state == RS_INACTIVE) {
2539 return;
2540 }
2541 /* Is there a GDB syscall waiting to be sent? */
2542 if (s->current_syscall_cb) {
2543 put_packet(s, s->syscall_buf);
2544 return;
2545 }
2546 switch (state) {
2547 case RUN_STATE_DEBUG:
2548 if (env->watchpoint_hit) {
2549 switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
2550 case BP_MEM_READ:
2551 type = "r";
2552 break;
2553 case BP_MEM_ACCESS:
2554 type = "a";
2555 break;
2556 default:
2557 type = "";
2558 break;
2559 }
2560 snprintf(buf, sizeof(buf),
2561 "T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
2562 GDB_SIGNAL_TRAP, cpu_index(cpu), type,
2563 env->watchpoint_hit->vaddr);
2564 env->watchpoint_hit = NULL;
2565 goto send_packet;
2566 }
2567 tb_flush(env);
2568 ret = GDB_SIGNAL_TRAP;
2569 break;
2570 case RUN_STATE_PAUSED:
2571 ret = GDB_SIGNAL_INT;
2572 break;
2573 case RUN_STATE_SHUTDOWN:
2574 ret = GDB_SIGNAL_QUIT;
2575 break;
2576 case RUN_STATE_IO_ERROR:
2577 ret = GDB_SIGNAL_IO;
2578 break;
2579 case RUN_STATE_WATCHDOG:
2580 ret = GDB_SIGNAL_ALRM;
2581 break;
2582 case RUN_STATE_INTERNAL_ERROR:
2583 ret = GDB_SIGNAL_ABRT;
2584 break;
2585 case RUN_STATE_SAVE_VM:
2586 case RUN_STATE_RESTORE_VM:
2587 return;
2588 case RUN_STATE_FINISH_MIGRATE:
2589 ret = GDB_SIGNAL_XCPU;
2590 break;
2591 default:
2592 ret = GDB_SIGNAL_UNKNOWN;
2593 break;
2594 }
2595 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", ret, cpu_index(cpu));
2597 send_packet:
2598 put_packet(s, buf);
2600 /* disable single step if it was enabled */
2601 cpu_single_step(env, 0);
2602 }
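/* A stop reply built in this handler looks like "T05thread:01;" for a plain
   breakpoint stop, or e.g. "T05thread:01;rwatch:cafe;" when a read
   watchpoint at 0xcafe was hit (05 being GDB_SIGNAL_TRAP; address
   illustrative).  */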
2603 #endif
2605 /* Send a gdb syscall request.
2606 This accepts limited printf-style format specifiers, specifically:
2607 %x - target_ulong argument printed in hex.
2608 %lx - 64-bit argument printed in hex.
2609 %s - string pointer (target_ulong) and length (int) pair. */
2610 void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
2611 {
2612 va_list va;
2613 char *p;
2614 char *p_end;
2615 target_ulong addr;
2616 uint64_t i64;
2617 GDBState *s;
2619 s = gdbserver_state;
2620 if (!s)
2621 return;
2622 s->current_syscall_cb = cb;
2623 #ifndef CONFIG_USER_ONLY
2624 vm_stop(RUN_STATE_DEBUG);
2625 #endif
2626 va_start(va, fmt);
2627 p = s->syscall_buf;
2628 p_end = &s->syscall_buf[sizeof(s->syscall_buf)];
2629 *(p++) = 'F';
2630 while (*fmt) {
2631 if (*fmt == '%') {
2632 fmt++;
2633 switch (*fmt++) {
2634 case 'x':
2635 addr = va_arg(va, target_ulong);
2636 p += snprintf(p, p_end - p, TARGET_FMT_lx, addr);
2637 break;
2638 case 'l':
2639 if (*(fmt++) != 'x')
2640 goto bad_format;
2641 i64 = va_arg(va, uint64_t);
2642 p += snprintf(p, p_end - p, "%" PRIx64, i64);
2643 break;
2644 case 's':
2645 addr = va_arg(va, target_ulong);
2646 p += snprintf(p, p_end - p, TARGET_FMT_lx "/%x",
2647 addr, va_arg(va, int));
2648 break;
2649 default:
2650 bad_format:
2651 fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n",
2652 fmt - 1);
2653 break;
2654 }
2655 } else {
2656 *(p++) = *(fmt++);
2657 }
2658 }
2659 *p = 0;
2660 va_end(va);
2661 #ifdef CONFIG_USER_ONLY
2662 put_packet(s, s->syscall_buf);
2663 gdb_handlesig(s->c_cpu, 0);
2664 #else
2665 /* In this case wait to send the syscall packet until notification that
2666 the CPU has stopped. This must be done because if the packet is sent
2667 now the reply from the syscall request could be received while the CPU
2668 is still in the running state, which can cause packets to be dropped
2669 and state transition 'T' packets to be sent while the syscall is still
2670 being processed. */
2671 cpu_exit(ENV_GET_CPU(s->c_cpu));
2672 #endif
2673 }
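/* Illustrative call (names invented for the example; the arguments follow
   the format rules documented above):
     gdb_do_syscall(cb, "write,%x,%x,%x", fd, buf_addr, count);
   This emits an "F" packet such as "Fwrite,1,2000,10" for gdb's file-I/O
   extension to service; the reply is routed back through
   current_syscall_cb.  */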
2675 static void gdb_read_byte(GDBState *s, int ch)
2676 {
2677 int i, csum;
2678 uint8_t reply;
2680 #ifndef CONFIG_USER_ONLY
2681 if (s->last_packet_len) {
2682 /* Waiting for a response to the last packet. If we see the start
2683 of a new command then abandon the previous response. */
2684 if (ch == '-') {
2685 #ifdef DEBUG_GDB
2686 printf("Got NACK, retransmitting\n");
2687 #endif
2688 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
2689 }
2690 #ifdef DEBUG_GDB
2691 else if (ch == '+')
2692 printf("Got ACK\n");
2693 else
2694 printf("Got '%c' when expecting ACK/NACK\n", ch);
2695 #endif
2696 if (ch == '+' || ch == '$')
2697 s->last_packet_len = 0;
2698 if (ch != '$')
2699 return;
2700 }
2701 if (runstate_is_running()) {
2702 /* when the CPU is running, we cannot do anything except stop
2703 it when receiving a char */
2704 vm_stop(RUN_STATE_PAUSED);
2705 } else
2706 #endif
2707 {
2708 switch(s->state) {
2709 case RS_IDLE:
2710 if (ch == '$') {
2711 s->line_buf_index = 0;
2712 s->state = RS_GETLINE;
2713 }
2714 break;
2715 case RS_GETLINE:
2716 if (ch == '#') {
2717 s->state = RS_CHKSUM1;
2718 } else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
2719 s->state = RS_IDLE;
2720 } else {
2721 s->line_buf[s->line_buf_index++] = ch;
2722 }
2723 break;
2724 case RS_CHKSUM1:
2725 s->line_buf[s->line_buf_index] = '\0';
2726 s->line_csum = fromhex(ch) << 4;
2727 s->state = RS_CHKSUM2;
2728 break;
2729 case RS_CHKSUM2:
2730 s->line_csum |= fromhex(ch);
2731 csum = 0;
2732 for(i = 0; i < s->line_buf_index; i++) {
2733 csum += s->line_buf[i];
2734 }
2735 if (s->line_csum != (csum & 0xff)) {
2736 reply = '-';
2737 put_buffer(s, &reply, 1);
2738 s->state = RS_IDLE;
2739 } else {
2740 reply = '+';
2741 put_buffer(s, &reply, 1);
2742 s->state = gdb_handle_packet(s, s->line_buf);
2743 }
2744 break;
2745 default:
2746 abort();
2747 }
2748 }
2749 }
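/* Wire framing handled above: a packet travels as "$<payload>#<checksum>",
   the checksum being the modulo-256 sum of the payload bytes in two hex
   digits, acknowledged with '+' (or '-' to request a retransmit).  For
   example, the read-all-registers request is the packet "$g#67".  */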
2751 /* Tell the remote gdb that the process has exited. */
2752 void gdb_exit(CPUArchState *env, int code)
2753 {
2754 GDBState *s;
2755 char buf[4];
2757 s = gdbserver_state;
2758 if (!s) {
2759 return;
2760 }
2761 #ifdef CONFIG_USER_ONLY
2762 if (gdbserver_fd < 0 || s->fd < 0) {
2763 return;
2764 }
2765 #endif
2767 snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
2768 put_packet(s, buf);
2770 #ifndef CONFIG_USER_ONLY
2771 if (s->chr) {
2772 qemu_chr_delete(s->chr);
2773 }
2774 #endif
2775 }
2777 #ifdef CONFIG_USER_ONLY
2778 int
2779 gdb_queuesig (void)
2780 {
2781 GDBState *s;
2783 s = gdbserver_state;
2785 if (gdbserver_fd < 0 || s->fd < 0)
2786 return 0;
2787 else
2788 return 1;
2789 }
2791 int
2792 gdb_handlesig (CPUArchState *env, int sig)
2793 {
2794 GDBState *s;
2795 char buf[256];
2796 int n;
2798 s = gdbserver_state;
2799 if (gdbserver_fd < 0 || s->fd < 0)
2800 return sig;
2802 /* disable single step if it was enabled */
2803 cpu_single_step(env, 0);
2804 tb_flush(env);
2806 if (sig != 0)
2807 {
2808 snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb (sig));
2809 put_packet(s, buf);
2810 }
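/* e.g. a SIGTRAP is reported to gdb as "S05", using the GDB signal
   numbering produced by target_signal_to_gdb().  */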
2811 /* put_packet() might have detected that the peer terminated the
2812 connection. */
2813 if (s->fd < 0)
2814 return sig;
2816 sig = 0;
2817 s->state = RS_IDLE;
2818 s->running_state = 0;
2819 while (s->running_state == 0) {
2820 n = read (s->fd, buf, 256);
2821 if (n > 0)
2822 {
2823 int i;
2825 for (i = 0; i < n; i++)
2826 gdb_read_byte (s, buf[i]);
2827 }
2828 else if (n == 0 || errno != EAGAIN)
2829 {
2830 /* XXX: Connection closed. Should probably wait for another
2831 connection before continuing. */
2832 return sig;
2833 }
2834 }
2835 sig = s->signal;
2836 s->signal = 0;
2837 return sig;
2838 }
2840 /* Tell the remote gdb that the process has exited due to SIG. */
2841 void gdb_signalled(CPUArchState *env, int sig)
2842 {
2843 GDBState *s;
2844 char buf[4];
2846 s = gdbserver_state;
2847 if (gdbserver_fd < 0 || s->fd < 0)
2848 return;
2850 snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb (sig));
2851 put_packet(s, buf);
2852 }
2854 static void gdb_accept(void)
2855 {
2856 GDBState *s;
2857 struct sockaddr_in sockaddr;
2858 socklen_t len;
2859 int fd;
2861 for(;;) {
2862 len = sizeof(sockaddr);
2863 fd = accept(gdbserver_fd, (struct sockaddr *)&sockaddr, &len);
2864 if (fd < 0 && errno != EINTR) {
2865 perror("accept");
2866 return;
2867 } else if (fd >= 0) {
2868 #ifndef _WIN32
2869 fcntl(fd, F_SETFD, FD_CLOEXEC);
2870 #endif
2871 break;
2872 }
2873 }
2875 /* set short latency */
2876 socket_set_nodelay(fd);
2878 s = g_malloc0(sizeof(GDBState));
2879 s->c_cpu = first_cpu->env_ptr;
2880 s->g_cpu = first_cpu->env_ptr;
2881 s->fd = fd;
2882 gdb_has_xml = 0;
2884 gdbserver_state = s;
2886 fcntl(fd, F_SETFL, O_NONBLOCK);
2887 }
2889 static int gdbserver_open(int port)
2890 {
2891 struct sockaddr_in sockaddr;
2892 int fd, val, ret;
2894 fd = socket(PF_INET, SOCK_STREAM, 0);
2895 if (fd < 0) {
2896 perror("socket");
2897 return -1;
2898 }
2899 #ifndef _WIN32
2900 fcntl(fd, F_SETFD, FD_CLOEXEC);
2901 #endif
2903 /* allow fast reuse */
2904 val = 1;
2905 qemu_setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
2907 sockaddr.sin_family = AF_INET;
2908 sockaddr.sin_port = htons(port);
2909 sockaddr.sin_addr.s_addr = 0;
2910 ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
2911 if (ret < 0) {
2912 perror("bind");
2913 close(fd);
2914 return -1;
2915 }
2916 ret = listen(fd, 0);
2917 if (ret < 0) {
2918 perror("listen");
2919 close(fd);
2920 return -1;
2921 }
2922 return fd;
2923 }
2925 int gdbserver_start(int port)
2926 {
2927 gdbserver_fd = gdbserver_open(port);
2928 if (gdbserver_fd < 0)
2929 return -1;
2930 /* accept connections */
2931 gdb_accept();
2932 return 0;
2933 }
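/* User-mode entry point: reached via "qemu-<arch> -g <port> ./prog", after
   which gdb attaches with "target remote :<port>".  */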
2935 /* Disable gdb stub for child processes. */
2936 void gdbserver_fork(CPUArchState *env)
2937 {
2938 GDBState *s = gdbserver_state;
2939 if (gdbserver_fd < 0 || s->fd < 0)
2940 return;
2941 close(s->fd);
2942 s->fd = -1;
2943 cpu_breakpoint_remove_all(env, BP_GDB);
2944 cpu_watchpoint_remove_all(env, BP_GDB);
2945 }
2946 #else
2947 static int gdb_chr_can_receive(void *opaque)
2948 {
2949 /* We can handle an arbitrarily large amount of data.
2950 Pick the maximum packet size, which is as good as anything. */
2951 return MAX_PACKET_LENGTH;
2952 }
2954 static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
2955 {
2956 int i;
2958 for (i = 0; i < size; i++) {
2959 gdb_read_byte(gdbserver_state, buf[i]);
2960 }
2961 }
2963 static void gdb_chr_event(void *opaque, int event)
2964 {
2965 switch (event) {
2966 case CHR_EVENT_OPENED:
2967 vm_stop(RUN_STATE_PAUSED);
2968 gdb_has_xml = 0;
2969 break;
2970 default:
2971 break;
2972 }
2973 }
2975 static void gdb_monitor_output(GDBState *s, const char *msg, int len)
2976 {
2977 char buf[MAX_PACKET_LENGTH];
2979 buf[0] = 'O';
2980 if (len > (MAX_PACKET_LENGTH/2) - 1)
2981 len = (MAX_PACKET_LENGTH/2) - 1;
2982 memtohex(buf + 1, (uint8_t *)msg, len);
2983 put_packet(s, buf);
2984 }
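/* 'O' packets carry console output that gdb hex-decodes and prints, so
   monitor output such as "ok\n" goes out on the wire as "O6f6b0a".  */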
2986 static int gdb_monitor_write(CharDriverState *chr, const uint8_t *buf, int len)
2987 {
2988 const char *p = (const char *)buf;
2989 int max_sz;
2991 max_sz = (sizeof(gdbserver_state->last_packet) - 2) / 2;
2992 for (;;) {
2993 if (len <= max_sz) {
2994 gdb_monitor_output(gdbserver_state, p, len);
2995 break;
2996 }
2997 gdb_monitor_output(gdbserver_state, p, max_sz);
2998 p += max_sz;
2999 len -= max_sz;
3000 }
3001 return len;
3002 }
3004 #ifndef _WIN32
3005 static void gdb_sigterm_handler(int signal)
3006 {
3007 if (runstate_is_running()) {
3008 vm_stop(RUN_STATE_PAUSED);
3009 }
3010 }
3011 #endif
3013 int gdbserver_start(const char *device)
3014 {
3015 GDBState *s;
3016 char gdbstub_device_name[128];
3017 CharDriverState *chr = NULL;
3018 CharDriverState *mon_chr;
3020 if (!device)
3021 return -1;
3022 if (strcmp(device, "none") != 0) {
3023 if (strstart(device, "tcp:", NULL)) {
3024 /* enforce required TCP attributes */
3025 snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
3026 "%s,nowait,nodelay,server", device);
3027 device = gdbstub_device_name;
3028 }
3029 #ifndef _WIN32
3030 else if (strcmp(device, "stdio") == 0) {
3031 struct sigaction act;
3033 memset(&act, 0, sizeof(act));
3034 act.sa_handler = gdb_sigterm_handler;
3035 sigaction(SIGINT, &act, NULL);
3036 }
3037 #endif
3038 chr = qemu_chr_new("gdb", device, NULL);
3039 if (!chr)
3040 return -1;
3042 qemu_chr_fe_claim_no_fail(chr);
3043 qemu_chr_add_handlers(chr, gdb_chr_can_receive, gdb_chr_receive,
3044 gdb_chr_event, NULL);
3045 }
3047 s = gdbserver_state;
3048 if (!s) {
3049 s = g_malloc0(sizeof(GDBState));
3050 gdbserver_state = s;
3052 qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);
3054 /* Initialize a monitor terminal for gdb */
3055 mon_chr = g_malloc0(sizeof(*mon_chr));
3056 mon_chr->chr_write = gdb_monitor_write;
3057 monitor_init(mon_chr, 0);
3058 } else {
3059 if (s->chr)
3060 qemu_chr_delete(s->chr);
3061 mon_chr = s->mon_chr;
3062 memset(s, 0, sizeof(GDBState));
3063 }
3064 s->c_cpu = first_cpu->env_ptr;
3065 s->g_cpu = first_cpu->env_ptr;
3066 s->chr = chr;
3067 s->state = chr ? RS_IDLE : RS_INACTIVE;
3068 s->mon_chr = mon_chr;
3069 s->current_syscall_cb = NULL;
3071 return 0;
3072 }
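/* System-emulation entry point: reached via "-gdb dev" or its "-s"
   shorthand (equivalent to "-gdb tcp::1234"); a session then attaches with
   "(gdb) target remote localhost:1234".  */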
3073 #endif