/*
 * gdb server stub
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "qemu-common.h"
#ifdef CONFIG_USER_ONLY
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>

#include "qemu.h"
#else
#include "monitor.h"
#include "qemu-char.h"
#include "sysemu.h"
#include "gdbstub.h"
#endif

#define MAX_PACKET_LENGTH 4096

#include "cpu.h"
#include "qemu_socket.h"
#include "kvm.h"
#ifndef TARGET_CPU_MEMORY_RW_DEBUG
static inline int target_memory_rw_debug(CPUArchState *env, target_ulong addr,
                                          uint8_t *buf, int len, int is_write)
{
    return cpu_memory_rw_debug(env, addr, buf, len, is_write);
}
#else
/* target_memory_rw_debug() defined in cpu.h */
#endif
enum {
    GDB_SIGNAL_0 = 0,
    GDB_SIGNAL_INT = 2,
    GDB_SIGNAL_QUIT = 3,
    GDB_SIGNAL_TRAP = 5,
    GDB_SIGNAL_ABRT = 6,
    GDB_SIGNAL_ALRM = 14,
    GDB_SIGNAL_IO = 23,
    GDB_SIGNAL_XCPU = 24,
    GDB_SIGNAL_UNKNOWN = 143
};
#ifdef CONFIG_USER_ONLY

/* Map target signal numbers to GDB protocol signal numbers and vice
 * versa.  For user emulation's currently supported systems, we can
 * assume most signals are defined.
 */
73 static int gdb_signal_table[] = {
75 TARGET_SIGHUP,
76 TARGET_SIGINT,
77 TARGET_SIGQUIT,
78 TARGET_SIGILL,
79 TARGET_SIGTRAP,
80 TARGET_SIGABRT,
81 -1, /* SIGEMT */
82 TARGET_SIGFPE,
83 TARGET_SIGKILL,
84 TARGET_SIGBUS,
85 TARGET_SIGSEGV,
86 TARGET_SIGSYS,
87 TARGET_SIGPIPE,
88 TARGET_SIGALRM,
89 TARGET_SIGTERM,
90 TARGET_SIGURG,
91 TARGET_SIGSTOP,
92 TARGET_SIGTSTP,
93 TARGET_SIGCONT,
94 TARGET_SIGCHLD,
95 TARGET_SIGTTIN,
96 TARGET_SIGTTOU,
97 TARGET_SIGIO,
98 TARGET_SIGXCPU,
99 TARGET_SIGXFSZ,
100 TARGET_SIGVTALRM,
101 TARGET_SIGPROF,
102 TARGET_SIGWINCH,
103 -1, /* SIGLOST */
104 TARGET_SIGUSR1,
105 TARGET_SIGUSR2,
106 #ifdef TARGET_SIGPWR
107 TARGET_SIGPWR,
108 #else
110 #endif
111 -1, /* SIGPOLL */
123 #ifdef __SIGRTMIN
124 __SIGRTMIN + 1,
125 __SIGRTMIN + 2,
126 __SIGRTMIN + 3,
127 __SIGRTMIN + 4,
128 __SIGRTMIN + 5,
129 __SIGRTMIN + 6,
130 __SIGRTMIN + 7,
131 __SIGRTMIN + 8,
132 __SIGRTMIN + 9,
133 __SIGRTMIN + 10,
134 __SIGRTMIN + 11,
135 __SIGRTMIN + 12,
136 __SIGRTMIN + 13,
137 __SIGRTMIN + 14,
138 __SIGRTMIN + 15,
139 __SIGRTMIN + 16,
140 __SIGRTMIN + 17,
141 __SIGRTMIN + 18,
142 __SIGRTMIN + 19,
143 __SIGRTMIN + 20,
144 __SIGRTMIN + 21,
145 __SIGRTMIN + 22,
146 __SIGRTMIN + 23,
147 __SIGRTMIN + 24,
148 __SIGRTMIN + 25,
149 __SIGRTMIN + 26,
150 __SIGRTMIN + 27,
151 __SIGRTMIN + 28,
152 __SIGRTMIN + 29,
153 __SIGRTMIN + 30,
154 __SIGRTMIN + 31,
155 -1, /* SIGCANCEL */
156 __SIGRTMIN,
157 __SIGRTMIN + 32,
158 __SIGRTMIN + 33,
159 __SIGRTMIN + 34,
160 __SIGRTMIN + 35,
161 __SIGRTMIN + 36,
162 __SIGRTMIN + 37,
163 __SIGRTMIN + 38,
164 __SIGRTMIN + 39,
165 __SIGRTMIN + 40,
166 __SIGRTMIN + 41,
167 __SIGRTMIN + 42,
168 __SIGRTMIN + 43,
169 __SIGRTMIN + 44,
170 __SIGRTMIN + 45,
171 __SIGRTMIN + 46,
172 __SIGRTMIN + 47,
173 __SIGRTMIN + 48,
174 __SIGRTMIN + 49,
175 __SIGRTMIN + 50,
176 __SIGRTMIN + 51,
177 __SIGRTMIN + 52,
178 __SIGRTMIN + 53,
179 __SIGRTMIN + 54,
180 __SIGRTMIN + 55,
181 __SIGRTMIN + 56,
182 __SIGRTMIN + 57,
183 __SIGRTMIN + 58,
184 __SIGRTMIN + 59,
185 __SIGRTMIN + 60,
186 __SIGRTMIN + 61,
187 __SIGRTMIN + 62,
188 __SIGRTMIN + 63,
189 __SIGRTMIN + 64,
190 __SIGRTMIN + 65,
191 __SIGRTMIN + 66,
192 __SIGRTMIN + 67,
193 __SIGRTMIN + 68,
194 __SIGRTMIN + 69,
195 __SIGRTMIN + 70,
196 __SIGRTMIN + 71,
197 __SIGRTMIN + 72,
198 __SIGRTMIN + 73,
199 __SIGRTMIN + 74,
200 __SIGRTMIN + 75,
201 __SIGRTMIN + 76,
202 __SIGRTMIN + 77,
203 __SIGRTMIN + 78,
204 __SIGRTMIN + 79,
205 __SIGRTMIN + 80,
206 __SIGRTMIN + 81,
207 __SIGRTMIN + 82,
208 __SIGRTMIN + 83,
209 __SIGRTMIN + 84,
210 __SIGRTMIN + 85,
211 __SIGRTMIN + 86,
212 __SIGRTMIN + 87,
213 __SIGRTMIN + 88,
214 __SIGRTMIN + 89,
215 __SIGRTMIN + 90,
216 __SIGRTMIN + 91,
217 __SIGRTMIN + 92,
218 __SIGRTMIN + 93,
219 __SIGRTMIN + 94,
220 __SIGRTMIN + 95,
221 -1, /* SIGINFO */
222 -1, /* UNKNOWN */
223 -1, /* DEFAULT */
230 #endif
};
#else
/* In system mode we only need SIGINT and SIGTRAP; other signals
   are not yet supported.  */

enum {
    TARGET_SIGINT = 2,
    TARGET_SIGTRAP = 5
};

static int gdb_signal_table[] = {
    -1,
    -1,
    TARGET_SIGINT,
    -1,
    -1,
    TARGET_SIGTRAP
};
#endif
#ifdef CONFIG_USER_ONLY
static int target_signal_to_gdb (int sig)
{
    int i;
    for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++)
        if (gdb_signal_table[i] == sig)
            return i;
    return GDB_SIGNAL_UNKNOWN;
}
#endif

static int gdb_signal_to_target (int sig)
{
    if (sig < ARRAY_SIZE (gdb_signal_table))
        return gdb_signal_table[sig];
    else
        return -1;
}
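
/* Illustrative note (not part of the original source): gdb_signal_to_target()
 * is indexed by GDB protocol signal number, so gdb_signal_to_target(GDB_SIGNAL_TRAP)
 * yields the target's TARGET_SIGTRAP, while target_signal_to_gdb() does the
 * reverse lookup by scanning the table and falls back to GDB_SIGNAL_UNKNOWN
 * when a target signal has no GDB equivalent.
 */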
//#define DEBUG_GDB

typedef struct GDBRegisterState {
    int base_reg;
    int num_regs;
    gdb_reg_cb get_reg;
    gdb_reg_cb set_reg;
    const char *xml;
    struct GDBRegisterState *next;
} GDBRegisterState;

enum RSState {
    RS_INACTIVE,
    RS_IDLE,
    RS_GETLINE,
    RS_CHKSUM1,
    RS_CHKSUM2,
};

typedef struct GDBState {
    CPUArchState *c_cpu; /* current CPU for step/continue ops */
    CPUArchState *g_cpu; /* current CPU for other ops */
    CPUArchState *query_cpu; /* for q{f|s}ThreadInfo */
    enum RSState state; /* parsing state */
    char line_buf[MAX_PACKET_LENGTH];
    int line_buf_index;
    int line_csum;
    uint8_t last_packet[MAX_PACKET_LENGTH + 4];
    int last_packet_len;
    int signal;
#ifdef CONFIG_USER_ONLY
    int fd;
    int running_state;
#else
    CharDriverState *chr;
    CharDriverState *mon_chr;
#endif
    char syscall_buf[256];
    gdb_syscall_complete_cb current_syscall_cb;
} GDBState;
/* By default use no IRQs and no timers while single stepping so as to
 * make single stepping like an ICE HW step.
 */
static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER;

static GDBState *gdbserver_state;

/* This is an ugly hack to cope with both new and old gdb.
   If gdb sends qXfer:features:read then assume we're talking to a newish
   gdb that understands target descriptions. */
static int gdb_has_xml;
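
/* Illustrative note (added, not in the original): in practice a recent gdb
 * announces XML support by sending "qXfer:features:read:target.xml:0,<len>"
 * early in the session; the handler for that packet in gdb_handle_packet()
 * below sets gdb_has_xml, and the per-target register code uses the flag to
 * suppress the legacy fixed-layout FP/FPA register slots.
 */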
#ifdef CONFIG_USER_ONLY
/* XXX: This is not thread safe.  Do we care?  */
static int gdbserver_fd = -1;

static int get_char(GDBState *s)
{
    uint8_t ch;
    int ret;

    for(;;) {
        ret = qemu_recv(s->fd, &ch, 1, 0);
        if (ret < 0) {
            if (errno == ECONNRESET)
                s->fd = -1;
            if (errno != EINTR && errno != EAGAIN)
                return -1;
        } else if (ret == 0) {
            close(s->fd);
            s->fd = -1;
            return -1;
        } else {
            break;
        }
    }
    return ch;
}
#endif
static enum {
    GDB_SYS_UNKNOWN,
    GDB_SYS_ENABLED,
    GDB_SYS_DISABLED,
} gdb_syscall_mode;

/* If gdb is connected when the first semihosting syscall occurs then use
   remote gdb syscalls.  Otherwise use native file IO.  */
int use_gdb_syscalls(void)
{
    if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
        gdb_syscall_mode = (gdbserver_state ? GDB_SYS_ENABLED
                                            : GDB_SYS_DISABLED);
    }
    return gdb_syscall_mode == GDB_SYS_ENABLED;
}
/* Resume execution.  */
static inline void gdb_continue(GDBState *s)
{
#ifdef CONFIG_USER_ONLY
    s->running_state = 1;
#else
    vm_start();
#endif
}

static void put_buffer(GDBState *s, const uint8_t *buf, int len)
{
#ifdef CONFIG_USER_ONLY
    int ret;

    while (len > 0) {
        ret = send(s->fd, buf, len, 0);
        if (ret < 0) {
            if (errno != EINTR && errno != EAGAIN)
                return;
        } else {
            buf += ret;
            len -= ret;
        }
    }
#else
    qemu_chr_fe_write(s->chr, buf, len);
#endif
}
static inline int fromhex(int v)
{
    if (v >= '0' && v <= '9')
        return v - '0';
    else if (v >= 'A' && v <= 'F')
        return v - 'A' + 10;
    else if (v >= 'a' && v <= 'f')
        return v - 'a' + 10;
    else
        return 0;
}

static inline int tohex(int v)
{
    if (v < 10)
        return v + '0';
    else
        return v - 10 + 'a';
}

static void memtohex(char *buf, const uint8_t *mem, int len)
{
    int i, c;
    char *q;
    q = buf;
    for(i = 0; i < len; i++) {
        c = mem[i];
        *q++ = tohex(c >> 4);
        *q++ = tohex(c & 0xf);
    }
    *q = '\0';
}

static void hextomem(uint8_t *mem, const char *buf, int len)
{
    int i;

    for(i = 0; i < len; i++) {
        mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
        buf += 2;
    }
}
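
/* Worked example (illustrative): memtohex(buf, "\xde\xad", 2) produces the
 * string "dead", and hextomem() performs the inverse, turning "dead" back
 * into the bytes 0xde 0xad.  Every register and memory payload in the remote
 * protocol is shuttled through these two helpers.
 */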
/* return -1 if error, 0 if OK */
static int put_packet_binary(GDBState *s, const char *buf, int len)
{
    int csum, i;
    uint8_t *p;

    for(;;) {
        p = s->last_packet;
        *(p++) = '$';
        memcpy(p, buf, len);
        p += len;
        csum = 0;
        for(i = 0; i < len; i++) {
            csum += buf[i];
        }
        *(p++) = '#';
        *(p++) = tohex((csum >> 4) & 0xf);
        *(p++) = tohex((csum) & 0xf);

        s->last_packet_len = p - s->last_packet;
        put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);

#ifdef CONFIG_USER_ONLY
        i = get_char(s);
        if (i < 0)
            return -1;
        if (i == '+')
            break;
#else
        break;
#endif
    }
    return 0;
}

/* return -1 if error, 0 if OK */
static int put_packet(GDBState *s, const char *buf)
{
#ifdef DEBUG_GDB
    printf("reply='%s'\n", buf);
#endif

    return put_packet_binary(s, buf, strlen(buf));
}
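
/* Worked example (illustrative): put_packet(s, "OK") transmits the bytes
 * "$OK#9a" -- a '$', the payload, a '#', then a two-digit hex checksum equal
 * to the sum of the payload bytes modulo 256 ('O' + 'K' == 0x9a).  In the
 * CONFIG_USER_ONLY build put_packet_binary() then waits for gdb's '+'
 * acknowledgement character before returning.
 */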
/* The GDB remote protocol transfers values in target byte order.  This means
   we can use the raw memory access routines to access the value buffer.
   Conveniently, these also handle the case where the buffer is mis-aligned.
 */
#define GET_REG8(val) do { \
    stb_p(mem_buf, val); \
    return 1; \
    } while(0)
#define GET_REG16(val) do { \
    stw_p(mem_buf, val); \
    return 2; \
    } while(0)
#define GET_REG32(val) do { \
    stl_p(mem_buf, val); \
    return 4; \
    } while(0)
#define GET_REG64(val) do { \
    stq_p(mem_buf, val); \
    return 8; \
    } while(0)

#if TARGET_LONG_BITS == 64
#define GET_REGL(val) GET_REG64(val)
#define ldtul_p(addr) ldq_p(addr)
#else
#define GET_REGL(val) GET_REG32(val)
#define ldtul_p(addr) ldl_p(addr)
#endif
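
/* Illustrative sketch (not from the original file): each per-target
 * cpu_gdb_read_register() below reduces to a sequence of these macros; each
 * one stores the value into mem_buf in target byte order and returns the
 * number of bytes written, e.g.
 *
 *     if (n < 16) {
 *         GET_REG32(env->regs[n]);   /- stores 4 bytes, returns 4 -/
 *     }
 */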
514 #if defined(TARGET_I386)
516 #ifdef TARGET_X86_64
517 static const int gpr_map[16] = {
518 R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
519 8, 9, 10, 11, 12, 13, 14, 15
521 #else
522 #define gpr_map gpr_map32
523 #endif
524 static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
526 #define NUM_CORE_REGS (CPU_NB_REGS * 2 + 25)
528 #define IDX_IP_REG CPU_NB_REGS
529 #define IDX_FLAGS_REG (IDX_IP_REG + 1)
530 #define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
531 #define IDX_FP_REGS (IDX_SEG_REGS + 6)
532 #define IDX_XMM_REGS (IDX_FP_REGS + 16)
533 #define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
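
/* Derived layout (illustrative, assuming CPU_NB_REGS == 16 for x86-64):
 * registers 0-15 are the GPRs, 16 is RIP, 17 is EFLAGS, 18-23 the six
 * segment selectors, 24-39 the x87 state, 40-55 the XMM registers and
 * 56 is MXCSR, giving NUM_CORE_REGS == 57.
 */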
535 static int cpu_gdb_read_register(CPUX86State *env, uint8_t *mem_buf, int n)
537 if (n < CPU_NB_REGS) {
538 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
539 GET_REG64(env->regs[gpr_map[n]]);
540 } else if (n < CPU_NB_REGS32) {
541 GET_REG32(env->regs[gpr_map32[n]]);
543 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
544 #ifdef USE_X86LDOUBLE
545 /* FIXME: byteswap float values - after fixing fpregs layout. */
546 memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
547 #else
548 memset(mem_buf, 0, 10);
549 #endif
550 return 10;
551 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
552 n -= IDX_XMM_REGS;
553 if (n < CPU_NB_REGS32 ||
554 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
555 stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
556 stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
557 return 16;
559 } else {
560 switch (n) {
561 case IDX_IP_REG:
562 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
563 GET_REG64(env->eip);
564 } else {
565 GET_REG32(env->eip);
567 case IDX_FLAGS_REG: GET_REG32(env->eflags);
569 case IDX_SEG_REGS: GET_REG32(env->segs[R_CS].selector);
570 case IDX_SEG_REGS + 1: GET_REG32(env->segs[R_SS].selector);
571 case IDX_SEG_REGS + 2: GET_REG32(env->segs[R_DS].selector);
572 case IDX_SEG_REGS + 3: GET_REG32(env->segs[R_ES].selector);
573 case IDX_SEG_REGS + 4: GET_REG32(env->segs[R_FS].selector);
574 case IDX_SEG_REGS + 5: GET_REG32(env->segs[R_GS].selector);
576 case IDX_FP_REGS + 8: GET_REG32(env->fpuc);
577 case IDX_FP_REGS + 9: GET_REG32((env->fpus & ~0x3800) |
578 (env->fpstt & 0x7) << 11);
579 case IDX_FP_REGS + 10: GET_REG32(0); /* ftag */
580 case IDX_FP_REGS + 11: GET_REG32(0); /* fiseg */
581 case IDX_FP_REGS + 12: GET_REG32(0); /* fioff */
582 case IDX_FP_REGS + 13: GET_REG32(0); /* foseg */
583 case IDX_FP_REGS + 14: GET_REG32(0); /* fooff */
584 case IDX_FP_REGS + 15: GET_REG32(0); /* fop */
586 case IDX_MXCSR_REG: GET_REG32(env->mxcsr);
589 return 0;
592 static int cpu_x86_gdb_load_seg(CPUX86State *env, int sreg, uint8_t *mem_buf)
594 uint16_t selector = ldl_p(mem_buf);
596 if (selector != env->segs[sreg].selector) {
597 #if defined(CONFIG_USER_ONLY)
598 cpu_x86_load_seg(env, sreg, selector);
599 #else
600 unsigned int limit, flags;
601 target_ulong base;
603 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
604 base = selector << 4;
605 limit = 0xffff;
606 flags = 0;
607 } else {
608 if (!cpu_x86_get_descr_debug(env, selector, &base, &limit, &flags))
609 return 4;
611 cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
612 #endif
614 return 4;
617 static int cpu_gdb_write_register(CPUX86State *env, uint8_t *mem_buf, int n)
619 uint32_t tmp;
621 if (n < CPU_NB_REGS) {
622 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
623 env->regs[gpr_map[n]] = ldtul_p(mem_buf);
624 return sizeof(target_ulong);
625 } else if (n < CPU_NB_REGS32) {
626 n = gpr_map32[n];
627 env->regs[n] &= ~0xffffffffUL;
628 env->regs[n] |= (uint32_t)ldl_p(mem_buf);
629 return 4;
631 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
632 #ifdef USE_X86LDOUBLE
633 /* FIXME: byteswap float values - after fixing fpregs layout. */
634 memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
635 #endif
636 return 10;
637 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
638 n -= IDX_XMM_REGS;
639 if (n < CPU_NB_REGS32 ||
640 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
641 env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
642 env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
643 return 16;
645 } else {
646 switch (n) {
647 case IDX_IP_REG:
648 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
649 env->eip = ldq_p(mem_buf);
650 return 8;
651 } else {
652 env->eip &= ~0xffffffffUL;
653 env->eip |= (uint32_t)ldl_p(mem_buf);
654 return 4;
656 case IDX_FLAGS_REG:
657 env->eflags = ldl_p(mem_buf);
658 return 4;
660 case IDX_SEG_REGS: return cpu_x86_gdb_load_seg(env, R_CS, mem_buf);
661 case IDX_SEG_REGS + 1: return cpu_x86_gdb_load_seg(env, R_SS, mem_buf);
662 case IDX_SEG_REGS + 2: return cpu_x86_gdb_load_seg(env, R_DS, mem_buf);
663 case IDX_SEG_REGS + 3: return cpu_x86_gdb_load_seg(env, R_ES, mem_buf);
664 case IDX_SEG_REGS + 4: return cpu_x86_gdb_load_seg(env, R_FS, mem_buf);
665 case IDX_SEG_REGS + 5: return cpu_x86_gdb_load_seg(env, R_GS, mem_buf);
667 case IDX_FP_REGS + 8:
668 env->fpuc = ldl_p(mem_buf);
669 return 4;
670 case IDX_FP_REGS + 9:
671 tmp = ldl_p(mem_buf);
672 env->fpstt = (tmp >> 11) & 7;
673 env->fpus = tmp & ~0x3800;
674 return 4;
675 case IDX_FP_REGS + 10: /* ftag */ return 4;
676 case IDX_FP_REGS + 11: /* fiseg */ return 4;
677 case IDX_FP_REGS + 12: /* fioff */ return 4;
678 case IDX_FP_REGS + 13: /* foseg */ return 4;
679 case IDX_FP_REGS + 14: /* fooff */ return 4;
680 case IDX_FP_REGS + 15: /* fop */ return 4;
682 case IDX_MXCSR_REG:
683 env->mxcsr = ldl_p(mem_buf);
684 return 4;
687 /* Unrecognised register. */
688 return 0;
691 #elif defined (TARGET_PPC)
693 /* Old gdb always expects FP registers. Newer (xml-aware) gdb only
694 expects whatever the target description contains. Due to a
695 historical mishap the FP registers appear in between core integer
696 regs and PC, MSR, CR, and so forth. We hack round this by giving the
697 FP regs zero size when talking to a newer gdb. */
698 #define NUM_CORE_REGS 71
699 #if defined (TARGET_PPC64)
700 #define GDB_CORE_XML "power64-core.xml"
701 #else
702 #define GDB_CORE_XML "power-core.xml"
703 #endif
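
/* Illustrative summary of the layout described above: registers 0-31 are the
 * GPRs, 32-63 the FPRs (reported with zero size to an XML-aware gdb), then
 * 64 = NIP, 65 = MSR, 66 = CR, 67 = LR, 68 = CTR, 69 = XER, 70 = FPSCR,
 * giving NUM_CORE_REGS == 71.
 */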
705 static int cpu_gdb_read_register(CPUPPCState *env, uint8_t *mem_buf, int n)
707 if (n < 32) {
708 /* gprs */
709 GET_REGL(env->gpr[n]);
710 } else if (n < 64) {
711 /* fprs */
712 if (gdb_has_xml)
713 return 0;
714 stfq_p(mem_buf, env->fpr[n-32]);
715 return 8;
716 } else {
717 switch (n) {
718 case 64: GET_REGL(env->nip);
719 case 65: GET_REGL(env->msr);
720 case 66:
722 uint32_t cr = 0;
723 int i;
724 for (i = 0; i < 8; i++)
725 cr |= env->crf[i] << (32 - ((i + 1) * 4));
726 GET_REG32(cr);
728 case 67: GET_REGL(env->lr);
729 case 68: GET_REGL(env->ctr);
730 case 69: GET_REGL(env->xer);
731 case 70:
733 if (gdb_has_xml)
734 return 0;
735 GET_REG32(env->fpscr);
739 return 0;
742 static int cpu_gdb_write_register(CPUPPCState *env, uint8_t *mem_buf, int n)
744 if (n < 32) {
745 /* gprs */
746 env->gpr[n] = ldtul_p(mem_buf);
747 return sizeof(target_ulong);
748 } else if (n < 64) {
749 /* fprs */
750 if (gdb_has_xml)
751 return 0;
752 env->fpr[n-32] = ldfq_p(mem_buf);
753 return 8;
754 } else {
755 switch (n) {
756 case 64:
757 env->nip = ldtul_p(mem_buf);
758 return sizeof(target_ulong);
759 case 65:
760 ppc_store_msr(env, ldtul_p(mem_buf));
761 return sizeof(target_ulong);
762 case 66:
764 uint32_t cr = ldl_p(mem_buf);
765 int i;
766 for (i = 0; i < 8; i++)
767 env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
768 return 4;
770 case 67:
771 env->lr = ldtul_p(mem_buf);
772 return sizeof(target_ulong);
773 case 68:
774 env->ctr = ldtul_p(mem_buf);
775 return sizeof(target_ulong);
776 case 69:
777 env->xer = ldtul_p(mem_buf);
778 return sizeof(target_ulong);
779 case 70:
780 /* fpscr */
781 if (gdb_has_xml)
782 return 0;
783 return 4;
786 return 0;
789 #elif defined (TARGET_SPARC)
791 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
792 #define NUM_CORE_REGS 86
793 #else
794 #define NUM_CORE_REGS 72
795 #endif
797 #ifdef TARGET_ABI32
798 #define GET_REGA(val) GET_REG32(val)
799 #else
800 #define GET_REGA(val) GET_REGL(val)
801 #endif
803 static int cpu_gdb_read_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
805 if (n < 8) {
806 /* g0..g7 */
807 GET_REGA(env->gregs[n]);
809 if (n < 32) {
810 /* register window */
811 GET_REGA(env->regwptr[n - 8]);
813 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
814 if (n < 64) {
815 /* fprs */
816 if (n & 1) {
817 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
818 } else {
819 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
822 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
823 switch (n) {
824 case 64: GET_REGA(env->y);
825 case 65: GET_REGA(cpu_get_psr(env));
826 case 66: GET_REGA(env->wim);
827 case 67: GET_REGA(env->tbr);
828 case 68: GET_REGA(env->pc);
829 case 69: GET_REGA(env->npc);
830 case 70: GET_REGA(env->fsr);
831 case 71: GET_REGA(0); /* csr */
832 default: GET_REGA(0);
834 #else
835 if (n < 64) {
836 /* f0-f31 */
837 if (n & 1) {
838 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
839 } else {
840 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
843 if (n < 80) {
844 /* f32-f62 (double width, even numbers only) */
845 GET_REG64(env->fpr[(n - 32) / 2].ll);
847 switch (n) {
848 case 80: GET_REGL(env->pc);
849 case 81: GET_REGL(env->npc);
850 case 82: GET_REGL((cpu_get_ccr(env) << 32) |
851 ((env->asi & 0xff) << 24) |
852 ((env->pstate & 0xfff) << 8) |
853 cpu_get_cwp64(env));
854 case 83: GET_REGL(env->fsr);
855 case 84: GET_REGL(env->fprs);
856 case 85: GET_REGL(env->y);
858 #endif
859 return 0;
862 static int cpu_gdb_write_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
864 #if defined(TARGET_ABI32)
865 abi_ulong tmp;
867 tmp = ldl_p(mem_buf);
868 #else
869 target_ulong tmp;
871 tmp = ldtul_p(mem_buf);
872 #endif
874 if (n < 8) {
875 /* g0..g7 */
876 env->gregs[n] = tmp;
877 } else if (n < 32) {
878 /* register window */
879 env->regwptr[n - 8] = tmp;
881 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
882 else if (n < 64) {
883 /* fprs */
884 /* f0-f31 */
885 if (n & 1) {
886 env->fpr[(n - 32) / 2].l.lower = tmp;
887 } else {
888 env->fpr[(n - 32) / 2].l.upper = tmp;
890 } else {
891 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
892 switch (n) {
893 case 64: env->y = tmp; break;
894 case 65: cpu_put_psr(env, tmp); break;
895 case 66: env->wim = tmp; break;
896 case 67: env->tbr = tmp; break;
897 case 68: env->pc = tmp; break;
898 case 69: env->npc = tmp; break;
899 case 70: env->fsr = tmp; break;
900 default: return 0;
903 return 4;
904 #else
905 else if (n < 64) {
906 /* f0-f31 */
907 tmp = ldl_p(mem_buf);
908 if (n & 1) {
909 env->fpr[(n - 32) / 2].l.lower = tmp;
910 } else {
911 env->fpr[(n - 32) / 2].l.upper = tmp;
913 return 4;
914 } else if (n < 80) {
915 /* f32-f62 (double width, even numbers only) */
916 env->fpr[(n - 32) / 2].ll = tmp;
917 } else {
918 switch (n) {
919 case 80: env->pc = tmp; break;
920 case 81: env->npc = tmp; break;
921 case 82:
922 cpu_put_ccr(env, tmp >> 32);
923 env->asi = (tmp >> 24) & 0xff;
924 env->pstate = (tmp >> 8) & 0xfff;
925 cpu_put_cwp64(env, tmp & 0xff);
926 break;
927 case 83: env->fsr = tmp; break;
928 case 84: env->fprs = tmp; break;
929 case 85: env->y = tmp; break;
930 default: return 0;
933 return 8;
934 #endif
936 #elif defined (TARGET_ARM)
938 /* Old gdb always expects FPA registers.  Newer (xml-aware) gdb only expects
939 whatever the target description contains. Due to a historical mishap
940 the FPA registers appear in between core integer regs and the CPSR.
941 We hack round this by giving the FPA regs zero size when talking to a
942 newer gdb. */
943 #define NUM_CORE_REGS 26
944 #define GDB_CORE_XML "arm-core.xml"
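
/* Illustrative summary: registers 0-15 are the core integer registers,
 * 16-23 the legacy 12-byte FPA registers (zero-sized for an XML-aware gdb),
 * 24 the FPA status register and 25 the CPSR, giving NUM_CORE_REGS == 26.
 */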
946 static int cpu_gdb_read_register(CPUARMState *env, uint8_t *mem_buf, int n)
948 if (n < 16) {
949 /* Core integer register. */
950 GET_REG32(env->regs[n]);
952 if (n < 24) {
953 /* FPA registers. */
954 if (gdb_has_xml)
955 return 0;
956 memset(mem_buf, 0, 12);
957 return 12;
959 switch (n) {
960 case 24:
961 /* FPA status register. */
962 if (gdb_has_xml)
963 return 0;
964 GET_REG32(0);
965 case 25:
966 /* CPSR */
967 GET_REG32(cpsr_read(env));
969 /* Unknown register. */
970 return 0;
973 static int cpu_gdb_write_register(CPUARMState *env, uint8_t *mem_buf, int n)
975 uint32_t tmp;
977 tmp = ldl_p(mem_buf);
979 /* Mask out low bit of PC to work around gdb bugs.  This will probably
980 cause problems if we ever implement the Jazelle DBX extensions. */
981 if (n == 15)
982 tmp &= ~1;
984 if (n < 16) {
985 /* Core integer register. */
986 env->regs[n] = tmp;
987 return 4;
989 if (n < 24) { /* 16-23 */
990 /* FPA registers (ignored). */
991 if (gdb_has_xml)
992 return 0;
993 return 12;
995 switch (n) {
996 case 24:
997 /* FPA status register (ignored). */
998 if (gdb_has_xml)
999 return 0;
1000 return 4;
1001 case 25:
1002 /* CPSR */
1003 cpsr_write (env, tmp, 0xffffffff);
1004 return 4;
1006 /* Unknown register. */
1007 return 0;
1010 #elif defined (TARGET_M68K)
1012 #define NUM_CORE_REGS 18
1014 #define GDB_CORE_XML "cf-core.xml"
1016 static int cpu_gdb_read_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1018 if (n < 8) {
1019 /* D0-D7 */
1020 GET_REG32(env->dregs[n]);
1021 } else if (n < 16) {
1022 /* A0-A7 */
1023 GET_REG32(env->aregs[n - 8]);
1024 } else {
1025 switch (n) {
1026 case 16: GET_REG32(env->sr);
1027 case 17: GET_REG32(env->pc);
1030 /* FP registers not included here because they vary between
1031 ColdFire and m68k. Use XML bits for these. */
1032 return 0;
1035 static int cpu_gdb_write_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1037 uint32_t tmp;
1039 tmp = ldl_p(mem_buf);
1041 if (n < 8) {
1042 /* D0-D7 */
1043 env->dregs[n] = tmp;
1044 } else if (n < 16) {
1045 /* A0-A7 */
1046 env->aregs[n - 8] = tmp;
1047 } else {
1048 switch (n) {
1049 case 16: env->sr = tmp; break;
1050 case 17: env->pc = tmp; break;
1051 default: return 0;
1054 return 4;
1056 #elif defined (TARGET_MIPS)
1058 #define NUM_CORE_REGS 73
1060 static int cpu_gdb_read_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1062 if (n < 32) {
1063 GET_REGL(env->active_tc.gpr[n]);
1065 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
1066 if (n >= 38 && n < 70) {
1067 if (env->CP0_Status & (1 << CP0St_FR))
1068 GET_REGL(env->active_fpu.fpr[n - 38].d);
1069 else
1070 GET_REGL(env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
1072 switch (n) {
1073 case 70: GET_REGL((int32_t)env->active_fpu.fcr31);
1074 case 71: GET_REGL((int32_t)env->active_fpu.fcr0);
1077 switch (n) {
1078 case 32: GET_REGL((int32_t)env->CP0_Status);
1079 case 33: GET_REGL(env->active_tc.LO[0]);
1080 case 34: GET_REGL(env->active_tc.HI[0]);
1081 case 35: GET_REGL(env->CP0_BadVAddr);
1082 case 36: GET_REGL((int32_t)env->CP0_Cause);
1083 case 37: GET_REGL(env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16));
1084 case 72: GET_REGL(0); /* fp */
1085 case 89: GET_REGL((int32_t)env->CP0_PRid);
1087 if (n >= 73 && n <= 88) {
1088 /* 16 embedded regs. */
1089 GET_REGL(0);
1092 return 0;
1095 /* convert MIPS rounding mode in FCR31 to IEEE library */
1096 static unsigned int ieee_rm[] =
1098 float_round_nearest_even,
1099 float_round_to_zero,
1100 float_round_up,
1101 float_round_down
1103 #define RESTORE_ROUNDING_MODE \
1104 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
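
/* Illustrative note: the two low bits of FCR31 select the MIPS rounding mode
 * (0 = nearest even, 1 = toward zero, 2 = toward +inf, 3 = toward -inf), so
 * RESTORE_ROUNDING_MODE re-applies that choice to the softfloat status
 * whenever gdb rewrites FCR31 below.
 */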
1106 static int cpu_gdb_write_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1108 target_ulong tmp;
1110 tmp = ldtul_p(mem_buf);
1112 if (n < 32) {
1113 env->active_tc.gpr[n] = tmp;
1114 return sizeof(target_ulong);
1116 if (env->CP0_Config1 & (1 << CP0C1_FP)
1117 && n >= 38 && n < 73) {
1118 if (n < 70) {
1119 if (env->CP0_Status & (1 << CP0St_FR))
1120 env->active_fpu.fpr[n - 38].d = tmp;
1121 else
1122 env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
1124 switch (n) {
1125 case 70:
1126 env->active_fpu.fcr31 = tmp & 0xFF83FFFF;
1127 /* set rounding mode */
1128 RESTORE_ROUNDING_MODE;
1129 break;
1130 case 71: env->active_fpu.fcr0 = tmp; break;
1132 return sizeof(target_ulong);
1134 switch (n) {
1135 case 32: env->CP0_Status = tmp; break;
1136 case 33: env->active_tc.LO[0] = tmp; break;
1137 case 34: env->active_tc.HI[0] = tmp; break;
1138 case 35: env->CP0_BadVAddr = tmp; break;
1139 case 36: env->CP0_Cause = tmp; break;
1140 case 37:
1141 env->active_tc.PC = tmp & ~(target_ulong)1;
1142 if (tmp & 1) {
1143 env->hflags |= MIPS_HFLAG_M16;
1144 } else {
1145 env->hflags &= ~(MIPS_HFLAG_M16);
1147 break;
1148 case 72: /* fp, ignored */ break;
1149 default:
1150 if (n > 89)
1151 return 0;
1152 /* Other registers are readonly. Ignore writes. */
1153 break;
1156 return sizeof(target_ulong);
1158 #elif defined(TARGET_OPENRISC)
1160 #define NUM_CORE_REGS (32 + 3)
1162 static int cpu_gdb_read_register(CPUOpenRISCState *env, uint8_t *mem_buf, int n)
1164 if (n < 32) {
1165 GET_REG32(env->gpr[n]);
1166 } else {
1167 switch (n) {
1168 case 32: /* PPC */
1169 GET_REG32(env->ppc);
1170 break;
1172 case 33: /* NPC */
1173 GET_REG32(env->npc);
1174 break;
1176 case 34: /* SR */
1177 GET_REG32(env->sr);
1178 break;
1180 default:
1181 break;
1184 return 0;
1187 static int cpu_gdb_write_register(CPUOpenRISCState *env,
1188 uint8_t *mem_buf, int n)
1190 uint32_t tmp;
1192 if (n > NUM_CORE_REGS) {
1193 return 0;
1196 tmp = ldl_p(mem_buf);
1198 if (n < 32) {
1199 env->gpr[n] = tmp;
1200 } else {
1201 switch (n) {
1202 case 32: /* PPC */
1203 env->ppc = tmp;
1204 break;
1206 case 33: /* NPC */
1207 env->npc = tmp;
1208 break;
1210 case 34: /* SR */
1211 env->sr = tmp;
1212 break;
1214 default:
1215 break;
1218 return 4;
1220 #elif defined (TARGET_SH4)
1222 /* Hint: Use "set architecture sh4" in GDB to see fpu registers */
1223 /* FIXME: We should use XML for this. */
1225 #define NUM_CORE_REGS 59
1227 static int cpu_gdb_read_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1229 if (n < 8) {
1230 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1231 GET_REGL(env->gregs[n + 16]);
1232 } else {
1233 GET_REGL(env->gregs[n]);
1235 } else if (n < 16) {
1236 GET_REGL(env->gregs[n]);
1237 } else if (n >= 25 && n < 41) {
1238 GET_REGL(env->fregs[(n - 25) + ((env->fpscr & FPSCR_FR) ? 16 : 0)]);
1239 } else if (n >= 43 && n < 51) {
1240 GET_REGL(env->gregs[n - 43]);
1241 } else if (n >= 51 && n < 59) {
1242 GET_REGL(env->gregs[n - (51 - 16)]);
1244 switch (n) {
1245 case 16: GET_REGL(env->pc);
1246 case 17: GET_REGL(env->pr);
1247 case 18: GET_REGL(env->gbr);
1248 case 19: GET_REGL(env->vbr);
1249 case 20: GET_REGL(env->mach);
1250 case 21: GET_REGL(env->macl);
1251 case 22: GET_REGL(env->sr);
1252 case 23: GET_REGL(env->fpul);
1253 case 24: GET_REGL(env->fpscr);
1254 case 41: GET_REGL(env->ssr);
1255 case 42: GET_REGL(env->spc);
1258 return 0;
1261 static int cpu_gdb_write_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1263 uint32_t tmp;
1265 tmp = ldl_p(mem_buf);
1267 if (n < 8) {
1268 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1269 env->gregs[n + 16] = tmp;
1270 } else {
1271 env->gregs[n] = tmp;
1273 return 4;
1274 } else if (n < 16) {
1275 env->gregs[n] = tmp;
1276 return 4;
1277 } else if (n >= 25 && n < 41) {
1278 env->fregs[(n - 25) + ((env->fpscr & FPSCR_FR) ? 16 : 0)] = tmp;
1279 return 4;
1280 } else if (n >= 43 && n < 51) {
1281 env->gregs[n - 43] = tmp;
1282 return 4;
1283 } else if (n >= 51 && n < 59) {
1284 env->gregs[n - (51 - 16)] = tmp;
1285 return 4;
1287 switch (n) {
1288 case 16: env->pc = tmp; break;
1289 case 17: env->pr = tmp; break;
1290 case 18: env->gbr = tmp; break;
1291 case 19: env->vbr = tmp; break;
1292 case 20: env->mach = tmp; break;
1293 case 21: env->macl = tmp; break;
1294 case 22: env->sr = tmp; break;
1295 case 23: env->fpul = tmp; break;
1296 case 24: env->fpscr = tmp; break;
1297 case 41: env->ssr = tmp; break;
1298 case 42: env->spc = tmp; break;
1299 default: return 0;
1302 return 4;
1304 #elif defined (TARGET_MICROBLAZE)
1306 #define NUM_CORE_REGS (32 + 5)
1308 static int cpu_gdb_read_register(CPUMBState *env, uint8_t *mem_buf, int n)
1310 if (n < 32) {
1311 GET_REG32(env->regs[n]);
1312 } else {
1313 GET_REG32(env->sregs[n - 32]);
1315 return 0;
1318 static int cpu_gdb_write_register(CPUMBState *env, uint8_t *mem_buf, int n)
1320 uint32_t tmp;
1322 if (n > NUM_CORE_REGS)
1323 return 0;
1325 tmp = ldl_p(mem_buf);
1327 if (n < 32) {
1328 env->regs[n] = tmp;
1329 } else {
1330 env->sregs[n - 32] = tmp;
1332 return 4;
1334 #elif defined (TARGET_CRIS)
1336 #define NUM_CORE_REGS 49
1338 static int
1339 read_register_crisv10(CPUCRISState *env, uint8_t *mem_buf, int n)
1341 if (n < 15) {
1342 GET_REG32(env->regs[n]);
1345 if (n == 15) {
1346 GET_REG32(env->pc);
1349 if (n < 32) {
1350 switch (n) {
1351 case 16:
1352 GET_REG8(env->pregs[n - 16]);
1353 break;
1354 case 17:
1355 GET_REG8(env->pregs[n - 16]);
1356 break;
1357 case 20:
1358 case 21:
1359 GET_REG16(env->pregs[n - 16]);
1360 break;
1361 default:
1362 if (n >= 23) {
1363 GET_REG32(env->pregs[n - 16]);
1365 break;
1368 return 0;
1371 static int cpu_gdb_read_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1373 uint8_t srs;
1375 if (env->pregs[PR_VR] < 32)
1376 return read_register_crisv10(env, mem_buf, n);
1378 srs = env->pregs[PR_SRS];
1379 if (n < 16) {
1380 GET_REG32(env->regs[n]);
1383 if (n >= 21 && n < 32) {
1384 GET_REG32(env->pregs[n - 16]);
1386 if (n >= 33 && n < 49) {
1387 GET_REG32(env->sregs[srs][n - 33]);
1389 switch (n) {
1390 case 16: GET_REG8(env->pregs[0]);
1391 case 17: GET_REG8(env->pregs[1]);
1392 case 18: GET_REG32(env->pregs[2]);
1393 case 19: GET_REG8(srs);
1394 case 20: GET_REG16(env->pregs[4]);
1395 case 32: GET_REG32(env->pc);
1398 return 0;
1401 static int cpu_gdb_write_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1403 uint32_t tmp;
1405 if (n > 49)
1406 return 0;
1408 tmp = ldl_p(mem_buf);
1410 if (n < 16) {
1411 env->regs[n] = tmp;
1414 if (n >= 21 && n < 32) {
1415 env->pregs[n - 16] = tmp;
1418 /* FIXME: Should support function regs be writable? */
1419 switch (n) {
1420 case 16: return 1;
1421 case 17: return 1;
1422 case 18: env->pregs[PR_PID] = tmp; break;
1423 case 19: return 1;
1424 case 20: return 2;
1425 case 32: env->pc = tmp; break;
1428 return 4;
1430 #elif defined (TARGET_ALPHA)
1432 #define NUM_CORE_REGS 67
1434 static int cpu_gdb_read_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1436 uint64_t val;
1437 CPU_DoubleU d;
1439 switch (n) {
1440 case 0 ... 30:
1441 val = env->ir[n];
1442 break;
1443 case 32 ... 62:
1444 d.d = env->fir[n - 32];
1445 val = d.ll;
1446 break;
1447 case 63:
1448 val = cpu_alpha_load_fpcr(env);
1449 break;
1450 case 64:
1451 val = env->pc;
1452 break;
1453 case 66:
1454 val = env->unique;
1455 break;
1456 case 31:
1457 case 65:
1458 /* 31 really is the zero register; 65 is unassigned in the
1459 gdb protocol, but is still required to occupy 8 bytes. */
1460 val = 0;
1461 break;
1462 default:
1463 return 0;
1465 GET_REGL(val);
1468 static int cpu_gdb_write_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1470 target_ulong tmp = ldtul_p(mem_buf);
1471 CPU_DoubleU d;
1473 switch (n) {
1474 case 0 ... 30:
1475 env->ir[n] = tmp;
1476 break;
1477 case 32 ... 62:
1478 d.ll = tmp;
1479 env->fir[n - 32] = d.d;
1480 break;
1481 case 63:
1482 cpu_alpha_store_fpcr(env, tmp);
1483 break;
1484 case 64:
1485 env->pc = tmp;
1486 break;
1487 case 66:
1488 env->unique = tmp;
1489 break;
1490 case 31:
1491 case 65:
1492 /* 31 really is the zero register; 65 is unassigned in the
1493 gdb protocol, but is still required to occupy 8 bytes. */
1494 break;
1495 default:
1496 return 0;
1498 return 8;
1500 #elif defined (TARGET_S390X)
1502 #define NUM_CORE_REGS S390_NUM_TOTAL_REGS
1504 static int cpu_gdb_read_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1506 switch (n) {
1507 case S390_PSWM_REGNUM: GET_REGL(env->psw.mask); break;
1508 case S390_PSWA_REGNUM: GET_REGL(env->psw.addr); break;
1509 case S390_R0_REGNUM ... S390_R15_REGNUM:
1510 GET_REGL(env->regs[n-S390_R0_REGNUM]); break;
1511 case S390_A0_REGNUM ... S390_A15_REGNUM:
1512 GET_REG32(env->aregs[n-S390_A0_REGNUM]); break;
1513 case S390_FPC_REGNUM: GET_REG32(env->fpc); break;
1514 case S390_F0_REGNUM ... S390_F15_REGNUM:
1515 /* XXX */
1516 break;
1517 case S390_PC_REGNUM: GET_REGL(env->psw.addr); break;
1518 case S390_CC_REGNUM:
1519 env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
1520 env->cc_vr);
1521 GET_REG32(env->cc_op);
1522 break;
1525 return 0;
1528 static int cpu_gdb_write_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1530 target_ulong tmpl;
1531 uint32_t tmp32;
1532 int r = 8;
1533 tmpl = ldtul_p(mem_buf);
1534 tmp32 = ldl_p(mem_buf);
1536 switch (n) {
1537 case S390_PSWM_REGNUM: env->psw.mask = tmpl; break;
1538 case S390_PSWA_REGNUM: env->psw.addr = tmpl; break;
1539 case S390_R0_REGNUM ... S390_R15_REGNUM:
1540 env->regs[n-S390_R0_REGNUM] = tmpl; break;
1541 case S390_A0_REGNUM ... S390_A15_REGNUM:
1542 env->aregs[n-S390_A0_REGNUM] = tmp32; r=4; break;
1543 case S390_FPC_REGNUM: env->fpc = tmp32; r=4; break;
1544 case S390_F0_REGNUM ... S390_F15_REGNUM:
1545 /* XXX */
1546 break;
1547 case S390_PC_REGNUM: env->psw.addr = tmpl; break;
1548 case S390_CC_REGNUM: env->cc_op = tmp32; r=4; break;
1551 return r;
1553 #elif defined (TARGET_LM32)
1555 #include "hw/lm32_pic.h"
1556 #define NUM_CORE_REGS (32 + 7)
1558 static int cpu_gdb_read_register(CPULM32State *env, uint8_t *mem_buf, int n)
1560 if (n < 32) {
1561 GET_REG32(env->regs[n]);
1562 } else {
1563 switch (n) {
1564 case 32:
1565 GET_REG32(env->pc);
1566 break;
1567 /* FIXME: put in right exception ID */
1568 case 33:
1569 GET_REG32(0);
1570 break;
1571 case 34:
1572 GET_REG32(env->eba);
1573 break;
1574 case 35:
1575 GET_REG32(env->deba);
1576 break;
1577 case 36:
1578 GET_REG32(env->ie);
1579 break;
1580 case 37:
1581 GET_REG32(lm32_pic_get_im(env->pic_state));
1582 break;
1583 case 38:
1584 GET_REG32(lm32_pic_get_ip(env->pic_state));
1585 break;
1588 return 0;
1591 static int cpu_gdb_write_register(CPULM32State *env, uint8_t *mem_buf, int n)
1593 uint32_t tmp;
1595 if (n > NUM_CORE_REGS) {
1596 return 0;
1599 tmp = ldl_p(mem_buf);
1601 if (n < 32) {
1602 env->regs[n] = tmp;
1603 } else {
1604 switch (n) {
1605 case 32:
1606 env->pc = tmp;
1607 break;
1608 case 34:
1609 env->eba = tmp;
1610 break;
1611 case 35:
1612 env->deba = tmp;
1613 break;
1614 case 36:
1615 env->ie = tmp;
1616 break;
1617 case 37:
1618 lm32_pic_set_im(env->pic_state, tmp);
1619 break;
1620 case 38:
1621 lm32_pic_set_ip(env->pic_state, tmp);
1622 break;
1625 return 4;
1627 #elif defined(TARGET_XTENSA)
1629 /* Use num_core_regs to see only non-privileged registers in an unmodified gdb.
1630 * Use num_regs to see all registers. gdb modification is required for that:
1631 * reset bit 0 in the 'flags' field of the registers definitions in the
1632 * gdb/xtensa-config.c inside gdb source tree or inside gdb overlay.
1634 #define NUM_CORE_REGS (env->config->gdb_regmap.num_regs)
1635 #define num_g_regs NUM_CORE_REGS
1637 static int cpu_gdb_read_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1639 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1641 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1642 return 0;
1645 switch (reg->type) {
1646 case 9: /*pc*/
1647 GET_REG32(env->pc);
1648 break;
1650 case 1: /*ar*/
1651 xtensa_sync_phys_from_window(env);
1652 GET_REG32(env->phys_regs[(reg->targno & 0xff) % env->config->nareg]);
1653 break;
1655 case 2: /*SR*/
1656 GET_REG32(env->sregs[reg->targno & 0xff]);
1657 break;
1659 case 3: /*UR*/
1660 GET_REG32(env->uregs[reg->targno & 0xff]);
1661 break;
1663 case 8: /*a*/
1664 GET_REG32(env->regs[reg->targno & 0x0f]);
1665 break;
1667 default:
1668 qemu_log("%s from reg %d of unsupported type %d\n",
1669 __func__, n, reg->type);
1670 return 0;
1674 static int cpu_gdb_write_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1676 uint32_t tmp;
1677 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1679 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1680 return 0;
1683 tmp = ldl_p(mem_buf);
1685 switch (reg->type) {
1686 case 9: /*pc*/
1687 env->pc = tmp;
1688 break;
1690 case 1: /*ar*/
1691 env->phys_regs[(reg->targno & 0xff) % env->config->nareg] = tmp;
1692 xtensa_sync_window_from_phys(env);
1693 break;
1695 case 2: /*SR*/
1696 env->sregs[reg->targno & 0xff] = tmp;
1697 break;
1699 case 3: /*UR*/
1700 env->uregs[reg->targno & 0xff] = tmp;
1701 break;
1703 case 8: /*a*/
1704 env->regs[reg->targno & 0x0f] = tmp;
1705 break;
1707 default:
1708 qemu_log("%s to reg %d of unsupported type %d\n",
1709 __func__, n, reg->type);
1710 return 0;
1713 return 4;
1715 #else
1717 #define NUM_CORE_REGS 0
1719 static int cpu_gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int n)
1721 return 0;
1724 static int cpu_gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int n)
1726 return 0;
1729 #endif
1731 #if !defined(TARGET_XTENSA)
1732 static int num_g_regs = NUM_CORE_REGS;
1733 #endif
1735 #ifdef GDB_CORE_XML
1736 /* Encode data using the encoding for 'x' packets. */
1737 static int memtox(char *buf, const char *mem, int len)
1739 char *p = buf;
1740 char c;
1742 while (len--) {
1743 c = *(mem++);
1744 switch (c) {
1745 case '#': case '$': case '*': case '}':
1746 *(p++) = '}';
1747 *(p++) = c ^ 0x20;
1748 break;
1749 default:
1750 *(p++) = c;
1751 break;
1754 return p - buf;
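
/* Worked example (illustrative): memtox() escapes the four characters that
 * are special in the remote protocol by emitting '}' followed by the
 * character XORed with 0x20, so '#' (0x23) becomes the two bytes '}' 0x03
 * and '$' (0x24) becomes '}' 0x04; all other bytes are copied unchanged.
 */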
1757 static const char *get_feature_xml(const char *p, const char **newp)
1759 size_t len;
1760 int i;
1761 const char *name;
1762 static char target_xml[1024];
1764 len = 0;
1765 while (p[len] && p[len] != ':')
1766 len++;
1767 *newp = p + len;
1769 name = NULL;
1770 if (strncmp(p, "target.xml", len) == 0) {
1771 /* Generate the XML description for this CPU. */
1772 if (!target_xml[0]) {
1773 GDBRegisterState *r;
1775 snprintf(target_xml, sizeof(target_xml),
1776 "<?xml version=\"1.0\"?>"
1777 "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
1778 "<target>"
1779 "<xi:include href=\"%s\"/>",
1780 GDB_CORE_XML);
1782 for (r = first_cpu->gdb_regs; r; r = r->next) {
1783 pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
1784 pstrcat(target_xml, sizeof(target_xml), r->xml);
1785 pstrcat(target_xml, sizeof(target_xml), "\"/>");
1787 pstrcat(target_xml, sizeof(target_xml), "</target>");
1789 return target_xml;
1791 for (i = 0; ; i++) {
1792 name = xml_builtin[i][0];
1793 if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len))
1794 break;
1796 return name ? xml_builtin[i][1] : NULL;
1798 #endif
1800 static int gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int reg)
1802 GDBRegisterState *r;
1804 if (reg < NUM_CORE_REGS)
1805 return cpu_gdb_read_register(env, mem_buf, reg);
1807 for (r = env->gdb_regs; r; r = r->next) {
1808 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1809 return r->get_reg(env, mem_buf, reg - r->base_reg);
1812 return 0;
1815 static int gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int reg)
1817 GDBRegisterState *r;
1819 if (reg < NUM_CORE_REGS)
1820 return cpu_gdb_write_register(env, mem_buf, reg);
1822 for (r = env->gdb_regs; r; r = r->next) {
1823 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1824 return r->set_reg(env, mem_buf, reg - r->base_reg);
1827 return 0;
1830 #if !defined(TARGET_XTENSA)
1831 /* Register a supplemental set of CPU registers. If g_pos is nonzero it
1832 specifies the first register number and these registers are included in
1833 a standard "g" packet. Direction is relative to gdb, i.e. get_reg is
1834 gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
1837 void gdb_register_coprocessor(CPUArchState * env,
1838 gdb_reg_cb get_reg, gdb_reg_cb set_reg,
1839 int num_regs, const char *xml, int g_pos)
1841 GDBRegisterState *s;
1842 GDBRegisterState **p;
1843 static int last_reg = NUM_CORE_REGS;
1845 p = &env->gdb_regs;
1846 while (*p) {
1847 /* Check for duplicates. */
1848 if (strcmp((*p)->xml, xml) == 0)
1849 return;
1850 p = &(*p)->next;
1853 s = g_new0(GDBRegisterState, 1);
1854 s->base_reg = last_reg;
1855 s->num_regs = num_regs;
1856 s->get_reg = get_reg;
1857 s->set_reg = set_reg;
1858 s->xml = xml;
1860 /* Add to end of list. */
1861 last_reg += num_regs;
1862 *p = s;
1863 if (g_pos) {
1864 if (g_pos != s->base_reg) {
1865 fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n"
1866 "Expected %d got %d\n", xml, g_pos, s->base_reg);
1867 } else {
1868 num_g_regs = last_reg;
1872 #endif
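
/* Usage sketch (illustrative only -- the real arguments live in per-target
 * code, and the names below are hypothetical):
 *
 *     gdb_register_coprocessor(env, my_get_reg, my_set_reg,
 *                              num_extra_regs, "my-coproc.xml", 0);
 *
 * After registration those registers are addressable through 'p'/'P'
 * packets at numbers starting from the current last_reg, and the named XML
 * file is pulled into the target description built by get_feature_xml().
 */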
1874 #ifndef CONFIG_USER_ONLY
1875 static const int xlat_gdb_type[] = {
1876 [GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE,
1877 [GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ,
1878 [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
1880 #endif
1882 static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
1884 CPUArchState *env;
1885 int err = 0;
1887 if (kvm_enabled())
1888 return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1890 switch (type) {
1891 case GDB_BREAKPOINT_SW:
1892 case GDB_BREAKPOINT_HW:
1893 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1894 err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
1895 if (err)
1896 break;
1898 return err;
1899 #ifndef CONFIG_USER_ONLY
1900 case GDB_WATCHPOINT_WRITE:
1901 case GDB_WATCHPOINT_READ:
1902 case GDB_WATCHPOINT_ACCESS:
1903 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1904 err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
1905 NULL);
1906 if (err)
1907 break;
1909 return err;
1910 #endif
1911 default:
1912 return -ENOSYS;
1916 static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
1918 CPUArchState *env;
1919 int err = 0;
1921 if (kvm_enabled())
1922 return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1924 switch (type) {
1925 case GDB_BREAKPOINT_SW:
1926 case GDB_BREAKPOINT_HW:
1927 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1928 err = cpu_breakpoint_remove(env, addr, BP_GDB);
1929 if (err)
1930 break;
1932 return err;
1933 #ifndef CONFIG_USER_ONLY
1934 case GDB_WATCHPOINT_WRITE:
1935 case GDB_WATCHPOINT_READ:
1936 case GDB_WATCHPOINT_ACCESS:
1937 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1938 err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
1939 if (err)
1940 break;
1942 return err;
1943 #endif
1944 default:
1945 return -ENOSYS;
1949 static void gdb_breakpoint_remove_all(void)
1951 CPUArchState *env;
1953 if (kvm_enabled()) {
1954 kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
1955 return;
1958 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1959 cpu_breakpoint_remove_all(env, BP_GDB);
1960 #ifndef CONFIG_USER_ONLY
1961 cpu_watchpoint_remove_all(env, BP_GDB);
1962 #endif
1966 static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
1968 cpu_synchronize_state(s->c_cpu);
1969 #if defined(TARGET_I386)
1970 s->c_cpu->eip = pc;
1971 #elif defined (TARGET_PPC)
1972 s->c_cpu->nip = pc;
1973 #elif defined (TARGET_SPARC)
1974 s->c_cpu->pc = pc;
1975 s->c_cpu->npc = pc + 4;
1976 #elif defined (TARGET_ARM)
1977 s->c_cpu->regs[15] = pc;
1978 #elif defined (TARGET_SH4)
1979 s->c_cpu->pc = pc;
1980 #elif defined (TARGET_MIPS)
1981 s->c_cpu->active_tc.PC = pc & ~(target_ulong)1;
1982 if (pc & 1) {
1983 s->c_cpu->hflags |= MIPS_HFLAG_M16;
1984 } else {
1985 s->c_cpu->hflags &= ~(MIPS_HFLAG_M16);
1987 #elif defined (TARGET_MICROBLAZE)
1988 s->c_cpu->sregs[SR_PC] = pc;
1989 #elif defined(TARGET_OPENRISC)
1990 s->c_cpu->pc = pc;
1991 #elif defined (TARGET_CRIS)
1992 s->c_cpu->pc = pc;
1993 #elif defined (TARGET_ALPHA)
1994 s->c_cpu->pc = pc;
1995 #elif defined (TARGET_S390X)
1996 s->c_cpu->psw.addr = pc;
1997 #elif defined (TARGET_LM32)
1998 s->c_cpu->pc = pc;
1999 #elif defined(TARGET_XTENSA)
2000 s->c_cpu->pc = pc;
2001 #endif
2004 static CPUArchState *find_cpu(uint32_t thread_id)
2006 CPUArchState *env;
2008 for (env = first_cpu; env != NULL; env = env->next_cpu) {
2009 if (cpu_index(env) == thread_id) {
2010 return env;
2014 return NULL;
2017 static int gdb_handle_packet(GDBState *s, const char *line_buf)
2019 CPUArchState *env;
2020 const char *p;
2021 uint32_t thread;
2022 int ch, reg_size, type, res;
2023 char buf[MAX_PACKET_LENGTH];
2024 uint8_t mem_buf[MAX_PACKET_LENGTH];
2025 uint8_t *registers;
2026 target_ulong addr, len;
2028 #ifdef DEBUG_GDB
2029 printf("command='%s'\n", line_buf);
2030 #endif
2031 p = line_buf;
2032 ch = *p++;
2033 switch(ch) {
2034 case '?':
2035 /* TODO: Make this return the correct value for user-mode. */
2036 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", GDB_SIGNAL_TRAP,
2037 cpu_index(s->c_cpu));
2038 put_packet(s, buf);
2039 /* Remove all the breakpoints when this query is issued,
2040 * because gdb is doing an initial connect and the state
2041 * should be cleaned up.
2043 gdb_breakpoint_remove_all();
2044 break;
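
    /* Illustrative example of the stop reply built above: with
     * GDB_SIGNAL_TRAP (5) and CPU index 1 the packet body is
     * "T05thread:01;", i.e. "thread 1 stopped with SIGTRAP". */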
2045 case 'c':
2046 if (*p != '\0') {
2047 addr = strtoull(p, (char **)&p, 16);
2048 gdb_set_cpu_pc(s, addr);
2050 s->signal = 0;
2051 gdb_continue(s);
2052 return RS_IDLE;
2053 case 'C':
2054 s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16));
2055 if (s->signal == -1)
2056 s->signal = 0;
2057 gdb_continue(s);
2058 return RS_IDLE;
2059 case 'v':
2060 if (strncmp(p, "Cont", 4) == 0) {
2061 int res_signal, res_thread;
2063 p += 4;
2064 if (*p == '?') {
2065 put_packet(s, "vCont;c;C;s;S");
2066 break;
2068 res = 0;
2069 res_signal = 0;
2070 res_thread = 0;
2071 while (*p) {
2072 int action, signal;
2074 if (*p++ != ';') {
2075 res = 0;
2076 break;
2078 action = *p++;
2079 signal = 0;
2080 if (action == 'C' || action == 'S') {
2081 signal = strtoul(p, (char **)&p, 16);
2082 } else if (action != 'c' && action != 's') {
2083 res = 0;
2084 break;
2086 thread = 0;
2087 if (*p == ':') {
2088 thread = strtoull(p+1, (char **)&p, 16);
2090 action = tolower(action);
2091 if (res == 0 || (res == 'c' && action == 's')) {
2092 res = action;
2093 res_signal = signal;
2094 res_thread = thread;
2097 if (res) {
2098 if (res_thread != -1 && res_thread != 0) {
2099 env = find_cpu(res_thread);
2100 if (env == NULL) {
2101 put_packet(s, "E22");
2102 break;
2104 s->c_cpu = env;
2106 if (res == 's') {
2107 cpu_single_step(s->c_cpu, sstep_flags);
2109 s->signal = res_signal;
2110 gdb_continue(s);
2111 return RS_IDLE;
2113 break;
2114 } else {
2115 goto unknown_command;
2117 case 'k':
2118 #ifdef CONFIG_USER_ONLY
2119 /* Kill the target */
2120 fprintf(stderr, "\nQEMU: Terminated via GDBstub\n");
2121 exit(0);
2122 #endif
2123 case 'D':
2124 /* Detach packet */
2125 gdb_breakpoint_remove_all();
2126 gdb_syscall_mode = GDB_SYS_DISABLED;
2127 gdb_continue(s);
2128 put_packet(s, "OK");
2129 break;
2130 case 's':
2131 if (*p != '\0') {
2132 addr = strtoull(p, (char **)&p, 16);
2133 gdb_set_cpu_pc(s, addr);
2135 cpu_single_step(s->c_cpu, sstep_flags);
2136 gdb_continue(s);
2137 return RS_IDLE;
2138 case 'F':
2140 target_ulong ret;
2141 target_ulong err;
2143 ret = strtoull(p, (char **)&p, 16);
2144 if (*p == ',') {
2145 p++;
2146 err = strtoull(p, (char **)&p, 16);
2147 } else {
2148 err = 0;
2150 if (*p == ',')
2151 p++;
2152 type = *p;
2153 if (s->current_syscall_cb) {
2154 s->current_syscall_cb(s->c_cpu, ret, err);
2155 s->current_syscall_cb = NULL;
2157 if (type == 'C') {
2158 put_packet(s, "T02");
2159 } else {
2160 gdb_continue(s);
2163 break;
2164 case 'g':
2165 cpu_synchronize_state(s->g_cpu);
2166 env = s->g_cpu;
2167 len = 0;
2168 for (addr = 0; addr < num_g_regs; addr++) {
2169 reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr);
2170 len += reg_size;
2172 memtohex(buf, mem_buf, len);
2173 put_packet(s, buf);
2174 break;
2175 case 'G':
2176 cpu_synchronize_state(s->g_cpu);
2177 env = s->g_cpu;
2178 registers = mem_buf;
2179 len = strlen(p) / 2;
2180 hextomem((uint8_t *)registers, p, len);
2181 for (addr = 0; addr < num_g_regs && len > 0; addr++) {
2182 reg_size = gdb_write_register(s->g_cpu, registers, addr);
2183 len -= reg_size;
2184 registers += reg_size;
2186 put_packet(s, "OK");
2187 break;
2188 case 'm':
2189 addr = strtoull(p, (char **)&p, 16);
2190 if (*p == ',')
2191 p++;
2192 len = strtoull(p, NULL, 16);
2193 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 0) != 0) {
2194 put_packet (s, "E14");
2195 } else {
2196 memtohex(buf, mem_buf, len);
2197 put_packet(s, buf);
2199 break;
2200 case 'M':
2201 addr = strtoull(p, (char **)&p, 16);
2202 if (*p == ',')
2203 p++;
2204 len = strtoull(p, (char **)&p, 16);
2205 if (*p == ':')
2206 p++;
2207 hextomem(mem_buf, p, len);
2208 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 1) != 0) {
2209 put_packet(s, "E14");
2210 } else {
2211 put_packet(s, "OK");
2213 break;
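
    /* Illustrative examples of the two memory packets handled above:
     * "m10074,4" asks for 4 bytes at address 0x10074 and is answered with
     * 8 hex digits (or "E14" on a fault), while "M10074,4:78563412" writes
     * those same 4 bytes back.  Addresses and lengths are in hex, lengths
     * are in bytes. */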
2214 case 'p':
2215 /* Older gdb are really dumb, and don't use 'g' if 'p' is available.
2216 This works, but can be very slow. Anything new enough to
2217 understand XML also knows how to use this properly. */
2218 if (!gdb_has_xml)
2219 goto unknown_command;
2220 addr = strtoull(p, (char **)&p, 16);
2221 reg_size = gdb_read_register(s->g_cpu, mem_buf, addr);
2222 if (reg_size) {
2223 memtohex(buf, mem_buf, reg_size);
2224 put_packet(s, buf);
2225 } else {
2226 put_packet(s, "E14");
2228 break;
2229 case 'P':
2230 if (!gdb_has_xml)
2231 goto unknown_command;
2232 addr = strtoull(p, (char **)&p, 16);
2233 if (*p == '=')
2234 p++;
2235 reg_size = strlen(p) / 2;
2236 hextomem(mem_buf, p, reg_size);
2237 gdb_write_register(s->g_cpu, mem_buf, addr);
2238 put_packet(s, "OK");
2239 break;
2240 case 'Z':
2241 case 'z':
2242 type = strtoul(p, (char **)&p, 16);
2243 if (*p == ',')
2244 p++;
2245 addr = strtoull(p, (char **)&p, 16);
2246 if (*p == ',')
2247 p++;
2248 len = strtoull(p, (char **)&p, 16);
2249 if (ch == 'Z')
2250 res = gdb_breakpoint_insert(addr, len, type);
2251 else
2252 res = gdb_breakpoint_remove(addr, len, type);
2253 if (res >= 0)
2254 put_packet(s, "OK");
2255 else if (res == -ENOSYS)
2256 put_packet(s, "");
2257 else
2258 put_packet(s, "E22");
2259 break;
2260 case 'H':
2261 type = *p++;
2262 thread = strtoull(p, (char **)&p, 16);
2263 if (thread == -1 || thread == 0) {
2264 put_packet(s, "OK");
2265 break;
2267 env = find_cpu(thread);
2268 if (env == NULL) {
2269 put_packet(s, "E22");
2270 break;
2272 switch (type) {
2273 case 'c':
2274 s->c_cpu = env;
2275 put_packet(s, "OK");
2276 break;
2277 case 'g':
2278 s->g_cpu = env;
2279 put_packet(s, "OK");
2280 break;
2281 default:
2282 put_packet(s, "E22");
2283 break;
2285 break;
2286 case 'T':
2287 thread = strtoull(p, (char **)&p, 16);
2288 env = find_cpu(thread);
2290 if (env != NULL) {
2291 put_packet(s, "OK");
2292 } else {
2293 put_packet(s, "E22");
2295 break;
2296 case 'q':
2297 case 'Q':
2298 /* parse any 'q' packets here */
2299 if (!strcmp(p,"qemu.sstepbits")) {
2300 /* Query Breakpoint bit definitions */
2301 snprintf(buf, sizeof(buf), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
2302 SSTEP_ENABLE,
2303 SSTEP_NOIRQ,
2304 SSTEP_NOTIMER);
2305 put_packet(s, buf);
2306 break;
2307 } else if (strncmp(p,"qemu.sstep",10) == 0) {
2308 /* Display or change the sstep_flags */
2309 p += 10;
2310 if (*p != '=') {
2311 /* Display current setting */
2312 snprintf(buf, sizeof(buf), "0x%x", sstep_flags);
2313 put_packet(s, buf);
2314 break;
2316 p++;
2317 type = strtoul(p, (char **)&p, 16);
2318 sstep_flags = type;
2319 put_packet(s, "OK");
2320 break;
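
        /* Illustrative usage (assuming SSTEP_ENABLE is bit 0): from gdb,
         * "maint packet qqemu.sstepbits" lists the flag values, and
         * "maint packet qqemu.sstep=0x1" would keep single-stepping enabled
         * while allowing interrupts and timers to run. */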
2321 } else if (strcmp(p,"C") == 0) {
2322 /* "Current thread" remains vague in the spec, so always return
2323 * the first CPU (gdb returns the first thread). */
2324 put_packet(s, "QC1");
2325 break;
2326 } else if (strcmp(p,"fThreadInfo") == 0) {
2327 s->query_cpu = first_cpu;
2328 goto report_cpuinfo;
2329 } else if (strcmp(p,"sThreadInfo") == 0) {
2330 report_cpuinfo:
2331 if (s->query_cpu) {
2332 snprintf(buf, sizeof(buf), "m%x", cpu_index(s->query_cpu));
2333 put_packet(s, buf);
2334 s->query_cpu = s->query_cpu->next_cpu;
2335 } else
2336 put_packet(s, "l");
2337 break;
2338 } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
2339 thread = strtoull(p+16, (char **)&p, 16);
2340 env = find_cpu(thread);
2341 if (env != NULL) {
2342 cpu_synchronize_state(env);
2343 len = snprintf((char *)mem_buf, sizeof(mem_buf),
2344 "CPU#%d [%s]", env->cpu_index,
2345 env->halted ? "halted " : "running");
2346 memtohex(buf, mem_buf, len);
2347 put_packet(s, buf);
2349 break;
2351 #ifdef CONFIG_USER_ONLY
2352 else if (strncmp(p, "Offsets", 7) == 0) {
2353 TaskState *ts = s->c_cpu->opaque;
2355 snprintf(buf, sizeof(buf),
2356 "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
2357 ";Bss=" TARGET_ABI_FMT_lx,
2358 ts->info->code_offset,
2359 ts->info->data_offset,
2360 ts->info->data_offset);
2361 put_packet(s, buf);
2362 break;
2364 #else /* !CONFIG_USER_ONLY */
2365 else if (strncmp(p, "Rcmd,", 5) == 0) {
2366 int len = strlen(p + 5);
2368 if ((len % 2) != 0) {
2369 put_packet(s, "E01");
2370 break;
2372 hextomem(mem_buf, p + 5, len);
2373 len = len / 2;
2374 mem_buf[len++] = 0;
2375 qemu_chr_be_write(s->mon_chr, mem_buf, len);
2376 put_packet(s, "OK");
2377 break;
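
        /* Illustrative note: this is how "monitor <cmd>" typed inside gdb
         * reaches the QEMU monitor -- gdb sends qRcmd, followed by the
         * command hex-encoded, e.g. "qRcmd,696e666f20726567697374657273"
         * for "info registers"; the monitor's output is streamed back to
         * gdb hex-encoded as well. */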
2379 #endif /* !CONFIG_USER_ONLY */
2380 if (strncmp(p, "Supported", 9) == 0) {
2381 snprintf(buf, sizeof(buf), "PacketSize=%x", MAX_PACKET_LENGTH);
2382 #ifdef GDB_CORE_XML
2383 pstrcat(buf, sizeof(buf), ";qXfer:features:read+");
2384 #endif
2385 put_packet(s, buf);
2386 break;
2388 #ifdef GDB_CORE_XML
2389 if (strncmp(p, "Xfer:features:read:", 19) == 0) {
2390 const char *xml;
2391 target_ulong total_len;
2393 gdb_has_xml = 1;
2394 p += 19;
2395 xml = get_feature_xml(p, &p);
2396 if (!xml) {
2397 snprintf(buf, sizeof(buf), "E00");
2398 put_packet(s, buf);
2399 break;
2402 if (*p == ':')
2403 p++;
2404 addr = strtoul(p, (char **)&p, 16);
2405 if (*p == ',')
2406 p++;
2407 len = strtoul(p, (char **)&p, 16);
2409 total_len = strlen(xml);
2410 if (addr > total_len) {
2411 snprintf(buf, sizeof(buf), "E00");
2412 put_packet(s, buf);
2413 break;
2415 if (len > (MAX_PACKET_LENGTH - 5) / 2)
2416 len = (MAX_PACKET_LENGTH - 5) / 2;
2417 if (len < total_len - addr) {
2418 buf[0] = 'm';
2419 len = memtox(buf + 1, xml + addr, len);
2420 } else {
2421 buf[0] = 'l';
2422 len = memtox(buf + 1, xml + addr, total_len - addr);
2424 put_packet_binary(s, buf, len + 1);
2425 break;
2427 #endif
2428 /* Unrecognised 'q' command. */
2429 goto unknown_command;
2431 default:
2432 unknown_command:
2433 /* put empty packet */
2434 buf[0] = '\0';
2435 put_packet(s, buf);
2436 break;
2437 }
2438 return RS_IDLE;
2439 }
2441 void gdb_set_stop_cpu(CPUArchState *env)
2442 {
2443 gdbserver_state->c_cpu = env;
2444 gdbserver_state->g_cpu = env;
2445 }
2447 #ifndef CONFIG_USER_ONLY
2448 static void gdb_vm_state_change(void *opaque, int running, RunState state)
2449 {
2450 GDBState *s = gdbserver_state;
2451 CPUArchState *env = s->c_cpu;
2452 char buf[256];
2453 const char *type;
2454 int ret;
2456 if (running || s->state == RS_INACTIVE) {
2457 return;
2458 }
2459 /* Is there a GDB syscall waiting to be sent? */
2460 if (s->current_syscall_cb) {
2461 put_packet(s, s->syscall_buf);
2462 return;
2463 }
2464 switch (state) {
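/* Added annotation: the cases below choose a signal number and build a GDB
 * stop-reply packet of the form "T<signal in hex>thread:<id>;...", optionally
 * carrying a "watch"/"rwatch"/"awatch:<address>" pair when a watchpoint
 * triggered. */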
2465 case RUN_STATE_DEBUG:
2466 if (env->watchpoint_hit) {
2467 switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
2468 case BP_MEM_READ:
2469 type = "r";
2470 break;
2471 case BP_MEM_ACCESS:
2472 type = "a";
2473 break;
2474 default:
2475 type = "";
2476 break;
2477 }
2478 snprintf(buf, sizeof(buf),
2479 "T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
2480 GDB_SIGNAL_TRAP, cpu_index(env), type,
2481 env->watchpoint_hit->vaddr);
2482 env->watchpoint_hit = NULL;
2483 goto send_packet;
2484 }
2485 tb_flush(env);
2486 ret = GDB_SIGNAL_TRAP;
2487 break;
2488 case RUN_STATE_PAUSED:
2489 ret = GDB_SIGNAL_INT;
2490 break;
2491 case RUN_STATE_SHUTDOWN:
2492 ret = GDB_SIGNAL_QUIT;
2493 break;
2494 case RUN_STATE_IO_ERROR:
2495 ret = GDB_SIGNAL_IO;
2496 break;
2497 case RUN_STATE_WATCHDOG:
2498 ret = GDB_SIGNAL_ALRM;
2499 break;
2500 case RUN_STATE_INTERNAL_ERROR:
2501 ret = GDB_SIGNAL_ABRT;
2502 break;
2503 case RUN_STATE_SAVE_VM:
2504 case RUN_STATE_RESTORE_VM:
2505 return;
2506 case RUN_STATE_FINISH_MIGRATE:
2507 ret = GDB_SIGNAL_XCPU;
2508 break;
2509 default:
2510 ret = GDB_SIGNAL_UNKNOWN;
2511 break;
2512 }
2513 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", ret, cpu_index(env));
2515 send_packet:
2516 put_packet(s, buf);
2518 /* disable single step if it was enabled */
2519 cpu_single_step(env, 0);
2520 }
2521 #endif
2523 /* Send a gdb syscall request.
2524 This accepts limited printf-style format specifiers, specifically:
2525 %x - target_ulong argument printed in hex.
2526 %lx - 64-bit argument printed in hex.
2527 %s - string pointer (target_ulong) and length (int) pair. */
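/* Illustrative example (added, not taken from this source): a semihosting-style
 * caller could request a host read roughly as
 *     gdb_do_syscall(my_complete_cb, "read,%x,%x,%x", fd, buf_addr, count);
 * which emits an "Fread,..." request; my_complete_cb is a hypothetical
 * gdb_syscall_complete_cb invoked with the result once gdb replies. */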
2528 void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
2529 {
2530 va_list va;
2531 char *p;
2532 char *p_end;
2533 target_ulong addr;
2534 uint64_t i64;
2535 GDBState *s;
2537 s = gdbserver_state;
2538 if (!s)
2539 return;
2540 s->current_syscall_cb = cb;
2541 #ifndef CONFIG_USER_ONLY
2542 vm_stop(RUN_STATE_DEBUG);
2543 #endif
2544 va_start(va, fmt);
2545 p = s->syscall_buf;
2546 p_end = &s->syscall_buf[sizeof(s->syscall_buf)];
2547 *(p++) = 'F';
2548 while (*fmt) {
2549 if (*fmt == '%') {
2550 fmt++;
2551 switch (*fmt++) {
2552 case 'x':
2553 addr = va_arg(va, target_ulong);
2554 p += snprintf(p, p_end - p, TARGET_FMT_lx, addr);
2555 break;
2556 case 'l':
2557 if (*(fmt++) != 'x')
2558 goto bad_format;
2559 i64 = va_arg(va, uint64_t);
2560 p += snprintf(p, p_end - p, "%" PRIx64, i64);
2561 break;
2562 case 's':
2563 addr = va_arg(va, target_ulong);
2564 p += snprintf(p, p_end - p, TARGET_FMT_lx "/%x",
2565 addr, va_arg(va, int));
2566 break;
2567 default:
2568 bad_format:
2569 fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n",
2570 fmt - 1);
2571 break;
2572 }
2573 } else {
2574 *(p++) = *(fmt++);
2575 }
2576 }
2577 *p = 0;
2578 va_end(va);
2579 #ifdef CONFIG_USER_ONLY
2580 put_packet(s, s->syscall_buf);
2581 gdb_handlesig(s->c_cpu, 0);
2582 #else
2583 /* In this case wait to send the syscall packet until notification that
2584 the CPU has stopped. This must be done because if the packet is sent
2585 now the reply from the syscall request could be received while the CPU
2586 is still in the running state, which can cause packets to be dropped
2587 and state transition 'T' packets to be sent while the syscall is still
2588 being processed. */
2589 cpu_exit(s->c_cpu);
2590 #endif
2591 }
2593 static void gdb_read_byte(GDBState *s, int ch)
2594 {
2595 int i, csum;
2596 uint8_t reply;
2598 #ifndef CONFIG_USER_ONLY
2599 if (s->last_packet_len) {
2600 /* Waiting for a response to the last packet. If we see the start
2601 of a new command then abandon the previous response. */
2602 if (ch == '-') {
2603 #ifdef DEBUG_GDB
2604 printf("Got NACK, retransmitting\n");
2605 #endif
2606 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
2607 }
2608 #ifdef DEBUG_GDB
2609 else if (ch == '+')
2610 printf("Got ACK\n");
2611 else
2612 printf("Got '%c' when expecting ACK/NACK\n", ch);
2613 #endif
2614 if (ch == '+' || ch == '$')
2615 s->last_packet_len = 0;
2616 if (ch != '$')
2617 return;
2618 }
2619 if (runstate_is_running()) {
2620 /* when the CPU is running, we cannot do anything except stop
2621 it when receiving a char */
2622 vm_stop(RUN_STATE_PAUSED);
2623 } else
2624 #endif
2625 {
2626 switch(s->state) {
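/* Added annotation: the state machine below parses the remote serial protocol
 * framing "$<packet-data>#<two hex digit checksum>", where the checksum is the
 * sum of the payload bytes modulo 256; the '+'/'-' acknowledgement bytes are
 * handled above. */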
2627 case RS_IDLE:
2628 if (ch == '$') {
2629 s->line_buf_index = 0;
2630 s->state = RS_GETLINE;
2631 }
2632 break;
2633 case RS_GETLINE:
2634 if (ch == '#') {
2635 s->state = RS_CHKSUM1;
2636 } else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
2637 s->state = RS_IDLE;
2638 } else {
2639 s->line_buf[s->line_buf_index++] = ch;
2640 }
2641 break;
2642 case RS_CHKSUM1:
2643 s->line_buf[s->line_buf_index] = '\0';
2644 s->line_csum = fromhex(ch) << 4;
2645 s->state = RS_CHKSUM2;
2646 break;
2647 case RS_CHKSUM2:
2648 s->line_csum |= fromhex(ch);
2649 csum = 0;
2650 for(i = 0; i < s->line_buf_index; i++) {
2651 csum += s->line_buf[i];
2652 }
2653 if (s->line_csum != (csum & 0xff)) {
2654 reply = '-';
2655 put_buffer(s, &reply, 1);
2656 s->state = RS_IDLE;
2657 } else {
2658 reply = '+';
2659 put_buffer(s, &reply, 1);
2660 s->state = gdb_handle_packet(s, s->line_buf);
2661 }
2662 break;
2663 default:
2664 abort();
2665 }
2666 }
2667 }
2669 /* Tell the remote gdb that the process has exited. */
2670 void gdb_exit(CPUArchState *env, int code)
2671 {
2672 GDBState *s;
2673 char buf[4];
2675 s = gdbserver_state;
2676 if (!s) {
2677 return;
2678 }
2679 #ifdef CONFIG_USER_ONLY
2680 if (gdbserver_fd < 0 || s->fd < 0) {
2681 return;
2682 }
2683 #endif
2685 snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
2686 put_packet(s, buf);
2688 #ifndef CONFIG_USER_ONLY
2689 if (s->chr) {
2690 qemu_chr_delete(s->chr);
2691 }
2692 #endif
2693 }
2695 #ifdef CONFIG_USER_ONLY
2696 int
2697 gdb_queuesig (void)
2698 {
2699 GDBState *s;
2701 s = gdbserver_state;
2703 if (gdbserver_fd < 0 || s->fd < 0)
2704 return 0;
2705 else
2706 return 1;
2707 }
2709 int
2710 gdb_handlesig (CPUArchState *env, int sig)
2711 {
2712 GDBState *s;
2713 char buf[256];
2714 int n;
2716 s = gdbserver_state;
2717 if (gdbserver_fd < 0 || s->fd < 0)
2718 return sig;
2720 /* disable single step if it was enabled */
2721 cpu_single_step(env, 0);
2722 tb_flush(env);
2724 if (sig != 0)
2725 {
2726 snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb (sig));
2727 put_packet(s, buf);
2728 }
2729 /* put_packet() might have detected that the peer terminated the
2730 connection. */
2731 if (s->fd < 0)
2732 return sig;
2734 sig = 0;
2735 s->state = RS_IDLE;
2736 s->running_state = 0;
2737 while (s->running_state == 0) {
2738 n = read (s->fd, buf, 256);
2739 if (n > 0)
2740 {
2741 int i;
2743 for (i = 0; i < n; i++)
2744 gdb_read_byte (s, buf[i]);
2745 }
2746 else if (n == 0 || errno != EAGAIN)
2747 {
2748 /* XXX: Connection closed. Should probably wait for another
2749 connection before continuing. */
2750 return sig;
2751 }
2752 }
2753 sig = s->signal;
2754 s->signal = 0;
2755 return sig;
2756 }
2758 /* Tell the remote gdb that the process has exited due to SIG. */
2759 void gdb_signalled(CPUArchState *env, int sig)
2760 {
2761 GDBState *s;
2762 char buf[4];
2764 s = gdbserver_state;
2765 if (gdbserver_fd < 0 || s->fd < 0)
2766 return;
2768 snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb (sig));
2769 put_packet(s, buf);
2770 }
2772 static void gdb_accept(void)
2773 {
2774 GDBState *s;
2775 struct sockaddr_in sockaddr;
2776 socklen_t len;
2777 int val, fd;
2779 for(;;) {
2780 len = sizeof(sockaddr);
2781 fd = accept(gdbserver_fd, (struct sockaddr *)&sockaddr, &len);
2782 if (fd < 0 && errno != EINTR) {
2783 perror("accept");
2784 return;
2785 } else if (fd >= 0) {
2786 #ifndef _WIN32
2787 fcntl(fd, F_SETFD, FD_CLOEXEC);
2788 #endif
2789 break;
2790 }
2791 }
2793 /* set short latency */
2794 val = 1;
2795 setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *)&val, sizeof(val));
2797 s = g_malloc0(sizeof(GDBState));
2798 s->c_cpu = first_cpu;
2799 s->g_cpu = first_cpu;
2800 s->fd = fd;
2801 gdb_has_xml = 0;
2803 gdbserver_state = s;
2805 fcntl(fd, F_SETFL, O_NONBLOCK);
2806 }
2808 static int gdbserver_open(int port)
2809 {
2810 struct sockaddr_in sockaddr;
2811 int fd, val, ret;
2813 fd = socket(PF_INET, SOCK_STREAM, 0);
2814 if (fd < 0) {
2815 perror("socket");
2816 return -1;
2817 }
2818 #ifndef _WIN32
2819 fcntl(fd, F_SETFD, FD_CLOEXEC);
2820 #endif
2822 /* allow fast reuse */
2823 val = 1;
2824 setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *)&val, sizeof(val));
2826 sockaddr.sin_family = AF_INET;
2827 sockaddr.sin_port = htons(port);
2828 sockaddr.sin_addr.s_addr = 0;
2829 ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
2830 if (ret < 0) {
2831 perror("bind");
2832 close(fd);
2833 return -1;
2834 }
2835 ret = listen(fd, 0);
2836 if (ret < 0) {
2837 perror("listen");
2838 close(fd);
2839 return -1;
2840 }
2841 return fd;
2842 }
2844 int gdbserver_start(int port)
2845 {
2846 gdbserver_fd = gdbserver_open(port);
2847 if (gdbserver_fd < 0)
2848 return -1;
2849 /* accept connections */
2850 gdb_accept();
2851 return 0;
2852 }
2854 /* Disable gdb stub for child processes. */
2855 void gdbserver_fork(CPUArchState *env)
2856 {
2857 GDBState *s = gdbserver_state;
2858 if (gdbserver_fd < 0 || s->fd < 0)
2859 return;
2860 close(s->fd);
2861 s->fd = -1;
2862 cpu_breakpoint_remove_all(env, BP_GDB);
2863 cpu_watchpoint_remove_all(env, BP_GDB);
2864 }
2865 #else
2866 static int gdb_chr_can_receive(void *opaque)
2867 {
2868 /* We can handle an arbitrarily large amount of data.
2869 Pick the maximum packet size, which is as good as anything. */
2870 return MAX_PACKET_LENGTH;
2871 }
2873 static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
2874 {
2875 int i;
2877 for (i = 0; i < size; i++) {
2878 gdb_read_byte(gdbserver_state, buf[i]);
2879 }
2880 }
2882 static void gdb_chr_event(void *opaque, int event)
2883 {
2884 switch (event) {
2885 case CHR_EVENT_OPENED:
2886 vm_stop(RUN_STATE_PAUSED);
2887 gdb_has_xml = 0;
2888 break;
2889 default:
2890 break;
2891 }
2892 }
2894 static void gdb_monitor_output(GDBState *s, const char *msg, int len)
2895 {
2896 char buf[MAX_PACKET_LENGTH];
2898 buf[0] = 'O';
2899 if (len > (MAX_PACKET_LENGTH/2) - 1)
2900 len = (MAX_PACKET_LENGTH/2) - 1;
2901 memtohex(buf + 1, (uint8_t *)msg, len);
2902 put_packet(s, buf);
2903 }
2905 static int gdb_monitor_write(CharDriverState *chr, const uint8_t *buf, int len)
2906 {
2907 const char *p = (const char *)buf;
2908 int max_sz;
2910 max_sz = (sizeof(gdbserver_state->last_packet) - 2) / 2;
2911 for (;;) {
2912 if (len <= max_sz) {
2913 gdb_monitor_output(gdbserver_state, p, len);
2914 break;
2915 }
2916 gdb_monitor_output(gdbserver_state, p, max_sz);
2917 p += max_sz;
2918 len -= max_sz;
2919 }
2920 return len;
2921 }
2923 #ifndef _WIN32
2924 static void gdb_sigterm_handler(int signal)
2925 {
2926 if (runstate_is_running()) {
2927 vm_stop(RUN_STATE_PAUSED);
2928 }
2929 }
2930 #endif
2932 int gdbserver_start(const char *device)
2933 {
2934 GDBState *s;
2935 char gdbstub_device_name[128];
2936 CharDriverState *chr = NULL;
2937 CharDriverState *mon_chr;
2939 if (!device)
2940 return -1;
2941 if (strcmp(device, "none") != 0) {
2942 if (strstart(device, "tcp:", NULL)) {
2943 /* enforce required TCP attributes */
2944 snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
2945 "%s,nowait,nodelay,server", device);
2946 device = gdbstub_device_name;
2947 }
2948 #ifndef _WIN32
2949 else if (strcmp(device, "stdio") == 0) {
2950 struct sigaction act;
2952 memset(&act, 0, sizeof(act));
2953 act.sa_handler = gdb_sigterm_handler;
2954 sigaction(SIGINT, &act, NULL);
2955 }
2956 #endif
2957 chr = qemu_chr_new("gdb", device, NULL);
2958 if (!chr)
2959 return -1;
2961 qemu_chr_add_handlers(chr, gdb_chr_can_receive, gdb_chr_receive,
2962 gdb_chr_event, NULL);
2963 }
2965 s = gdbserver_state;
2966 if (!s) {
2967 s = g_malloc0(sizeof(GDBState));
2968 gdbserver_state = s;
2970 qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);
2972 /* Initialize a monitor terminal for gdb */
2973 mon_chr = g_malloc0(sizeof(*mon_chr));
2974 mon_chr->chr_write = gdb_monitor_write;
2975 monitor_init(mon_chr, 0);
2976 } else {
2977 if (s->chr)
2978 qemu_chr_delete(s->chr);
2979 mon_chr = s->mon_chr;
2980 memset(s, 0, sizeof(GDBState));
2981 }
2982 s->c_cpu = first_cpu;
2983 s->g_cpu = first_cpu;
2984 s->chr = chr;
2985 s->state = chr ? RS_IDLE : RS_INACTIVE;
2986 s->mon_chr = mon_chr;
2987 s->current_syscall_cb = NULL;
2989 return 0;
2990 }
2991 #endif