Merge remote-tracking branch 'mst/tags/for_anthony' into staging
[qemu-kvm.git] / gdbstub.c
blob08cf8645d7e301f10b77dd769358a8432e7187ed
/*
 * gdb server stub
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "config.h"
20 #include "qemu-common.h"
21 #ifdef CONFIG_USER_ONLY
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <stdarg.h>
25 #include <string.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
30 #include "qemu.h"
31 #else
32 #include "monitor.h"
33 #include "qemu-char.h"
34 #include "sysemu.h"
35 #include "gdbstub.h"
36 #endif
38 #define MAX_PACKET_LENGTH 4096
40 #include "cpu.h"
41 #include "qemu_socket.h"
42 #include "kvm.h"
44 #ifndef TARGET_CPU_MEMORY_RW_DEBUG
45 static inline int target_memory_rw_debug(CPUArchState *env, target_ulong addr,
46 uint8_t *buf, int len, int is_write)
48 return cpu_memory_rw_debug(env, addr, buf, len, is_write);
50 #else
51 /* target_memory_rw_debug() defined in cpu.h */
52 #endif
/* GDB remote-protocol signal numbers: the subset of gdb's own signal
   numbering that this stub references by name.  These values are fixed
   by the gdb protocol, not by the host or target OS. */
enum {
    GDB_SIGNAL_0 = 0,
    GDB_SIGNAL_INT = 2,
    GDB_SIGNAL_QUIT = 3,
    GDB_SIGNAL_TRAP = 5,
    GDB_SIGNAL_ABRT = 6,
    GDB_SIGNAL_ALRM = 14,
    GDB_SIGNAL_IO = 23,
    GDB_SIGNAL_XCPU = 24,
    GDB_SIGNAL_UNKNOWN = 143
};
#ifdef CONFIG_USER_ONLY

/* Map target signal numbers to GDB protocol signal numbers and vice
 * versa.  For user emulation's currently supported systems, we can
 * assume most signals are defined.
 *
 * Index == gdb protocol signal number; value == target signal number,
 * or -1 where gdb's number has no target equivalent.
 *
 * NOTE(review): this table was reconstructed from a truncated extraction
 * that dropped the unnamed -1 filler entries and the closing brace --
 * verify entry positions against upstream gdbstub.c.
 */
static int gdb_signal_table[] = {
    0,
    TARGET_SIGHUP,
    TARGET_SIGINT,
    TARGET_SIGQUIT,
    TARGET_SIGILL,
    TARGET_SIGTRAP,
    TARGET_SIGABRT,
    -1, /* SIGEMT */
    TARGET_SIGFPE,
    TARGET_SIGKILL,
    TARGET_SIGBUS,
    TARGET_SIGSEGV,
    TARGET_SIGSYS,
    TARGET_SIGPIPE,
    TARGET_SIGALRM,
    TARGET_SIGTERM,
    TARGET_SIGURG,
    TARGET_SIGSTOP,
    TARGET_SIGTSTP,
    TARGET_SIGCONT,
    TARGET_SIGCHLD,
    TARGET_SIGTTIN,
    TARGET_SIGTTOU,
    TARGET_SIGIO,
    TARGET_SIGXCPU,
    TARGET_SIGXFSZ,
    TARGET_SIGVTALRM,
    TARGET_SIGPROF,
    TARGET_SIGWINCH,
    -1, /* SIGLOST */
    TARGET_SIGUSR1,
    TARGET_SIGUSR2,
#ifdef TARGET_SIGPWR
    TARGET_SIGPWR,
#else
    -1,
#endif
    -1, /* SIGPOLL */
    -1,
    -1,
    -1,
    -1,
    -1,
    -1,
    -1,
    -1,
    -1,
    -1,
#ifdef __SIGRTMIN
    __SIGRTMIN + 1,
    __SIGRTMIN + 2,
    __SIGRTMIN + 3,
    __SIGRTMIN + 4,
    __SIGRTMIN + 5,
    __SIGRTMIN + 6,
    __SIGRTMIN + 7,
    __SIGRTMIN + 8,
    __SIGRTMIN + 9,
    __SIGRTMIN + 10,
    __SIGRTMIN + 11,
    __SIGRTMIN + 12,
    __SIGRTMIN + 13,
    __SIGRTMIN + 14,
    __SIGRTMIN + 15,
    __SIGRTMIN + 16,
    __SIGRTMIN + 17,
    __SIGRTMIN + 18,
    __SIGRTMIN + 19,
    __SIGRTMIN + 20,
    __SIGRTMIN + 21,
    __SIGRTMIN + 22,
    __SIGRTMIN + 23,
    __SIGRTMIN + 24,
    __SIGRTMIN + 25,
    __SIGRTMIN + 26,
    __SIGRTMIN + 27,
    __SIGRTMIN + 28,
    __SIGRTMIN + 29,
    __SIGRTMIN + 30,
    __SIGRTMIN + 31,
    -1, /* SIGCANCEL */
    __SIGRTMIN,
    __SIGRTMIN + 32,
    __SIGRTMIN + 33,
    __SIGRTMIN + 34,
    __SIGRTMIN + 35,
    __SIGRTMIN + 36,
    __SIGRTMIN + 37,
    __SIGRTMIN + 38,
    __SIGRTMIN + 39,
    __SIGRTMIN + 40,
    __SIGRTMIN + 41,
    __SIGRTMIN + 42,
    __SIGRTMIN + 43,
    __SIGRTMIN + 44,
    __SIGRTMIN + 45,
    __SIGRTMIN + 46,
    __SIGRTMIN + 47,
    __SIGRTMIN + 48,
    __SIGRTMIN + 49,
    __SIGRTMIN + 50,
    __SIGRTMIN + 51,
    __SIGRTMIN + 52,
    __SIGRTMIN + 53,
    __SIGRTMIN + 54,
    __SIGRTMIN + 55,
    __SIGRTMIN + 56,
    __SIGRTMIN + 57,
    __SIGRTMIN + 58,
    __SIGRTMIN + 59,
    __SIGRTMIN + 60,
    __SIGRTMIN + 61,
    __SIGRTMIN + 62,
    __SIGRTMIN + 63,
    __SIGRTMIN + 64,
    __SIGRTMIN + 65,
    __SIGRTMIN + 66,
    __SIGRTMIN + 67,
    __SIGRTMIN + 68,
    __SIGRTMIN + 69,
    __SIGRTMIN + 70,
    __SIGRTMIN + 71,
    __SIGRTMIN + 72,
    __SIGRTMIN + 73,
    __SIGRTMIN + 74,
    __SIGRTMIN + 75,
    __SIGRTMIN + 76,
    __SIGRTMIN + 77,
    __SIGRTMIN + 78,
    __SIGRTMIN + 79,
    __SIGRTMIN + 80,
    __SIGRTMIN + 81,
    __SIGRTMIN + 82,
    __SIGRTMIN + 83,
    __SIGRTMIN + 84,
    __SIGRTMIN + 85,
    __SIGRTMIN + 86,
    __SIGRTMIN + 87,
    __SIGRTMIN + 88,
    __SIGRTMIN + 89,
    __SIGRTMIN + 90,
    __SIGRTMIN + 91,
    __SIGRTMIN + 92,
    __SIGRTMIN + 93,
    __SIGRTMIN + 94,
    __SIGRTMIN + 95,
    -1, /* SIGINFO */
    -1, /* UNKNOWN */
    -1, /* DEFAULT */
    -1,
    -1,
    -1,
    -1,
    -1,
    -1
#endif
};

#else
/* In system mode we only need SIGINT and SIGTRAP; other signals
   are not yet supported.  */

enum {
    TARGET_SIGINT = 2,
    TARGET_SIGTRAP = 5
};

/* Sparse map: indices 2 and 5 are the only live entries. */
static int gdb_signal_table[] = {
    -1,
    -1,
    TARGET_SIGINT,
    -1,
    -1,
    TARGET_SIGTRAP
};
#endif
251 #ifdef CONFIG_USER_ONLY
252 static int target_signal_to_gdb (int sig)
254 int i;
255 for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++)
256 if (gdb_signal_table[i] == sig)
257 return i;
258 return GDB_SIGNAL_UNKNOWN;
260 #endif
262 static int gdb_signal_to_target (int sig)
264 if (sig < ARRAY_SIZE (gdb_signal_table))
265 return gdb_signal_table[sig];
266 else
267 return -1;
270 //#define DEBUG_GDB
272 typedef struct GDBRegisterState {
273 int base_reg;
274 int num_regs;
275 gdb_reg_cb get_reg;
276 gdb_reg_cb set_reg;
277 const char *xml;
278 struct GDBRegisterState *next;
279 } GDBRegisterState;
281 enum RSState {
282 RS_INACTIVE,
283 RS_IDLE,
284 RS_GETLINE,
285 RS_CHKSUM1,
286 RS_CHKSUM2,
288 typedef struct GDBState {
289 CPUArchState *c_cpu; /* current CPU for step/continue ops */
290 CPUArchState *g_cpu; /* current CPU for other ops */
291 CPUArchState *query_cpu; /* for q{f|s}ThreadInfo */
292 enum RSState state; /* parsing state */
293 char line_buf[MAX_PACKET_LENGTH];
294 int line_buf_index;
295 int line_csum;
296 uint8_t last_packet[MAX_PACKET_LENGTH + 4];
297 int last_packet_len;
298 int signal;
299 #ifdef CONFIG_USER_ONLY
300 int fd;
301 int running_state;
302 #else
303 CharDriverState *chr;
304 CharDriverState *mon_chr;
305 #endif
306 char syscall_buf[256];
307 gdb_syscall_complete_cb current_syscall_cb;
308 } GDBState;
310 /* By default use no IRQs and no timers while single stepping so as to
311 * make single stepping like an ICE HW step.
313 static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER;
315 static GDBState *gdbserver_state;
317 /* This is an ugly hack to cope with both new and old gdb.
318 If gdb sends qXfer:features:read then assume we're talking to a newish
319 gdb that understands target descriptions. */
320 static int gdb_has_xml;
#ifdef CONFIG_USER_ONLY
/* XXX: This is not thread safe.  Do we care?  */
static int gdbserver_fd = -1;

/* Read one byte from the gdb socket, retrying on EINTR/EAGAIN.
   Returns the byte, or -1 on error or EOF (closing the socket and
   marking s->fd disconnected in the EOF case). */
static int get_char(GDBState *s)
{
    uint8_t ch;
    int ret;

    for(;;) {
        ret = qemu_recv(s->fd, &ch, 1, 0);
        if (ret < 0) {
            if (errno == ECONNRESET)
                s->fd = -1;
            if (errno != EINTR && errno != EAGAIN)
                return -1;
        } else if (ret == 0) {
            /* Peer closed the connection. */
            close(s->fd);
            s->fd = -1;
            return -1;
        } else {
            break;
        }
    }
    return ch;
}
#endif
350 static enum {
351 GDB_SYS_UNKNOWN,
352 GDB_SYS_ENABLED,
353 GDB_SYS_DISABLED,
354 } gdb_syscall_mode;
356 /* If gdb is connected when the first semihosting syscall occurs then use
357 remote gdb syscalls. Otherwise use native file IO. */
358 int use_gdb_syscalls(void)
360 if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
361 gdb_syscall_mode = (gdbserver_state ? GDB_SYS_ENABLED
362 : GDB_SYS_DISABLED);
364 return gdb_syscall_mode == GDB_SYS_ENABLED;
367 /* Resume execution. */
368 static inline void gdb_continue(GDBState *s)
370 #ifdef CONFIG_USER_ONLY
371 s->running_state = 1;
372 #else
373 vm_start();
374 #endif
377 static void put_buffer(GDBState *s, const uint8_t *buf, int len)
379 #ifdef CONFIG_USER_ONLY
380 int ret;
382 while (len > 0) {
383 ret = send(s->fd, buf, len, 0);
384 if (ret < 0) {
385 if (errno != EINTR && errno != EAGAIN)
386 return;
387 } else {
388 buf += ret;
389 len -= ret;
392 #else
393 qemu_chr_fe_write(s->chr, buf, len);
394 #endif
/* ASCII hex digit -> value 0..15; returns 0 for any other character
   (inputs come pre-validated from the protocol parser). */
static inline int fromhex(int v)
{
    if (v >= '0' && v <= '9')
        return v - '0';
    else if (v >= 'A' && v <= 'F')
        return v - 'A' + 10;
    else if (v >= 'a' && v <= 'f')
        return v - 'a' + 10;
    else
        return 0;
}

/* Value 0..15 -> lower-case ASCII hex digit. */
static inline int tohex(int v)
{
    if (v < 10)
        return v + '0';
    else
        return v - 10 + 'a';
}

/* Encode len bytes of mem as 2*len hex characters plus a trailing NUL
   into buf (caller provides at least 2*len+1 bytes). */
static void memtohex(char *buf, const uint8_t *mem, int len)
{
    int i, c;
    char *q;
    q = buf;
    for(i = 0; i < len; i++) {
        c = mem[i];
        *q++ = tohex(c >> 4);
        *q++ = tohex(c & 0xf);
    }
    *q = '\0';
}

/* Decode 2*len hex characters from buf into len bytes at mem. */
static void hextomem(uint8_t *mem, const char *buf, int len)
{
    int i;

    for(i = 0; i < len; i++) {
        mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
        buf += 2;
    }
}
440 /* return -1 if error, 0 if OK */
441 static int put_packet_binary(GDBState *s, const char *buf, int len)
443 int csum, i;
444 uint8_t *p;
446 for(;;) {
447 p = s->last_packet;
448 *(p++) = '$';
449 memcpy(p, buf, len);
450 p += len;
451 csum = 0;
452 for(i = 0; i < len; i++) {
453 csum += buf[i];
455 *(p++) = '#';
456 *(p++) = tohex((csum >> 4) & 0xf);
457 *(p++) = tohex((csum) & 0xf);
459 s->last_packet_len = p - s->last_packet;
460 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
462 #ifdef CONFIG_USER_ONLY
463 i = get_char(s);
464 if (i < 0)
465 return -1;
466 if (i == '+')
467 break;
468 #else
469 break;
470 #endif
472 return 0;
475 /* return -1 if error, 0 if OK */
476 static int put_packet(GDBState *s, const char *buf)
478 #ifdef DEBUG_GDB
479 printf("reply='%s'\n", buf);
480 #endif
482 return put_packet_binary(s, buf, strlen(buf));
/* The GDB remote protocol transfers values in target byte order.  This means
   we can use the raw memory access routines to access the value buffer.
   Conveniently, these also handle the case where the buffer is mis-aligned.
   Each GET_REG* macro stores val into mem_buf (a name in scope at every
   expansion site) and returns the register size in bytes. */
#define GET_REG8(val) do { \
    stb_p(mem_buf, val); \
    return 1; \
    } while(0)
#define GET_REG16(val) do { \
    stw_p(mem_buf, val); \
    return 2; \
    } while(0)
#define GET_REG32(val) do { \
    stl_p(mem_buf, val); \
    return 4; \
    } while(0)
#define GET_REG64(val) do { \
    stq_p(mem_buf, val); \
    return 8; \
    } while(0)

/* GET_REGL/ldtul_p operate on a target_ulong-sized value. */
#if TARGET_LONG_BITS == 64
#define GET_REGL(val) GET_REG64(val)
#define ldtul_p(addr) ldq_p(addr)
#else
#define GET_REGL(val) GET_REG32(val)
#define ldtul_p(addr) ldl_p(addr)
#endif
514 #if defined(TARGET_I386)
516 #ifdef TARGET_X86_64
517 static const int gpr_map[16] = {
518 R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
519 8, 9, 10, 11, 12, 13, 14, 15
521 #else
522 #define gpr_map gpr_map32
523 #endif
524 static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
526 #define NUM_CORE_REGS (CPU_NB_REGS * 2 + 25)
528 #define IDX_IP_REG CPU_NB_REGS
529 #define IDX_FLAGS_REG (IDX_IP_REG + 1)
530 #define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
531 #define IDX_FP_REGS (IDX_SEG_REGS + 6)
532 #define IDX_XMM_REGS (IDX_FP_REGS + 16)
533 #define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
535 static int cpu_gdb_read_register(CPUX86State *env, uint8_t *mem_buf, int n)
537 if (n < CPU_NB_REGS) {
538 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
539 GET_REG64(env->regs[gpr_map[n]]);
540 } else if (n < CPU_NB_REGS32) {
541 GET_REG32(env->regs[gpr_map32[n]]);
543 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
544 #ifdef USE_X86LDOUBLE
545 /* FIXME: byteswap float values - after fixing fpregs layout. */
546 memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
547 #else
548 memset(mem_buf, 0, 10);
549 #endif
550 return 10;
551 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
552 n -= IDX_XMM_REGS;
553 if (n < CPU_NB_REGS32 ||
554 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
555 stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
556 stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
557 return 16;
559 } else {
560 switch (n) {
561 case IDX_IP_REG:
562 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
563 GET_REG64(env->eip);
564 } else {
565 GET_REG32(env->eip);
567 case IDX_FLAGS_REG: GET_REG32(env->eflags);
569 case IDX_SEG_REGS: GET_REG32(env->segs[R_CS].selector);
570 case IDX_SEG_REGS + 1: GET_REG32(env->segs[R_SS].selector);
571 case IDX_SEG_REGS + 2: GET_REG32(env->segs[R_DS].selector);
572 case IDX_SEG_REGS + 3: GET_REG32(env->segs[R_ES].selector);
573 case IDX_SEG_REGS + 4: GET_REG32(env->segs[R_FS].selector);
574 case IDX_SEG_REGS + 5: GET_REG32(env->segs[R_GS].selector);
576 case IDX_FP_REGS + 8: GET_REG32(env->fpuc);
577 case IDX_FP_REGS + 9: GET_REG32((env->fpus & ~0x3800) |
578 (env->fpstt & 0x7) << 11);
579 case IDX_FP_REGS + 10: GET_REG32(0); /* ftag */
580 case IDX_FP_REGS + 11: GET_REG32(0); /* fiseg */
581 case IDX_FP_REGS + 12: GET_REG32(0); /* fioff */
582 case IDX_FP_REGS + 13: GET_REG32(0); /* foseg */
583 case IDX_FP_REGS + 14: GET_REG32(0); /* fooff */
584 case IDX_FP_REGS + 15: GET_REG32(0); /* fop */
586 case IDX_MXCSR_REG: GET_REG32(env->mxcsr);
589 return 0;
592 static int cpu_x86_gdb_load_seg(CPUX86State *env, int sreg, uint8_t *mem_buf)
594 uint16_t selector = ldl_p(mem_buf);
596 if (selector != env->segs[sreg].selector) {
597 #if defined(CONFIG_USER_ONLY)
598 cpu_x86_load_seg(env, sreg, selector);
599 #else
600 unsigned int limit, flags;
601 target_ulong base;
603 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
604 base = selector << 4;
605 limit = 0xffff;
606 flags = 0;
607 } else {
608 if (!cpu_x86_get_descr_debug(env, selector, &base, &limit, &flags))
609 return 4;
611 cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
612 #endif
614 return 4;
617 static int cpu_gdb_write_register(CPUX86State *env, uint8_t *mem_buf, int n)
619 uint32_t tmp;
621 if (n < CPU_NB_REGS) {
622 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
623 env->regs[gpr_map[n]] = ldtul_p(mem_buf);
624 return sizeof(target_ulong);
625 } else if (n < CPU_NB_REGS32) {
626 n = gpr_map32[n];
627 env->regs[n] &= ~0xffffffffUL;
628 env->regs[n] |= (uint32_t)ldl_p(mem_buf);
629 return 4;
631 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
632 #ifdef USE_X86LDOUBLE
633 /* FIXME: byteswap float values - after fixing fpregs layout. */
634 memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
635 #endif
636 return 10;
637 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
638 n -= IDX_XMM_REGS;
639 if (n < CPU_NB_REGS32 ||
640 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
641 env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
642 env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
643 return 16;
645 } else {
646 switch (n) {
647 case IDX_IP_REG:
648 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
649 env->eip = ldq_p(mem_buf);
650 return 8;
651 } else {
652 env->eip &= ~0xffffffffUL;
653 env->eip |= (uint32_t)ldl_p(mem_buf);
654 return 4;
656 case IDX_FLAGS_REG:
657 env->eflags = ldl_p(mem_buf);
658 return 4;
660 case IDX_SEG_REGS: return cpu_x86_gdb_load_seg(env, R_CS, mem_buf);
661 case IDX_SEG_REGS + 1: return cpu_x86_gdb_load_seg(env, R_SS, mem_buf);
662 case IDX_SEG_REGS + 2: return cpu_x86_gdb_load_seg(env, R_DS, mem_buf);
663 case IDX_SEG_REGS + 3: return cpu_x86_gdb_load_seg(env, R_ES, mem_buf);
664 case IDX_SEG_REGS + 4: return cpu_x86_gdb_load_seg(env, R_FS, mem_buf);
665 case IDX_SEG_REGS + 5: return cpu_x86_gdb_load_seg(env, R_GS, mem_buf);
667 case IDX_FP_REGS + 8:
668 env->fpuc = ldl_p(mem_buf);
669 return 4;
670 case IDX_FP_REGS + 9:
671 tmp = ldl_p(mem_buf);
672 env->fpstt = (tmp >> 11) & 7;
673 env->fpus = tmp & ~0x3800;
674 return 4;
675 case IDX_FP_REGS + 10: /* ftag */ return 4;
676 case IDX_FP_REGS + 11: /* fiseg */ return 4;
677 case IDX_FP_REGS + 12: /* fioff */ return 4;
678 case IDX_FP_REGS + 13: /* foseg */ return 4;
679 case IDX_FP_REGS + 14: /* fooff */ return 4;
680 case IDX_FP_REGS + 15: /* fop */ return 4;
682 case IDX_MXCSR_REG:
683 env->mxcsr = ldl_p(mem_buf);
684 return 4;
687 /* Unrecognised register. */
688 return 0;
691 #elif defined (TARGET_PPC)
693 /* Old gdb always expects FP registers. Newer (xml-aware) gdb only
694 expects whatever the target description contains. Due to a
695 historical mishap the FP registers appear in between core integer
696 regs and PC, MSR, CR, and so forth. We hack round this by giving the
697 FP regs zero size when talking to a newer gdb. */
698 #define NUM_CORE_REGS 71
699 #if defined (TARGET_PPC64)
700 #define GDB_CORE_XML "power64-core.xml"
701 #else
702 #define GDB_CORE_XML "power-core.xml"
703 #endif
705 static int cpu_gdb_read_register(CPUPPCState *env, uint8_t *mem_buf, int n)
707 if (n < 32) {
708 /* gprs */
709 GET_REGL(env->gpr[n]);
710 } else if (n < 64) {
711 /* fprs */
712 if (gdb_has_xml)
713 return 0;
714 stfq_p(mem_buf, env->fpr[n-32]);
715 return 8;
716 } else {
717 switch (n) {
718 case 64: GET_REGL(env->nip);
719 case 65: GET_REGL(env->msr);
720 case 66:
722 uint32_t cr = 0;
723 int i;
724 for (i = 0; i < 8; i++)
725 cr |= env->crf[i] << (32 - ((i + 1) * 4));
726 GET_REG32(cr);
728 case 67: GET_REGL(env->lr);
729 case 68: GET_REGL(env->ctr);
730 case 69: GET_REGL(env->xer);
731 case 70:
733 if (gdb_has_xml)
734 return 0;
735 GET_REG32(env->fpscr);
739 return 0;
742 static int cpu_gdb_write_register(CPUPPCState *env, uint8_t *mem_buf, int n)
744 if (n < 32) {
745 /* gprs */
746 env->gpr[n] = ldtul_p(mem_buf);
747 return sizeof(target_ulong);
748 } else if (n < 64) {
749 /* fprs */
750 if (gdb_has_xml)
751 return 0;
752 env->fpr[n-32] = ldfq_p(mem_buf);
753 return 8;
754 } else {
755 switch (n) {
756 case 64:
757 env->nip = ldtul_p(mem_buf);
758 return sizeof(target_ulong);
759 case 65:
760 ppc_store_msr(env, ldtul_p(mem_buf));
761 return sizeof(target_ulong);
762 case 66:
764 uint32_t cr = ldl_p(mem_buf);
765 int i;
766 for (i = 0; i < 8; i++)
767 env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
768 return 4;
770 case 67:
771 env->lr = ldtul_p(mem_buf);
772 return sizeof(target_ulong);
773 case 68:
774 env->ctr = ldtul_p(mem_buf);
775 return sizeof(target_ulong);
776 case 69:
777 env->xer = ldtul_p(mem_buf);
778 return sizeof(target_ulong);
779 case 70:
780 /* fpscr */
781 if (gdb_has_xml)
782 return 0;
783 return 4;
786 return 0;
789 #elif defined (TARGET_SPARC)
791 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
792 #define NUM_CORE_REGS 86
793 #else
794 #define NUM_CORE_REGS 72
795 #endif
797 #ifdef TARGET_ABI32
798 #define GET_REGA(val) GET_REG32(val)
799 #else
800 #define GET_REGA(val) GET_REGL(val)
801 #endif
803 static int cpu_gdb_read_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
805 if (n < 8) {
806 /* g0..g7 */
807 GET_REGA(env->gregs[n]);
809 if (n < 32) {
810 /* register window */
811 GET_REGA(env->regwptr[n - 8]);
813 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
814 if (n < 64) {
815 /* fprs */
816 if (n & 1) {
817 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
818 } else {
819 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
822 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
823 switch (n) {
824 case 64: GET_REGA(env->y);
825 case 65: GET_REGA(cpu_get_psr(env));
826 case 66: GET_REGA(env->wim);
827 case 67: GET_REGA(env->tbr);
828 case 68: GET_REGA(env->pc);
829 case 69: GET_REGA(env->npc);
830 case 70: GET_REGA(env->fsr);
831 case 71: GET_REGA(0); /* csr */
832 default: GET_REGA(0);
834 #else
835 if (n < 64) {
836 /* f0-f31 */
837 if (n & 1) {
838 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
839 } else {
840 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
843 if (n < 80) {
844 /* f32-f62 (double width, even numbers only) */
845 GET_REG64(env->fpr[(n - 32) / 2].ll);
847 switch (n) {
848 case 80: GET_REGL(env->pc);
849 case 81: GET_REGL(env->npc);
850 case 82: GET_REGL((cpu_get_ccr(env) << 32) |
851 ((env->asi & 0xff) << 24) |
852 ((env->pstate & 0xfff) << 8) |
853 cpu_get_cwp64(env));
854 case 83: GET_REGL(env->fsr);
855 case 84: GET_REGL(env->fprs);
856 case 85: GET_REGL(env->y);
858 #endif
859 return 0;
862 static int cpu_gdb_write_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
864 #if defined(TARGET_ABI32)
865 abi_ulong tmp;
867 tmp = ldl_p(mem_buf);
868 #else
869 target_ulong tmp;
871 tmp = ldtul_p(mem_buf);
872 #endif
874 if (n < 8) {
875 /* g0..g7 */
876 env->gregs[n] = tmp;
877 } else if (n < 32) {
878 /* register window */
879 env->regwptr[n - 8] = tmp;
881 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
882 else if (n < 64) {
883 /* fprs */
884 /* f0-f31 */
885 if (n & 1) {
886 env->fpr[(n - 32) / 2].l.lower = tmp;
887 } else {
888 env->fpr[(n - 32) / 2].l.upper = tmp;
890 } else {
891 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
892 switch (n) {
893 case 64: env->y = tmp; break;
894 case 65: cpu_put_psr(env, tmp); break;
895 case 66: env->wim = tmp; break;
896 case 67: env->tbr = tmp; break;
897 case 68: env->pc = tmp; break;
898 case 69: env->npc = tmp; break;
899 case 70: env->fsr = tmp; break;
900 default: return 0;
903 return 4;
904 #else
905 else if (n < 64) {
906 /* f0-f31 */
907 tmp = ldl_p(mem_buf);
908 if (n & 1) {
909 env->fpr[(n - 32) / 2].l.lower = tmp;
910 } else {
911 env->fpr[(n - 32) / 2].l.upper = tmp;
913 return 4;
914 } else if (n < 80) {
915 /* f32-f62 (double width, even numbers only) */
916 env->fpr[(n - 32) / 2].ll = tmp;
917 } else {
918 switch (n) {
919 case 80: env->pc = tmp; break;
920 case 81: env->npc = tmp; break;
921 case 82:
922 cpu_put_ccr(env, tmp >> 32);
923 env->asi = (tmp >> 24) & 0xff;
924 env->pstate = (tmp >> 8) & 0xfff;
925 cpu_put_cwp64(env, tmp & 0xff);
926 break;
927 case 83: env->fsr = tmp; break;
928 case 84: env->fprs = tmp; break;
929 case 85: env->y = tmp; break;
930 default: return 0;
933 return 8;
934 #endif
936 #elif defined (TARGET_ARM)
938 /* Old gdb always expect FPA registers. Newer (xml-aware) gdb only expect
939 whatever the target description contains. Due to a historical mishap
940 the FPA registers appear in between core integer regs and the CPSR.
941 We hack round this by giving the FPA regs zero size when talking to a
942 newer gdb. */
943 #define NUM_CORE_REGS 26
944 #define GDB_CORE_XML "arm-core.xml"
946 static int cpu_gdb_read_register(CPUARMState *env, uint8_t *mem_buf, int n)
948 if (n < 16) {
949 /* Core integer register. */
950 GET_REG32(env->regs[n]);
952 if (n < 24) {
953 /* FPA registers. */
954 if (gdb_has_xml)
955 return 0;
956 memset(mem_buf, 0, 12);
957 return 12;
959 switch (n) {
960 case 24:
961 /* FPA status register. */
962 if (gdb_has_xml)
963 return 0;
964 GET_REG32(0);
965 case 25:
966 /* CPSR */
967 GET_REG32(cpsr_read(env));
969 /* Unknown register. */
970 return 0;
973 static int cpu_gdb_write_register(CPUARMState *env, uint8_t *mem_buf, int n)
975 uint32_t tmp;
977 tmp = ldl_p(mem_buf);
979 /* Mask out low bit of PC to workaround gdb bugs. This will probably
980 cause problems if we ever implement the Jazelle DBX extensions. */
981 if (n == 15)
982 tmp &= ~1;
984 if (n < 16) {
985 /* Core integer register. */
986 env->regs[n] = tmp;
987 return 4;
989 if (n < 24) { /* 16-23 */
990 /* FPA registers (ignored). */
991 if (gdb_has_xml)
992 return 0;
993 return 12;
995 switch (n) {
996 case 24:
997 /* FPA status register (ignored). */
998 if (gdb_has_xml)
999 return 0;
1000 return 4;
1001 case 25:
1002 /* CPSR */
1003 cpsr_write (env, tmp, 0xffffffff);
1004 return 4;
1006 /* Unknown register. */
1007 return 0;
1010 #elif defined (TARGET_M68K)
1012 #define NUM_CORE_REGS 18
1014 #define GDB_CORE_XML "cf-core.xml"
1016 static int cpu_gdb_read_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1018 if (n < 8) {
1019 /* D0-D7 */
1020 GET_REG32(env->dregs[n]);
1021 } else if (n < 16) {
1022 /* A0-A7 */
1023 GET_REG32(env->aregs[n - 8]);
1024 } else {
1025 switch (n) {
1026 case 16: GET_REG32(env->sr);
1027 case 17: GET_REG32(env->pc);
1030 /* FP registers not included here because they vary between
1031 ColdFire and m68k. Use XML bits for these. */
1032 return 0;
1035 static int cpu_gdb_write_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1037 uint32_t tmp;
1039 tmp = ldl_p(mem_buf);
1041 if (n < 8) {
1042 /* D0-D7 */
1043 env->dregs[n] = tmp;
1044 } else if (n < 16) {
1045 /* A0-A7 */
1046 env->aregs[n - 8] = tmp;
1047 } else {
1048 switch (n) {
1049 case 16: env->sr = tmp; break;
1050 case 17: env->pc = tmp; break;
1051 default: return 0;
1054 return 4;
1056 #elif defined (TARGET_MIPS)
1058 #define NUM_CORE_REGS 73
1060 static int cpu_gdb_read_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1062 if (n < 32) {
1063 GET_REGL(env->active_tc.gpr[n]);
1065 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
1066 if (n >= 38 && n < 70) {
1067 if (env->CP0_Status & (1 << CP0St_FR))
1068 GET_REGL(env->active_fpu.fpr[n - 38].d);
1069 else
1070 GET_REGL(env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
1072 switch (n) {
1073 case 70: GET_REGL((int32_t)env->active_fpu.fcr31);
1074 case 71: GET_REGL((int32_t)env->active_fpu.fcr0);
1077 switch (n) {
1078 case 32: GET_REGL((int32_t)env->CP0_Status);
1079 case 33: GET_REGL(env->active_tc.LO[0]);
1080 case 34: GET_REGL(env->active_tc.HI[0]);
1081 case 35: GET_REGL(env->CP0_BadVAddr);
1082 case 36: GET_REGL((int32_t)env->CP0_Cause);
1083 case 37: GET_REGL(env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16));
1084 case 72: GET_REGL(0); /* fp */
1085 case 89: GET_REGL((int32_t)env->CP0_PRid);
1087 if (n >= 73 && n <= 88) {
1088 /* 16 embedded regs. */
1089 GET_REGL(0);
1092 return 0;
1095 /* convert MIPS rounding mode in FCR31 to IEEE library */
1096 static unsigned int ieee_rm[] =
1098 float_round_nearest_even,
1099 float_round_to_zero,
1100 float_round_up,
1101 float_round_down
1103 #define RESTORE_ROUNDING_MODE \
1104 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
1106 static int cpu_gdb_write_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1108 target_ulong tmp;
1110 tmp = ldtul_p(mem_buf);
1112 if (n < 32) {
1113 env->active_tc.gpr[n] = tmp;
1114 return sizeof(target_ulong);
1116 if (env->CP0_Config1 & (1 << CP0C1_FP)
1117 && n >= 38 && n < 73) {
1118 if (n < 70) {
1119 if (env->CP0_Status & (1 << CP0St_FR))
1120 env->active_fpu.fpr[n - 38].d = tmp;
1121 else
1122 env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
1124 switch (n) {
1125 case 70:
1126 env->active_fpu.fcr31 = tmp & 0xFF83FFFF;
1127 /* set rounding mode */
1128 RESTORE_ROUNDING_MODE;
1129 break;
1130 case 71: env->active_fpu.fcr0 = tmp; break;
1132 return sizeof(target_ulong);
1134 switch (n) {
1135 case 32: env->CP0_Status = tmp; break;
1136 case 33: env->active_tc.LO[0] = tmp; break;
1137 case 34: env->active_tc.HI[0] = tmp; break;
1138 case 35: env->CP0_BadVAddr = tmp; break;
1139 case 36: env->CP0_Cause = tmp; break;
1140 case 37:
1141 env->active_tc.PC = tmp & ~(target_ulong)1;
1142 if (tmp & 1) {
1143 env->hflags |= MIPS_HFLAG_M16;
1144 } else {
1145 env->hflags &= ~(MIPS_HFLAG_M16);
1147 break;
1148 case 72: /* fp, ignored */ break;
1149 default:
1150 if (n > 89)
1151 return 0;
1152 /* Other registers are readonly. Ignore writes. */
1153 break;
1156 return sizeof(target_ulong);
1158 #elif defined (TARGET_SH4)
1160 /* Hint: Use "set architecture sh4" in GDB to see fpu registers */
1161 /* FIXME: We should use XML for this. */
1163 #define NUM_CORE_REGS 59
1165 static int cpu_gdb_read_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1167 if (n < 8) {
1168 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1169 GET_REGL(env->gregs[n + 16]);
1170 } else {
1171 GET_REGL(env->gregs[n]);
1173 } else if (n < 16) {
1174 GET_REGL(env->gregs[n]);
1175 } else if (n >= 25 && n < 41) {
1176 GET_REGL(env->fregs[(n - 25) + ((env->fpscr & FPSCR_FR) ? 16 : 0)]);
1177 } else if (n >= 43 && n < 51) {
1178 GET_REGL(env->gregs[n - 43]);
1179 } else if (n >= 51 && n < 59) {
1180 GET_REGL(env->gregs[n - (51 - 16)]);
1182 switch (n) {
1183 case 16: GET_REGL(env->pc);
1184 case 17: GET_REGL(env->pr);
1185 case 18: GET_REGL(env->gbr);
1186 case 19: GET_REGL(env->vbr);
1187 case 20: GET_REGL(env->mach);
1188 case 21: GET_REGL(env->macl);
1189 case 22: GET_REGL(env->sr);
1190 case 23: GET_REGL(env->fpul);
1191 case 24: GET_REGL(env->fpscr);
1192 case 41: GET_REGL(env->ssr);
1193 case 42: GET_REGL(env->spc);
1196 return 0;
1199 static int cpu_gdb_write_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1201 uint32_t tmp;
1203 tmp = ldl_p(mem_buf);
1205 if (n < 8) {
1206 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1207 env->gregs[n + 16] = tmp;
1208 } else {
1209 env->gregs[n] = tmp;
1211 return 4;
1212 } else if (n < 16) {
1213 env->gregs[n] = tmp;
1214 return 4;
1215 } else if (n >= 25 && n < 41) {
1216 env->fregs[(n - 25) + ((env->fpscr & FPSCR_FR) ? 16 : 0)] = tmp;
1217 return 4;
1218 } else if (n >= 43 && n < 51) {
1219 env->gregs[n - 43] = tmp;
1220 return 4;
1221 } else if (n >= 51 && n < 59) {
1222 env->gregs[n - (51 - 16)] = tmp;
1223 return 4;
1225 switch (n) {
1226 case 16: env->pc = tmp; break;
1227 case 17: env->pr = tmp; break;
1228 case 18: env->gbr = tmp; break;
1229 case 19: env->vbr = tmp; break;
1230 case 20: env->mach = tmp; break;
1231 case 21: env->macl = tmp; break;
1232 case 22: env->sr = tmp; break;
1233 case 23: env->fpul = tmp; break;
1234 case 24: env->fpscr = tmp; break;
1235 case 41: env->ssr = tmp; break;
1236 case 42: env->spc = tmp; break;
1237 default: return 0;
1240 return 4;
1242 #elif defined (TARGET_MICROBLAZE)
1244 #define NUM_CORE_REGS (32 + 5)
1246 static int cpu_gdb_read_register(CPUMBState *env, uint8_t *mem_buf, int n)
1248 if (n < 32) {
1249 GET_REG32(env->regs[n]);
1250 } else {
1251 GET_REG32(env->sregs[n - 32]);
1253 return 0;
1256 static int cpu_gdb_write_register(CPUMBState *env, uint8_t *mem_buf, int n)
1258 uint32_t tmp;
1260 if (n > NUM_CORE_REGS)
1261 return 0;
1263 tmp = ldl_p(mem_buf);
1265 if (n < 32) {
1266 env->regs[n] = tmp;
1267 } else {
1268 env->sregs[n - 32] = tmp;
1270 return 4;
1272 #elif defined (TARGET_CRIS)
1274 #define NUM_CORE_REGS 49
1276 static int
1277 read_register_crisv10(CPUCRISState *env, uint8_t *mem_buf, int n)
1279 if (n < 15) {
1280 GET_REG32(env->regs[n]);
1283 if (n == 15) {
1284 GET_REG32(env->pc);
1287 if (n < 32) {
1288 switch (n) {
1289 case 16:
1290 GET_REG8(env->pregs[n - 16]);
1291 break;
1292 case 17:
1293 GET_REG8(env->pregs[n - 16]);
1294 break;
1295 case 20:
1296 case 21:
1297 GET_REG16(env->pregs[n - 16]);
1298 break;
1299 default:
1300 if (n >= 23) {
1301 GET_REG32(env->pregs[n - 16]);
1303 break;
1306 return 0;
1309 static int cpu_gdb_read_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1311 uint8_t srs;
1313 if (env->pregs[PR_VR] < 32)
1314 return read_register_crisv10(env, mem_buf, n);
1316 srs = env->pregs[PR_SRS];
1317 if (n < 16) {
1318 GET_REG32(env->regs[n]);
1321 if (n >= 21 && n < 32) {
1322 GET_REG32(env->pregs[n - 16]);
1324 if (n >= 33 && n < 49) {
1325 GET_REG32(env->sregs[srs][n - 33]);
1327 switch (n) {
1328 case 16: GET_REG8(env->pregs[0]);
1329 case 17: GET_REG8(env->pregs[1]);
1330 case 18: GET_REG32(env->pregs[2]);
1331 case 19: GET_REG8(srs);
1332 case 20: GET_REG16(env->pregs[4]);
1333 case 32: GET_REG32(env->pc);
1336 return 0;
1339 static int cpu_gdb_write_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1341 uint32_t tmp;
1343 if (n > 49)
1344 return 0;
1346 tmp = ldl_p(mem_buf);
1348 if (n < 16) {
1349 env->regs[n] = tmp;
1352 if (n >= 21 && n < 32) {
1353 env->pregs[n - 16] = tmp;
1356 /* FIXME: Should support function regs be writable? */
1357 switch (n) {
1358 case 16: return 1;
1359 case 17: return 1;
1360 case 18: env->pregs[PR_PID] = tmp; break;
1361 case 19: return 1;
1362 case 20: return 2;
1363 case 32: env->pc = tmp; break;
1366 return 4;
1368 #elif defined (TARGET_ALPHA)
1370 #define NUM_CORE_REGS 67
1372 static int cpu_gdb_read_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1374 uint64_t val;
1375 CPU_DoubleU d;
1377 switch (n) {
1378 case 0 ... 30:
1379 val = env->ir[n];
1380 break;
1381 case 32 ... 62:
1382 d.d = env->fir[n - 32];
1383 val = d.ll;
1384 break;
1385 case 63:
1386 val = cpu_alpha_load_fpcr(env);
1387 break;
1388 case 64:
1389 val = env->pc;
1390 break;
1391 case 66:
1392 val = env->unique;
1393 break;
1394 case 31:
1395 case 65:
1396 /* 31 really is the zero register; 65 is unassigned in the
1397 gdb protocol, but is still required to occupy 8 bytes. */
1398 val = 0;
1399 break;
1400 default:
1401 return 0;
1403 GET_REGL(val);
1406 static int cpu_gdb_write_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1408 target_ulong tmp = ldtul_p(mem_buf);
1409 CPU_DoubleU d;
1411 switch (n) {
1412 case 0 ... 30:
1413 env->ir[n] = tmp;
1414 break;
1415 case 32 ... 62:
1416 d.ll = tmp;
1417 env->fir[n - 32] = d.d;
1418 break;
1419 case 63:
1420 cpu_alpha_store_fpcr(env, tmp);
1421 break;
1422 case 64:
1423 env->pc = tmp;
1424 break;
1425 case 66:
1426 env->unique = tmp;
1427 break;
1428 case 31:
1429 case 65:
1430 /* 31 really is the zero register; 65 is unassigned in the
1431 gdb protocol, but is still required to occupy 8 bytes. */
1432 break;
1433 default:
1434 return 0;
1436 return 8;
1438 #elif defined (TARGET_S390X)
1440 #define NUM_CORE_REGS S390_NUM_TOTAL_REGS
1442 static int cpu_gdb_read_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1444 switch (n) {
1445 case S390_PSWM_REGNUM: GET_REGL(env->psw.mask); break;
1446 case S390_PSWA_REGNUM: GET_REGL(env->psw.addr); break;
1447 case S390_R0_REGNUM ... S390_R15_REGNUM:
1448 GET_REGL(env->regs[n-S390_R0_REGNUM]); break;
1449 case S390_A0_REGNUM ... S390_A15_REGNUM:
1450 GET_REG32(env->aregs[n-S390_A0_REGNUM]); break;
1451 case S390_FPC_REGNUM: GET_REG32(env->fpc); break;
1452 case S390_F0_REGNUM ... S390_F15_REGNUM:
1453 /* XXX */
1454 break;
1455 case S390_PC_REGNUM: GET_REGL(env->psw.addr); break;
1456 case S390_CC_REGNUM:
1457 env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
1458 env->cc_vr);
1459 GET_REG32(env->cc_op);
1460 break;
1463 return 0;
1466 static int cpu_gdb_write_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1468 target_ulong tmpl;
1469 uint32_t tmp32;
1470 int r = 8;
1471 tmpl = ldtul_p(mem_buf);
1472 tmp32 = ldl_p(mem_buf);
1474 switch (n) {
1475 case S390_PSWM_REGNUM: env->psw.mask = tmpl; break;
1476 case S390_PSWA_REGNUM: env->psw.addr = tmpl; break;
1477 case S390_R0_REGNUM ... S390_R15_REGNUM:
1478 env->regs[n-S390_R0_REGNUM] = tmpl; break;
1479 case S390_A0_REGNUM ... S390_A15_REGNUM:
1480 env->aregs[n-S390_A0_REGNUM] = tmp32; r=4; break;
1481 case S390_FPC_REGNUM: env->fpc = tmp32; r=4; break;
1482 case S390_F0_REGNUM ... S390_F15_REGNUM:
1483 /* XXX */
1484 break;
1485 case S390_PC_REGNUM: env->psw.addr = tmpl; break;
1486 case S390_CC_REGNUM: env->cc_op = tmp32; r=4; break;
1489 return r;
1491 #elif defined (TARGET_LM32)
1493 #include "hw/lm32_pic.h"
1494 #define NUM_CORE_REGS (32 + 7)
1496 static int cpu_gdb_read_register(CPULM32State *env, uint8_t *mem_buf, int n)
1498 if (n < 32) {
1499 GET_REG32(env->regs[n]);
1500 } else {
1501 switch (n) {
1502 case 32:
1503 GET_REG32(env->pc);
1504 break;
1505 /* FIXME: put in right exception ID */
1506 case 33:
1507 GET_REG32(0);
1508 break;
1509 case 34:
1510 GET_REG32(env->eba);
1511 break;
1512 case 35:
1513 GET_REG32(env->deba);
1514 break;
1515 case 36:
1516 GET_REG32(env->ie);
1517 break;
1518 case 37:
1519 GET_REG32(lm32_pic_get_im(env->pic_state));
1520 break;
1521 case 38:
1522 GET_REG32(lm32_pic_get_ip(env->pic_state));
1523 break;
1526 return 0;
1529 static int cpu_gdb_write_register(CPULM32State *env, uint8_t *mem_buf, int n)
1531 uint32_t tmp;
1533 if (n > NUM_CORE_REGS) {
1534 return 0;
1537 tmp = ldl_p(mem_buf);
1539 if (n < 32) {
1540 env->regs[n] = tmp;
1541 } else {
1542 switch (n) {
1543 case 32:
1544 env->pc = tmp;
1545 break;
1546 case 34:
1547 env->eba = tmp;
1548 break;
1549 case 35:
1550 env->deba = tmp;
1551 break;
1552 case 36:
1553 env->ie = tmp;
1554 break;
1555 case 37:
1556 lm32_pic_set_im(env->pic_state, tmp);
1557 break;
1558 case 38:
1559 lm32_pic_set_ip(env->pic_state, tmp);
1560 break;
1563 return 4;
1565 #elif defined(TARGET_XTENSA)
1567 /* Use num_core_regs to see only non-privileged registers in an unmodified gdb.
1568 * Use num_regs to see all registers. gdb modification is required for that:
1569 * reset bit 0 in the 'flags' field of the registers definitions in the
1570 * gdb/xtensa-config.c inside gdb source tree or inside gdb overlay.
1572 #define NUM_CORE_REGS (env->config->gdb_regmap.num_regs)
1573 #define num_g_regs NUM_CORE_REGS
1575 static int cpu_gdb_read_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1577 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1579 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1580 return 0;
1583 switch (reg->type) {
1584 case 9: /*pc*/
1585 GET_REG32(env->pc);
1586 break;
1588 case 1: /*ar*/
1589 xtensa_sync_phys_from_window(env);
1590 GET_REG32(env->phys_regs[(reg->targno & 0xff) % env->config->nareg]);
1591 break;
1593 case 2: /*SR*/
1594 GET_REG32(env->sregs[reg->targno & 0xff]);
1595 break;
1597 case 3: /*UR*/
1598 GET_REG32(env->uregs[reg->targno & 0xff]);
1599 break;
1601 case 8: /*a*/
1602 GET_REG32(env->regs[reg->targno & 0x0f]);
1603 break;
1605 default:
1606 qemu_log("%s from reg %d of unsupported type %d\n",
1607 __func__, n, reg->type);
1608 return 0;
1612 static int cpu_gdb_write_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1614 uint32_t tmp;
1615 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1617 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1618 return 0;
1621 tmp = ldl_p(mem_buf);
1623 switch (reg->type) {
1624 case 9: /*pc*/
1625 env->pc = tmp;
1626 break;
1628 case 1: /*ar*/
1629 env->phys_regs[(reg->targno & 0xff) % env->config->nareg] = tmp;
1630 xtensa_sync_window_from_phys(env);
1631 break;
1633 case 2: /*SR*/
1634 env->sregs[reg->targno & 0xff] = tmp;
1635 break;
1637 case 3: /*UR*/
1638 env->uregs[reg->targno & 0xff] = tmp;
1639 break;
1641 case 8: /*a*/
1642 env->regs[reg->targno & 0x0f] = tmp;
1643 break;
1645 default:
1646 qemu_log("%s to reg %d of unsupported type %d\n",
1647 __func__, n, reg->type);
1648 return 0;
1651 return 4;
1653 #else
1655 #define NUM_CORE_REGS 0
1657 static int cpu_gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int n)
1659 return 0;
1662 static int cpu_gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int n)
1664 return 0;
1667 #endif
1669 #if !defined(TARGET_XTENSA)
/* Number of registers reported in a 'g' packet.  Starts at the core
 * register count and grows when coprocessor register sets are
 * registered with a nonzero g_pos (see gdb_register_coprocessor).
 * Xtensa defines num_g_regs as a macro instead. */
1670 static int num_g_regs = NUM_CORE_REGS;
1671 #endif
1673 #ifdef GDB_CORE_XML
/* Encode data using the encoding for 'x' packets: the protocol
 * metacharacters '#', '$', '*' and '}' are escaped as '}' followed by
 * the character XORed with 0x20; everything else is copied through
 * unchanged.  Returns the number of bytes written to buf (the caller
 * must provide up to 2*len bytes of space). */
static int memtox(char *buf, const char *mem, int len)
{
    char *out = buf;

    while (len-- > 0) {
        char c = *mem++;

        if (c == '#' || c == '$' || c == '*' || c == '}') {
            *out++ = '}';
            *out++ = c ^ 0x20;
        } else {
            *out++ = c;
        }
    }
    return out - buf;
}
1695 static const char *get_feature_xml(const char *p, const char **newp)
1697 size_t len;
1698 int i;
1699 const char *name;
1700 static char target_xml[1024];
1702 len = 0;
1703 while (p[len] && p[len] != ':')
1704 len++;
1705 *newp = p + len;
1707 name = NULL;
1708 if (strncmp(p, "target.xml", len) == 0) {
1709 /* Generate the XML description for this CPU. */
1710 if (!target_xml[0]) {
1711 GDBRegisterState *r;
1713 snprintf(target_xml, sizeof(target_xml),
1714 "<?xml version=\"1.0\"?>"
1715 "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
1716 "<target>"
1717 "<xi:include href=\"%s\"/>",
1718 GDB_CORE_XML);
1720 for (r = first_cpu->gdb_regs; r; r = r->next) {
1721 pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
1722 pstrcat(target_xml, sizeof(target_xml), r->xml);
1723 pstrcat(target_xml, sizeof(target_xml), "\"/>");
1725 pstrcat(target_xml, sizeof(target_xml), "</target>");
1727 return target_xml;
1729 for (i = 0; ; i++) {
1730 name = xml_builtin[i][0];
1731 if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len))
1732 break;
1734 return name ? xml_builtin[i][1] : NULL;
1736 #endif
1738 static int gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int reg)
1740 GDBRegisterState *r;
1742 if (reg < NUM_CORE_REGS)
1743 return cpu_gdb_read_register(env, mem_buf, reg);
1745 for (r = env->gdb_regs; r; r = r->next) {
1746 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1747 return r->get_reg(env, mem_buf, reg - r->base_reg);
1750 return 0;
1753 static int gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int reg)
1755 GDBRegisterState *r;
1757 if (reg < NUM_CORE_REGS)
1758 return cpu_gdb_write_register(env, mem_buf, reg);
1760 for (r = env->gdb_regs; r; r = r->next) {
1761 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1762 return r->set_reg(env, mem_buf, reg - r->base_reg);
1765 return 0;
1768 #if !defined(TARGET_XTENSA)
1769 /* Register a supplemental set of CPU registers. If g_pos is nonzero it
1770 specifies the first register number and these registers are included in
1771 a standard "g" packet. Direction is relative to gdb, i.e. get_reg is
1772 gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
1775 void gdb_register_coprocessor(CPUArchState * env,
1776 gdb_reg_cb get_reg, gdb_reg_cb set_reg,
1777 int num_regs, const char *xml, int g_pos)
1779 GDBRegisterState *s;
1780 GDBRegisterState **p;
1781 static int last_reg = NUM_CORE_REGS;
1783 p = &env->gdb_regs;
1784 while (*p) {
1785 /* Check for duplicates. */
1786 if (strcmp((*p)->xml, xml) == 0)
1787 return;
1788 p = &(*p)->next;
1791 s = g_new0(GDBRegisterState, 1);
1792 s->base_reg = last_reg;
1793 s->num_regs = num_regs;
1794 s->get_reg = get_reg;
1795 s->set_reg = set_reg;
1796 s->xml = xml;
1798 /* Add to end of list. */
1799 last_reg += num_regs;
1800 *p = s;
1801 if (g_pos) {
1802 if (g_pos != s->base_reg) {
1803 fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n"
1804 "Expected %d got %d\n", xml, g_pos, s->base_reg);
1805 } else {
1806 num_g_regs = last_reg;
1810 #endif
1812 #ifndef CONFIG_USER_ONLY
/* Map GDB protocol watchpoint type numbers (GDB_WATCHPOINT_*) to the
 * BP_* flag combinations used by QEMU's cpu_watchpoint_* API. */
1813 static const int xlat_gdb_type[] = {
1814 [GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE,
1815 [GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ,
1816 [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
1818 #endif
1820 static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
1822 CPUArchState *env;
1823 int err = 0;
1825 if (kvm_enabled())
1826 return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1828 switch (type) {
1829 case GDB_BREAKPOINT_SW:
1830 case GDB_BREAKPOINT_HW:
1831 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1832 err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
1833 if (err)
1834 break;
1836 return err;
1837 #ifndef CONFIG_USER_ONLY
1838 case GDB_WATCHPOINT_WRITE:
1839 case GDB_WATCHPOINT_READ:
1840 case GDB_WATCHPOINT_ACCESS:
1841 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1842 err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
1843 NULL);
1844 if (err)
1845 break;
1847 return err;
1848 #endif
1849 default:
1850 return -ENOSYS;
1854 static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
1856 CPUArchState *env;
1857 int err = 0;
1859 if (kvm_enabled())
1860 return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1862 switch (type) {
1863 case GDB_BREAKPOINT_SW:
1864 case GDB_BREAKPOINT_HW:
1865 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1866 err = cpu_breakpoint_remove(env, addr, BP_GDB);
1867 if (err)
1868 break;
1870 return err;
1871 #ifndef CONFIG_USER_ONLY
1872 case GDB_WATCHPOINT_WRITE:
1873 case GDB_WATCHPOINT_READ:
1874 case GDB_WATCHPOINT_ACCESS:
1875 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1876 err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
1877 if (err)
1878 break;
1880 return err;
1881 #endif
1882 default:
1883 return -ENOSYS;
1887 static void gdb_breakpoint_remove_all(void)
1889 CPUArchState *env;
1891 if (kvm_enabled()) {
1892 kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
1893 return;
1896 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1897 cpu_breakpoint_remove_all(env, BP_GDB);
1898 #ifndef CONFIG_USER_ONLY
1899 cpu_watchpoint_remove_all(env, BP_GDB);
1900 #endif
1904 static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
1906 cpu_synchronize_state(s->c_cpu);
1907 #if defined(TARGET_I386)
1908 s->c_cpu->eip = pc;
1909 #elif defined (TARGET_PPC)
1910 s->c_cpu->nip = pc;
1911 #elif defined (TARGET_SPARC)
1912 s->c_cpu->pc = pc;
1913 s->c_cpu->npc = pc + 4;
1914 #elif defined (TARGET_ARM)
1915 s->c_cpu->regs[15] = pc;
1916 #elif defined (TARGET_SH4)
1917 s->c_cpu->pc = pc;
1918 #elif defined (TARGET_MIPS)
1919 s->c_cpu->active_tc.PC = pc & ~(target_ulong)1;
1920 if (pc & 1) {
1921 s->c_cpu->hflags |= MIPS_HFLAG_M16;
1922 } else {
1923 s->c_cpu->hflags &= ~(MIPS_HFLAG_M16);
1925 #elif defined (TARGET_MICROBLAZE)
1926 s->c_cpu->sregs[SR_PC] = pc;
1927 #elif defined (TARGET_CRIS)
1928 s->c_cpu->pc = pc;
1929 #elif defined (TARGET_ALPHA)
1930 s->c_cpu->pc = pc;
1931 #elif defined (TARGET_S390X)
1932 s->c_cpu->psw.addr = pc;
1933 #elif defined (TARGET_LM32)
1934 s->c_cpu->pc = pc;
1935 #elif defined(TARGET_XTENSA)
1936 s->c_cpu->pc = pc;
1937 #endif
1940 static CPUArchState *find_cpu(uint32_t thread_id)
1942 CPUArchState *env;
1944 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1945 if (cpu_index(env) == thread_id) {
1946 return env;
1950 return NULL;
/* Decode and act on one complete gdb remote-protocol command held in
 * line_buf (already checksum-verified by gdb_read_byte), emitting any
 * reply via put_packet().  Returns the next parser state — always
 * RS_IDLE.  Commands that resume the target ('c', 'C', 's', vCont, 'F'
 * without a ctrl-C) return early after gdb_continue(). */
1953 static int gdb_handle_packet(GDBState *s, const char *line_buf)
1955 CPUArchState *env;
1956 const char *p;
1957 uint32_t thread;
1958 int ch, reg_size, type, res;
1959 char buf[MAX_PACKET_LENGTH];
1960 uint8_t mem_buf[MAX_PACKET_LENGTH];
1961 uint8_t *registers;
1962 target_ulong addr, len;
1964 #ifdef DEBUG_GDB
1965 printf("command='%s'\n", line_buf);
1966 #endif
/* The first character selects the command; p walks the arguments. */
1967 p = line_buf;
1968 ch = *p++;
1969 switch(ch) {
/* '?' — report why the target stopped (always SIGTRAP here). */
1970 case '?':
1971 /* TODO: Make this return the correct value for user-mode. */
1972 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", GDB_SIGNAL_TRAP,
1973 cpu_index(s->c_cpu));
1974 put_packet(s, buf);
1975 /* Remove all the breakpoints when this query is issued,
1976 * because gdb is doing and initial connect and the state
1977 * should be cleaned up.
1979 gdb_breakpoint_remove_all();
1980 break;
/* 'c' — continue, with an optional resume address. */
1981 case 'c':
1982 if (*p != '\0') {
1983 addr = strtoull(p, (char **)&p, 16);
1984 gdb_set_cpu_pc(s, addr);
1986 s->signal = 0;
1987 gdb_continue(s);
1988 return RS_IDLE;
/* 'C' — continue with a signal delivered to the target. */
1989 case 'C':
1990 s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16));
1991 if (s->signal == -1)
1992 s->signal = 0;
1993 gdb_continue(s);
1994 return RS_IDLE;
/* 'v' — only the vCont sub-command is implemented: a list of
 * ';'-separated per-thread resume actions (c/C/s/S[:thread]).  The
 * last applicable action wins, except step overrides continue. */
1995 case 'v':
1996 if (strncmp(p, "Cont", 4) == 0) {
1997 int res_signal, res_thread;
1999 p += 4;
2000 if (*p == '?') {
2001 put_packet(s, "vCont;c;C;s;S");
2002 break;
2004 res = 0;
2005 res_signal = 0;
2006 res_thread = 0;
2007 while (*p) {
2008 int action, signal;
2010 if (*p++ != ';') {
2011 res = 0;
2012 break;
2014 action = *p++;
2015 signal = 0;
2016 if (action == 'C' || action == 'S') {
2017 signal = strtoul(p, (char **)&p, 16);
2018 } else if (action != 'c' && action != 's') {
2019 res = 0;
2020 break;
2022 thread = 0;
2023 if (*p == ':') {
2024 thread = strtoull(p+1, (char **)&p, 16);
2026 action = tolower(action);
2027 if (res == 0 || (res == 'c' && action == 's')) {
2028 res = action;
2029 res_signal = signal;
2030 res_thread = thread;
2033 if (res) {
2034 if (res_thread != -1 && res_thread != 0) {
2035 env = find_cpu(res_thread);
2036 if (env == NULL) {
2037 put_packet(s, "E22");
2038 break;
2040 s->c_cpu = env;
2042 if (res == 's') {
2043 cpu_single_step(s->c_cpu, sstep_flags);
2045 s->signal = res_signal;
2046 gdb_continue(s);
2047 return RS_IDLE;
2049 break;
2050 } else {
2051 goto unknown_command;
/* 'k' — kill; in system mode this falls through to detach. */
2053 case 'k':
2054 #ifdef CONFIG_USER_ONLY
2055 /* Kill the target */
2056 fprintf(stderr, "\nQEMU: Terminated via GDBstub\n");
2057 exit(0);
2058 #endif
2059 case 'D':
2060 /* Detach packet */
2061 gdb_breakpoint_remove_all();
2062 gdb_syscall_mode = GDB_SYS_DISABLED;
2063 gdb_continue(s);
2064 put_packet(s, "OK");
2065 break;
/* 's' — single step, with an optional resume address. */
2066 case 's':
2067 if (*p != '\0') {
2068 addr = strtoull(p, (char **)&p, 16);
2069 gdb_set_cpu_pc(s, addr);
2071 cpu_single_step(s->c_cpu, sstep_flags);
2072 gdb_continue(s);
2073 return RS_IDLE;
/* 'F' — reply to a gdb file-I/O (syscall) request: return value,
 * errno, and optionally 'C' if the user interrupted with ctrl-C. */
2074 case 'F':
2076 target_ulong ret;
2077 target_ulong err;
2079 ret = strtoull(p, (char **)&p, 16);
2080 if (*p == ',') {
2081 p++;
2082 err = strtoull(p, (char **)&p, 16);
2083 } else {
2084 err = 0;
2086 if (*p == ',')
2087 p++;
2088 type = *p;
2089 if (s->current_syscall_cb) {
2090 s->current_syscall_cb(s->c_cpu, ret, err);
2091 s->current_syscall_cb = NULL;
2093 if (type == 'C') {
2094 put_packet(s, "T02");
2095 } else {
2096 gdb_continue(s);
2099 break;
/* 'g' — read all registers in the 'g' packet as one hex blob. */
2100 case 'g':
2101 cpu_synchronize_state(s->g_cpu);
2102 env = s->g_cpu;
2103 len = 0;
2104 for (addr = 0; addr < num_g_regs; addr++) {
2105 reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr);
2106 len += reg_size;
2108 memtohex(buf, mem_buf, len);
2109 put_packet(s, buf);
2110 break;
/* 'G' — write all registers from one hex blob. */
2111 case 'G':
2112 cpu_synchronize_state(s->g_cpu);
2113 env = s->g_cpu;
2114 registers = mem_buf;
2115 len = strlen(p) / 2;
2116 hextomem((uint8_t *)registers, p, len);
2117 for (addr = 0; addr < num_g_regs && len > 0; addr++) {
2118 reg_size = gdb_write_register(s->g_cpu, registers, addr);
2119 len -= reg_size;
2120 registers += reg_size;
2122 put_packet(s, "OK");
2123 break;
/* 'm'/'M' — read/write target memory: "addr,len[:data]". */
2124 case 'm':
2125 addr = strtoull(p, (char **)&p, 16);
2126 if (*p == ',')
2127 p++;
2128 len = strtoull(p, NULL, 16);
2129 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 0) != 0) {
2130 put_packet (s, "E14");
2131 } else {
2132 memtohex(buf, mem_buf, len);
2133 put_packet(s, buf);
2135 break;
2136 case 'M':
2137 addr = strtoull(p, (char **)&p, 16);
2138 if (*p == ',')
2139 p++;
2140 len = strtoull(p, (char **)&p, 16);
2141 if (*p == ':')
2142 p++;
2143 hextomem(mem_buf, p, len);
2144 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 1) != 0) {
2145 put_packet(s, "E14");
2146 } else {
2147 put_packet(s, "OK");
2149 break;
/* 'p'/'P' — read/write a single register; only honored for clients
 * that negotiated the XML target description. */
2150 case 'p':
2151 /* Older gdb are really dumb, and don't use 'g' if 'p' is avaialable.
2152 This works, but can be very slow. Anything new enough to
2153 understand XML also knows how to use this properly. */
2154 if (!gdb_has_xml)
2155 goto unknown_command;
2156 addr = strtoull(p, (char **)&p, 16);
2157 reg_size = gdb_read_register(s->g_cpu, mem_buf, addr);
2158 if (reg_size) {
2159 memtohex(buf, mem_buf, reg_size);
2160 put_packet(s, buf);
2161 } else {
2162 put_packet(s, "E14");
2164 break;
2165 case 'P':
2166 if (!gdb_has_xml)
2167 goto unknown_command;
2168 addr = strtoull(p, (char **)&p, 16);
2169 if (*p == '=')
2170 p++;
2171 reg_size = strlen(p) / 2;
2172 hextomem(mem_buf, p, reg_size);
2173 gdb_write_register(s->g_cpu, mem_buf, addr);
2174 put_packet(s, "OK");
2175 break;
/* 'Z'/'z' — insert/remove breakpoint or watchpoint: "type,addr,len".
 * An empty reply means "type not supported". */
2176 case 'Z':
2177 case 'z':
2178 type = strtoul(p, (char **)&p, 16);
2179 if (*p == ',')
2180 p++;
2181 addr = strtoull(p, (char **)&p, 16);
2182 if (*p == ',')
2183 p++;
2184 len = strtoull(p, (char **)&p, 16);
2185 if (ch == 'Z')
2186 res = gdb_breakpoint_insert(addr, len, type);
2187 else
2188 res = gdb_breakpoint_remove(addr, len, type);
2189 if (res >= 0)
2190 put_packet(s, "OK");
2191 else if (res == -ENOSYS)
2192 put_packet(s, "");
2193 else
2194 put_packet(s, "E22");
2195 break;
/* 'H' — select the thread used by later 'c' or 'g'-class commands. */
2196 case 'H':
2197 type = *p++;
2198 thread = strtoull(p, (char **)&p, 16);
2199 if (thread == -1 || thread == 0) {
2200 put_packet(s, "OK");
2201 break;
2203 env = find_cpu(thread);
2204 if (env == NULL) {
2205 put_packet(s, "E22");
2206 break;
2208 switch (type) {
2209 case 'c':
2210 s->c_cpu = env;
2211 put_packet(s, "OK");
2212 break;
2213 case 'g':
2214 s->g_cpu = env;
2215 put_packet(s, "OK");
2216 break;
2217 default:
2218 put_packet(s, "E22");
2219 break;
2221 break;
/* 'T' — is the given thread alive? */
2222 case 'T':
2223 thread = strtoull(p, (char **)&p, 16);
2224 env = find_cpu(thread);
2226 if (env != NULL) {
2227 put_packet(s, "OK");
2228 } else {
2229 put_packet(s, "E22");
2231 break;
/* 'q'/'Q' — general queries: qemu.* extensions, thread info,
 * qSupported, qRcmd (monitor passthrough), qXfer:features:read. */
2232 case 'q':
2233 case 'Q':
2234 /* parse any 'q' packets here */
2235 if (!strcmp(p,"qemu.sstepbits")) {
2236 /* Query Breakpoint bit definitions */
2237 snprintf(buf, sizeof(buf), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
2238 SSTEP_ENABLE,
2239 SSTEP_NOIRQ,
2240 SSTEP_NOTIMER);
2241 put_packet(s, buf);
2242 break;
2243 } else if (strncmp(p,"qemu.sstep",10) == 0) {
2244 /* Display or change the sstep_flags */
2245 p += 10;
2246 if (*p != '=') {
2247 /* Display current setting */
2248 snprintf(buf, sizeof(buf), "0x%x", sstep_flags);
2249 put_packet(s, buf);
2250 break;
2252 p++;
2253 type = strtoul(p, (char **)&p, 16);
2254 sstep_flags = type;
2255 put_packet(s, "OK");
2256 break;
2257 } else if (strcmp(p,"C") == 0) {
2258 /* "Current thread" remains vague in the spec, so always return
2259 * the first CPU (gdb returns the first thread). */
2260 put_packet(s, "QC1");
2261 break;
2262 } else if (strcmp(p,"fThreadInfo") == 0) {
2263 s->query_cpu = first_cpu;
2264 goto report_cpuinfo;
2265 } else if (strcmp(p,"sThreadInfo") == 0) {
/* fThreadInfo starts the CPU walk; each sThreadInfo reports the next
 * CPU until "l" (end of list) is sent. */
2266 report_cpuinfo:
2267 if (s->query_cpu) {
2268 snprintf(buf, sizeof(buf), "m%x", cpu_index(s->query_cpu));
2269 put_packet(s, buf);
2270 s->query_cpu = s->query_cpu->next_cpu;
2271 } else
2272 put_packet(s, "l");
2273 break;
2274 } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
2275 thread = strtoull(p+16, (char **)&p, 16);
2276 env = find_cpu(thread);
2277 if (env != NULL) {
2278 cpu_synchronize_state(env);
2279 len = snprintf((char *)mem_buf, sizeof(mem_buf),
2280 "CPU#%d [%s]", env->cpu_index,
2281 env->halted ? "halted " : "running");
2282 memtohex(buf, mem_buf, len);
2283 put_packet(s, buf);
2285 break;
2287 #ifdef CONFIG_USER_ONLY
2288 else if (strncmp(p, "Offsets", 7) == 0) {
2289 TaskState *ts = s->c_cpu->opaque;
2291 snprintf(buf, sizeof(buf),
2292 "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
2293 ";Bss=" TARGET_ABI_FMT_lx,
2294 ts->info->code_offset,
2295 ts->info->data_offset,
2296 ts->info->data_offset);
2297 put_packet(s, buf);
2298 break;
2300 #else /* !CONFIG_USER_ONLY */
/* qRcmd — hex-encoded monitor command forwarded to the HMP monitor. */
2301 else if (strncmp(p, "Rcmd,", 5) == 0) {
2302 int len = strlen(p + 5);
2304 if ((len % 2) != 0) {
2305 put_packet(s, "E01");
2306 break;
2308 hextomem(mem_buf, p + 5, len);
2309 len = len / 2;
2310 mem_buf[len++] = 0;
2311 qemu_chr_be_write(s->mon_chr, mem_buf, len);
2312 put_packet(s, "OK");
2313 break;
2315 #endif /* !CONFIG_USER_ONLY */
2316 if (strncmp(p, "Supported", 9) == 0) {
2317 snprintf(buf, sizeof(buf), "PacketSize=%x", MAX_PACKET_LENGTH);
2318 #ifdef GDB_CORE_XML
2319 pstrcat(buf, sizeof(buf), ";qXfer:features:read+");
2320 #endif
2321 put_packet(s, buf);
2322 break;
2324 #ifdef GDB_CORE_XML
/* qXfer:features:read:annex:offset,length — serve a slice of the
 * target description XML, 'm' for more data, 'l' for the last chunk. */
2325 if (strncmp(p, "Xfer:features:read:", 19) == 0) {
2326 const char *xml;
2327 target_ulong total_len;
2329 gdb_has_xml = 1;
2330 p += 19;
2331 xml = get_feature_xml(p, &p);
2332 if (!xml) {
2333 snprintf(buf, sizeof(buf), "E00");
2334 put_packet(s, buf);
2335 break;
2338 if (*p == ':')
2339 p++;
2340 addr = strtoul(p, (char **)&p, 16);
2341 if (*p == ',')
2342 p++;
2343 len = strtoul(p, (char **)&p, 16);
2345 total_len = strlen(xml);
2346 if (addr > total_len) {
2347 snprintf(buf, sizeof(buf), "E00");
2348 put_packet(s, buf);
2349 break;
/* Cap the reply so the escaped payload still fits in one packet. */
2351 if (len > (MAX_PACKET_LENGTH - 5) / 2)
2352 len = (MAX_PACKET_LENGTH - 5) / 2;
2353 if (len < total_len - addr) {
2354 buf[0] = 'm';
2355 len = memtox(buf + 1, xml + addr, len);
2356 } else {
2357 buf[0] = 'l';
2358 len = memtox(buf + 1, xml + addr, total_len - addr);
2360 put_packet_binary(s, buf, len + 1);
2361 break;
2363 #endif
2364 /* Unrecognised 'q' command. */
2365 goto unknown_command;
2367 default:
2368 unknown_command:
2369 /* put empty packet */
2370 buf[0] = '\0';
2371 put_packet(s, buf);
2372 break;
2374 return RS_IDLE;
2377 void gdb_set_stop_cpu(CPUArchState *env)
2379 gdbserver_state->c_cpu = env;
2380 gdbserver_state->g_cpu = env;
2383 #ifndef CONFIG_USER_ONLY
2384 static void gdb_vm_state_change(void *opaque, int running, RunState state)
2386 GDBState *s = gdbserver_state;
2387 CPUArchState *env = s->c_cpu;
2388 char buf[256];
2389 const char *type;
2390 int ret;
2392 if (running || s->state == RS_INACTIVE) {
2393 return;
2395 /* Is there a GDB syscall waiting to be sent? */
2396 if (s->current_syscall_cb) {
2397 put_packet(s, s->syscall_buf);
2398 return;
2400 switch (state) {
2401 case RUN_STATE_DEBUG:
2402 if (env->watchpoint_hit) {
2403 switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
2404 case BP_MEM_READ:
2405 type = "r";
2406 break;
2407 case BP_MEM_ACCESS:
2408 type = "a";
2409 break;
2410 default:
2411 type = "";
2412 break;
2414 snprintf(buf, sizeof(buf),
2415 "T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
2416 GDB_SIGNAL_TRAP, cpu_index(env), type,
2417 env->watchpoint_hit->vaddr);
2418 env->watchpoint_hit = NULL;
2419 goto send_packet;
2421 tb_flush(env);
2422 ret = GDB_SIGNAL_TRAP;
2423 break;
2424 case RUN_STATE_PAUSED:
2425 ret = GDB_SIGNAL_INT;
2426 break;
2427 case RUN_STATE_SHUTDOWN:
2428 ret = GDB_SIGNAL_QUIT;
2429 break;
2430 case RUN_STATE_IO_ERROR:
2431 ret = GDB_SIGNAL_IO;
2432 break;
2433 case RUN_STATE_WATCHDOG:
2434 ret = GDB_SIGNAL_ALRM;
2435 break;
2436 case RUN_STATE_INTERNAL_ERROR:
2437 ret = GDB_SIGNAL_ABRT;
2438 break;
2439 case RUN_STATE_SAVE_VM:
2440 case RUN_STATE_RESTORE_VM:
2441 return;
2442 case RUN_STATE_FINISH_MIGRATE:
2443 ret = GDB_SIGNAL_XCPU;
2444 break;
2445 default:
2446 ret = GDB_SIGNAL_UNKNOWN;
2447 break;
2449 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", ret, cpu_index(env));
2451 send_packet:
2452 put_packet(s, buf);
2454 /* disable single step if it was enabled */
2455 cpu_single_step(env, 0);
2457 #endif
2459 /* Send a gdb syscall request.
2460 This accepts limited printf-style format specifiers, specifically:
2461 %x - target_ulong argument printed in hex.
2462 %lx - 64-bit argument printed in hex.
2463 %s - string pointer (target_ulong) and length (int) pair. */
2464 void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
2466 va_list va;
2467 char *p;
2468 char *p_end;
2469 target_ulong addr;
2470 uint64_t i64;
2471 GDBState *s;
2473 s = gdbserver_state;
2474 if (!s)
2475 return;
2476 s->current_syscall_cb = cb;
2477 #ifndef CONFIG_USER_ONLY
2478 vm_stop(RUN_STATE_DEBUG);
2479 #endif
2480 va_start(va, fmt);
2481 p = s->syscall_buf;
2482 p_end = &s->syscall_buf[sizeof(s->syscall_buf)];
2483 *(p++) = 'F';
2484 while (*fmt) {
2485 if (*fmt == '%') {
2486 fmt++;
2487 switch (*fmt++) {
2488 case 'x':
2489 addr = va_arg(va, target_ulong);
2490 p += snprintf(p, p_end - p, TARGET_FMT_lx, addr);
2491 break;
2492 case 'l':
2493 if (*(fmt++) != 'x')
2494 goto bad_format;
2495 i64 = va_arg(va, uint64_t);
2496 p += snprintf(p, p_end - p, "%" PRIx64, i64);
2497 break;
2498 case 's':
2499 addr = va_arg(va, target_ulong);
2500 p += snprintf(p, p_end - p, TARGET_FMT_lx "/%x",
2501 addr, va_arg(va, int));
2502 break;
2503 default:
2504 bad_format:
2505 fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n",
2506 fmt - 1);
2507 break;
2509 } else {
2510 *(p++) = *(fmt++);
2513 *p = 0;
2514 va_end(va);
2515 #ifdef CONFIG_USER_ONLY
2516 put_packet(s, s->syscall_buf);
2517 gdb_handlesig(s->c_cpu, 0);
2518 #else
2519 /* In this case wait to send the syscall packet until notification that
2520 the CPU has stopped. This must be done because if the packet is sent
2521 now the reply from the syscall request could be received while the CPU
2522 is still in the running state, which can cause packets to be dropped
2523 and state transition 'T' packets to be sent while the syscall is still
2524 being processed. */
2525 cpu_exit(s->c_cpu);
2526 #endif
2529 static void gdb_read_byte(GDBState *s, int ch)
2531 int i, csum;
2532 uint8_t reply;
2534 #ifndef CONFIG_USER_ONLY
2535 if (s->last_packet_len) {
2536 /* Waiting for a response to the last packet. If we see the start
2537 of a new command then abandon the previous response. */
2538 if (ch == '-') {
2539 #ifdef DEBUG_GDB
2540 printf("Got NACK, retransmitting\n");
2541 #endif
2542 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
2544 #ifdef DEBUG_GDB
2545 else if (ch == '+')
2546 printf("Got ACK\n");
2547 else
2548 printf("Got '%c' when expecting ACK/NACK\n", ch);
2549 #endif
2550 if (ch == '+' || ch == '$')
2551 s->last_packet_len = 0;
2552 if (ch != '$')
2553 return;
2555 if (runstate_is_running()) {
2556 /* when the CPU is running, we cannot do anything except stop
2557 it when receiving a char */
2558 vm_stop(RUN_STATE_PAUSED);
2559 } else
2560 #endif
2562 switch(s->state) {
2563 case RS_IDLE:
2564 if (ch == '$') {
2565 s->line_buf_index = 0;
2566 s->state = RS_GETLINE;
2568 break;
2569 case RS_GETLINE:
2570 if (ch == '#') {
2571 s->state = RS_CHKSUM1;
2572 } else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
2573 s->state = RS_IDLE;
2574 } else {
2575 s->line_buf[s->line_buf_index++] = ch;
2577 break;
2578 case RS_CHKSUM1:
2579 s->line_buf[s->line_buf_index] = '\0';
2580 s->line_csum = fromhex(ch) << 4;
2581 s->state = RS_CHKSUM2;
2582 break;
2583 case RS_CHKSUM2:
2584 s->line_csum |= fromhex(ch);
2585 csum = 0;
2586 for(i = 0; i < s->line_buf_index; i++) {
2587 csum += s->line_buf[i];
2589 if (s->line_csum != (csum & 0xff)) {
2590 reply = '-';
2591 put_buffer(s, &reply, 1);
2592 s->state = RS_IDLE;
2593 } else {
2594 reply = '+';
2595 put_buffer(s, &reply, 1);
2596 s->state = gdb_handle_packet(s, s->line_buf);
2598 break;
2599 default:
2600 abort();
2605 /* Tell the remote gdb that the process has exited. */
2606 void gdb_exit(CPUArchState *env, int code)
2608 GDBState *s;
2609 char buf[4];
2611 s = gdbserver_state;
2612 if (!s) {
2613 return;
2615 #ifdef CONFIG_USER_ONLY
2616 if (gdbserver_fd < 0 || s->fd < 0) {
2617 return;
2619 #endif
2621 snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
2622 put_packet(s, buf);
2624 #ifndef CONFIG_USER_ONLY
2625 if (s->chr) {
2626 qemu_chr_delete(s->chr);
2628 #endif
2631 #ifdef CONFIG_USER_ONLY
2633 gdb_queuesig (void)
2635 GDBState *s;
2637 s = gdbserver_state;
2639 if (gdbserver_fd < 0 || s->fd < 0)
2640 return 0;
2641 else
2642 return 1;
2646 gdb_handlesig (CPUArchState *env, int sig)
2648 GDBState *s;
2649 char buf[256];
2650 int n;
2652 s = gdbserver_state;
2653 if (gdbserver_fd < 0 || s->fd < 0)
2654 return sig;
2656 /* disable single step if it was enabled */
2657 cpu_single_step(env, 0);
2658 tb_flush(env);
2660 if (sig != 0)
2662 snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb (sig));
2663 put_packet(s, buf);
2665 /* put_packet() might have detected that the peer terminated the
2666 connection. */
2667 if (s->fd < 0)
2668 return sig;
2670 sig = 0;
2671 s->state = RS_IDLE;
2672 s->running_state = 0;
2673 while (s->running_state == 0) {
2674 n = read (s->fd, buf, 256);
2675 if (n > 0)
2677 int i;
2679 for (i = 0; i < n; i++)
2680 gdb_read_byte (s, buf[i]);
2682 else if (n == 0 || errno != EAGAIN)
2684 /* XXX: Connection closed. Should probably wait for another
2685 connection before continuing. */
2686 return sig;
2689 sig = s->signal;
2690 s->signal = 0;
2691 return sig;
2694 /* Tell the remote gdb that the process has exited due to SIG. */
2695 void gdb_signalled(CPUArchState *env, int sig)
2697 GDBState *s;
2698 char buf[4];
2700 s = gdbserver_state;
2701 if (gdbserver_fd < 0 || s->fd < 0)
2702 return;
2704 snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb (sig));
2705 put_packet(s, buf);
2708 static void gdb_accept(void)
2710 GDBState *s;
2711 struct sockaddr_in sockaddr;
2712 socklen_t len;
2713 int val, fd;
2715 for(;;) {
2716 len = sizeof(sockaddr);
2717 fd = accept(gdbserver_fd, (struct sockaddr *)&sockaddr, &len);
2718 if (fd < 0 && errno != EINTR) {
2719 perror("accept");
2720 return;
2721 } else if (fd >= 0) {
2722 #ifndef _WIN32
2723 fcntl(fd, F_SETFD, FD_CLOEXEC);
2724 #endif
2725 break;
2729 /* set short latency */
2730 val = 1;
2731 setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *)&val, sizeof(val));
2733 s = g_malloc0(sizeof(GDBState));
2734 s->c_cpu = first_cpu;
2735 s->g_cpu = first_cpu;
2736 s->fd = fd;
2737 gdb_has_xml = 0;
2739 gdbserver_state = s;
2741 fcntl(fd, F_SETFL, O_NONBLOCK);
/* Create a TCP listening socket bound to PORT on all interfaces.
   Returns the listening file descriptor, or -1 on failure (the error
   is reported via perror and any partially created socket is closed). */
static int gdbserver_open(int port)
{
    struct sockaddr_in sockaddr;
    int fd, val, ret;

    fd = socket(PF_INET, SOCK_STREAM, 0);
    if (fd < 0) {
        perror("socket");
        return -1;
    }
#ifndef _WIN32
    /* Don't leak the listening socket into exec'd children. */
    fcntl(fd, F_SETFD, FD_CLOEXEC);
#endif

    /* allow fast reuse */
    val = 1;
    setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *)&val, sizeof(val));

    /* Zero the whole address so the sin_zero padding does not carry
       stack garbage into bind(); some stacks require it cleared. */
    memset(&sockaddr, 0, sizeof(sockaddr));
    sockaddr.sin_family = AF_INET;
    sockaddr.sin_port = htons(port);
    sockaddr.sin_addr.s_addr = 0;   /* INADDR_ANY: listen on all interfaces */
    ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
    if (ret < 0) {
        perror("bind");
        close(fd);
        return -1;
    }
    ret = listen(fd, 0);
    if (ret < 0) {
        perror("listen");
        close(fd);
        return -1;
    }
    return fd;
}
2780 int gdbserver_start(int port)
2782 gdbserver_fd = gdbserver_open(port);
2783 if (gdbserver_fd < 0)
2784 return -1;
2785 /* accept connections */
2786 gdb_accept();
2787 return 0;
2790 /* Disable gdb stub for child processes. */
2791 void gdbserver_fork(CPUArchState *env)
2793 GDBState *s = gdbserver_state;
2794 if (gdbserver_fd < 0 || s->fd < 0)
2795 return;
2796 close(s->fd);
2797 s->fd = -1;
2798 cpu_breakpoint_remove_all(env, BP_GDB);
2799 cpu_watchpoint_remove_all(env, BP_GDB);
2801 #else
2802 static int gdb_chr_can_receive(void *opaque)
2804 /* We can handle an arbitrarily large amount of data.
2805 Pick the maximum packet size, which is as good as anything. */
2806 return MAX_PACKET_LENGTH;
2809 static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
2811 int i;
2813 for (i = 0; i < size; i++) {
2814 gdb_read_byte(gdbserver_state, buf[i]);
2818 static void gdb_chr_event(void *opaque, int event)
2820 switch (event) {
2821 case CHR_EVENT_OPENED:
2822 vm_stop(RUN_STATE_PAUSED);
2823 gdb_has_xml = 0;
2824 break;
2825 default:
2826 break;
2830 static void gdb_monitor_output(GDBState *s, const char *msg, int len)
2832 char buf[MAX_PACKET_LENGTH];
2834 buf[0] = 'O';
2835 if (len > (MAX_PACKET_LENGTH/2) - 1)
2836 len = (MAX_PACKET_LENGTH/2) - 1;
2837 memtohex(buf + 1, (uint8_t *)msg, len);
2838 put_packet(s, buf);
2841 static int gdb_monitor_write(CharDriverState *chr, const uint8_t *buf, int len)
2843 const char *p = (const char *)buf;
2844 int max_sz;
2846 max_sz = (sizeof(gdbserver_state->last_packet) - 2) / 2;
2847 for (;;) {
2848 if (len <= max_sz) {
2849 gdb_monitor_output(gdbserver_state, p, len);
2850 break;
2852 gdb_monitor_output(gdbserver_state, p, max_sz);
2853 p += max_sz;
2854 len -= max_sz;
2856 return len;
2859 #ifndef _WIN32
2860 static void gdb_sigterm_handler(int signal)
2862 if (runstate_is_running()) {
2863 vm_stop(RUN_STATE_PAUSED);
2866 #endif
/* System-mode entry point: attach the gdb stub to chardev DEVICE.
 *
 * DEVICE is a chardev spec ("tcp:...", "stdio", ...) or "none" to set up
 * the stub state without a backend.  Returns 0 on success, -1 on a NULL
 * device or chardev creation failure.  Safe to call again to re-target an
 * existing stub: the old chardev is deleted and the state reset, while the
 * monitor terminal and VM-state handler are kept from the first call. */
int gdbserver_start(const char *device)
{
    GDBState *s;
    char gdbstub_device_name[128];
    CharDriverState *chr = NULL;
    CharDriverState *mon_chr;

    if (!device)
        return -1;
    if (strcmp(device, "none") != 0) {
        if (strstart(device, "tcp:", NULL)) {
            /* enforce required TCP attributes */
            snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
                     "%s,nowait,nodelay,server", device);
            device = gdbstub_device_name;
        }
#ifndef _WIN32
        else if (strcmp(device, "stdio") == 0) {
            /* Ctrl-C on our stdio must stop the guest, not kill QEMU,
               because gdb owns the terminal. */
            struct sigaction act;

            memset(&act, 0, sizeof(act));
            act.sa_handler = gdb_sigterm_handler;
            sigaction(SIGINT, &act, NULL);
        }
#endif
        chr = qemu_chr_new("gdb", device, NULL);
        if (!chr)
            return -1;

        qemu_chr_add_handlers(chr, gdb_chr_can_receive, gdb_chr_receive,
                              gdb_chr_event, NULL);
    }

    s = gdbserver_state;
    if (!s) {
        /* First call: allocate the global state and the one-time pieces. */
        s = g_malloc0(sizeof(GDBState));
        gdbserver_state = s;

        qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);

        /* Initialize a monitor terminal for gdb */
        mon_chr = g_malloc0(sizeof(*mon_chr));
        mon_chr->chr_write = gdb_monitor_write;
        monitor_init(mon_chr, 0);
    } else {
        /* Re-start: drop the old chardev but keep the monitor terminal,
           then wipe the state before re-populating it below. */
        if (s->chr)
            qemu_chr_delete(s->chr);
        mon_chr = s->mon_chr;
        memset(s, 0, sizeof(GDBState));
    }
    s->c_cpu = first_cpu;
    s->g_cpu = first_cpu;
    s->chr = chr;
    /* With no backend ("none") the stub stays inactive until re-started. */
    s->state = chr ? RS_IDLE : RS_INACTIVE;
    s->mon_chr = mon_chr;
    s->current_syscall_cb = NULL;

    return 0;
}
2927 #endif