1 /*
2 * gdb server stub
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #include "qemu-common.h"
21 #ifdef CONFIG_USER_ONLY
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <stdarg.h>
25 #include <string.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
30 #include "qemu.h"
31 #else
32 #include "monitor/monitor.h"
33 #include "sysemu/char.h"
34 #include "sysemu/sysemu.h"
35 #include "exec/gdbstub.h"
36 #endif
38 #define MAX_PACKET_LENGTH 4096
40 #include "cpu.h"
41 #include "qemu/sockets.h"
42 #include "sysemu/kvm.h"
43 #include "qemu/bitops.h"
45 #ifndef TARGET_CPU_MEMORY_RW_DEBUG
46 static inline int target_memory_rw_debug(CPUArchState *env, target_ulong addr,
47 uint8_t *buf, int len, int is_write)
49 return cpu_memory_rw_debug(env, addr, buf, len, is_write);
51 #else
52 /* target_memory_rw_debug() defined in cpu.h */
53 #endif
55 enum {
56 GDB_SIGNAL_0 = 0,
57 GDB_SIGNAL_INT = 2,
58 GDB_SIGNAL_QUIT = 3,
59 GDB_SIGNAL_TRAP = 5,
60 GDB_SIGNAL_ABRT = 6,
61 GDB_SIGNAL_ALRM = 14,
62 GDB_SIGNAL_IO = 23,
63 GDB_SIGNAL_XCPU = 24,
64 GDB_SIGNAL_UNKNOWN = 143
67 #ifdef CONFIG_USER_ONLY
69 /* Map target signal numbers to GDB protocol signal numbers and vice
70 * versa. For user emulation's currently supported systems, we can
71 * assume most signals are defined.
74 static int gdb_signal_table[] = {
76 TARGET_SIGHUP,
77 TARGET_SIGINT,
78 TARGET_SIGQUIT,
79 TARGET_SIGILL,
80 TARGET_SIGTRAP,
81 TARGET_SIGABRT,
82 -1, /* SIGEMT */
83 TARGET_SIGFPE,
84 TARGET_SIGKILL,
85 TARGET_SIGBUS,
86 TARGET_SIGSEGV,
87 TARGET_SIGSYS,
88 TARGET_SIGPIPE,
89 TARGET_SIGALRM,
90 TARGET_SIGTERM,
91 TARGET_SIGURG,
92 TARGET_SIGSTOP,
93 TARGET_SIGTSTP,
94 TARGET_SIGCONT,
95 TARGET_SIGCHLD,
96 TARGET_SIGTTIN,
97 TARGET_SIGTTOU,
98 TARGET_SIGIO,
99 TARGET_SIGXCPU,
100 TARGET_SIGXFSZ,
101 TARGET_SIGVTALRM,
102 TARGET_SIGPROF,
103 TARGET_SIGWINCH,
104 -1, /* SIGLOST */
105 TARGET_SIGUSR1,
106 TARGET_SIGUSR2,
107 #ifdef TARGET_SIGPWR
108 TARGET_SIGPWR,
109 #else
111 #endif
112 -1, /* SIGPOLL */
124 #ifdef __SIGRTMIN
125 __SIGRTMIN + 1,
126 __SIGRTMIN + 2,
127 __SIGRTMIN + 3,
128 __SIGRTMIN + 4,
129 __SIGRTMIN + 5,
130 __SIGRTMIN + 6,
131 __SIGRTMIN + 7,
132 __SIGRTMIN + 8,
133 __SIGRTMIN + 9,
134 __SIGRTMIN + 10,
135 __SIGRTMIN + 11,
136 __SIGRTMIN + 12,
137 __SIGRTMIN + 13,
138 __SIGRTMIN + 14,
139 __SIGRTMIN + 15,
140 __SIGRTMIN + 16,
141 __SIGRTMIN + 17,
142 __SIGRTMIN + 18,
143 __SIGRTMIN + 19,
144 __SIGRTMIN + 20,
145 __SIGRTMIN + 21,
146 __SIGRTMIN + 22,
147 __SIGRTMIN + 23,
148 __SIGRTMIN + 24,
149 __SIGRTMIN + 25,
150 __SIGRTMIN + 26,
151 __SIGRTMIN + 27,
152 __SIGRTMIN + 28,
153 __SIGRTMIN + 29,
154 __SIGRTMIN + 30,
155 __SIGRTMIN + 31,
156 -1, /* SIGCANCEL */
157 __SIGRTMIN,
158 __SIGRTMIN + 32,
159 __SIGRTMIN + 33,
160 __SIGRTMIN + 34,
161 __SIGRTMIN + 35,
162 __SIGRTMIN + 36,
163 __SIGRTMIN + 37,
164 __SIGRTMIN + 38,
165 __SIGRTMIN + 39,
166 __SIGRTMIN + 40,
167 __SIGRTMIN + 41,
168 __SIGRTMIN + 42,
169 __SIGRTMIN + 43,
170 __SIGRTMIN + 44,
171 __SIGRTMIN + 45,
172 __SIGRTMIN + 46,
173 __SIGRTMIN + 47,
174 __SIGRTMIN + 48,
175 __SIGRTMIN + 49,
176 __SIGRTMIN + 50,
177 __SIGRTMIN + 51,
178 __SIGRTMIN + 52,
179 __SIGRTMIN + 53,
180 __SIGRTMIN + 54,
181 __SIGRTMIN + 55,
182 __SIGRTMIN + 56,
183 __SIGRTMIN + 57,
184 __SIGRTMIN + 58,
185 __SIGRTMIN + 59,
186 __SIGRTMIN + 60,
187 __SIGRTMIN + 61,
188 __SIGRTMIN + 62,
189 __SIGRTMIN + 63,
190 __SIGRTMIN + 64,
191 __SIGRTMIN + 65,
192 __SIGRTMIN + 66,
193 __SIGRTMIN + 67,
194 __SIGRTMIN + 68,
195 __SIGRTMIN + 69,
196 __SIGRTMIN + 70,
197 __SIGRTMIN + 71,
198 __SIGRTMIN + 72,
199 __SIGRTMIN + 73,
200 __SIGRTMIN + 74,
201 __SIGRTMIN + 75,
202 __SIGRTMIN + 76,
203 __SIGRTMIN + 77,
204 __SIGRTMIN + 78,
205 __SIGRTMIN + 79,
206 __SIGRTMIN + 80,
207 __SIGRTMIN + 81,
208 __SIGRTMIN + 82,
209 __SIGRTMIN + 83,
210 __SIGRTMIN + 84,
211 __SIGRTMIN + 85,
212 __SIGRTMIN + 86,
213 __SIGRTMIN + 87,
214 __SIGRTMIN + 88,
215 __SIGRTMIN + 89,
216 __SIGRTMIN + 90,
217 __SIGRTMIN + 91,
218 __SIGRTMIN + 92,
219 __SIGRTMIN + 93,
220 __SIGRTMIN + 94,
221 __SIGRTMIN + 95,
222 -1, /* SIGINFO */
223 -1, /* UNKNOWN */
224 -1, /* DEFAULT */
231 #endif
233 #else
234 /* In system mode we only need SIGINT and SIGTRAP; other signals
235 are not yet supported. */
237 enum {
238 TARGET_SIGINT = 2,
239 TARGET_SIGTRAP = 5
242 static int gdb_signal_table[] = {
245 TARGET_SIGINT,
248 TARGET_SIGTRAP
250 #endif
252 #ifdef CONFIG_USER_ONLY
253 static int target_signal_to_gdb (int sig)
255 int i;
256 for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++)
257 if (gdb_signal_table[i] == sig)
258 return i;
259 return GDB_SIGNAL_UNKNOWN;
261 #endif
263 static int gdb_signal_to_target (int sig)
265 if (sig < ARRAY_SIZE (gdb_signal_table))
266 return gdb_signal_table[sig];
267 else
268 return -1;
271 //#define DEBUG_GDB
273 typedef struct GDBRegisterState {
274 int base_reg;
275 int num_regs;
276 gdb_reg_cb get_reg;
277 gdb_reg_cb set_reg;
278 const char *xml;
279 struct GDBRegisterState *next;
280 } GDBRegisterState;
282 enum RSState {
283 RS_INACTIVE,
284 RS_IDLE,
285 RS_GETLINE,
286 RS_CHKSUM1,
287 RS_CHKSUM2,
289 typedef struct GDBState {
290 CPUArchState *c_cpu; /* current CPU for step/continue ops */
291 CPUArchState *g_cpu; /* current CPU for other ops */
292 CPUArchState *query_cpu; /* for q{f|s}ThreadInfo */
293 enum RSState state; /* parsing state */
294 char line_buf[MAX_PACKET_LENGTH];
295 int line_buf_index;
296 int line_csum;
297 uint8_t last_packet[MAX_PACKET_LENGTH + 4];
298 int last_packet_len;
299 int signal;
300 #ifdef CONFIG_USER_ONLY
301 int fd;
302 int running_state;
303 #else
304 CharDriverState *chr;
305 CharDriverState *mon_chr;
306 #endif
307 char syscall_buf[256];
308 gdb_syscall_complete_cb current_syscall_cb;
309 } GDBState;
311 /* By default use no IRQs and no timers while single stepping so as to
312 * make single stepping like an ICE HW step.
314 static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER;
316 static GDBState *gdbserver_state;
318 /* This is an ugly hack to cope with both new and old gdb.
319 If gdb sends qXfer:features:read then assume we're talking to a newish
320 gdb that understands target descriptions. */
321 static int gdb_has_xml;
323 #ifdef CONFIG_USER_ONLY
324 /* XXX: This is not thread safe. Do we care? */
325 static int gdbserver_fd = -1;
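/* Blocking read of one byte from the gdb client socket, retrying on
   EINTR/EAGAIN.  Returns the byte, or -1 on a hard error or EOF (in
   which case the connection is considered gone). */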
327 static int get_char(GDBState *s)
329 uint8_t ch;
330 int ret;
332 for(;;) {
333 ret = qemu_recv(s->fd, &ch, 1, 0);
334 if (ret < 0) {
335 if (errno == ECONNRESET)
336 s->fd = -1;
337 if (errno != EINTR && errno != EAGAIN)
338 return -1;
339 } else if (ret == 0) {
340 close(s->fd);
341 s->fd = -1;
342 return -1;
343 } else {
344 break;
347 return ch;
349 #endif
351 static enum {
352 GDB_SYS_UNKNOWN,
353 GDB_SYS_ENABLED,
354 GDB_SYS_DISABLED,
355 } gdb_syscall_mode;
357 /* If gdb is connected when the first semihosting syscall occurs then use
358 remote gdb syscalls. Otherwise use native file IO. */
359 int use_gdb_syscalls(void)
361 if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
362 gdb_syscall_mode = (gdbserver_state ? GDB_SYS_ENABLED
363 : GDB_SYS_DISABLED);
365 return gdb_syscall_mode == GDB_SYS_ENABLED;
368 /* Resume execution. */
369 static inline void gdb_continue(GDBState *s)
371 #ifdef CONFIG_USER_ONLY
372 s->running_state = 1;
373 #else
374 if (runstate_check(RUN_STATE_DEBUG)) {
375 vm_start();
377 #endif
380 static void put_buffer(GDBState *s, const uint8_t *buf, int len)
382 #ifdef CONFIG_USER_ONLY
383 int ret;
385 while (len > 0) {
386 ret = send(s->fd, buf, len, 0);
387 if (ret < 0) {
388 if (errno != EINTR && errno != EAGAIN)
389 return;
390 } else {
391 buf += ret;
392 len -= ret;
395 #else
396 qemu_chr_fe_write(s->chr, buf, len);
397 #endif
400 static inline int fromhex(int v)
402 if (v >= '0' && v <= '9')
403 return v - '0';
404 else if (v >= 'A' && v <= 'F')
405 return v - 'A' + 10;
406 else if (v >= 'a' && v <= 'f')
407 return v - 'a' + 10;
408 else
409 return 0;
412 static inline int tohex(int v)
414 if (v < 10)
415 return v + '0';
416 else
417 return v - 10 + 'a';
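/* Register and memory contents travel over the wire as ASCII hex, two
   characters per byte (e.g. the byte 0x2a is sent as "2a").
   memtohex()/hextomem() convert between the two representations. */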
420 static void memtohex(char *buf, const uint8_t *mem, int len)
422 int i, c;
423 char *q;
424 q = buf;
425 for(i = 0; i < len; i++) {
426 c = mem[i];
427 *q++ = tohex(c >> 4);
428 *q++ = tohex(c & 0xf);
430 *q = '\0';
433 static void hextomem(uint8_t *mem, const char *buf, int len)
435 int i;
437 for(i = 0; i < len; i++) {
438 mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
439 buf += 2;
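/* Remote serial protocol framing: each packet goes out as
     $<payload>#<checksum>
   where <checksum> is the modulo-256 sum of the payload bytes, encoded
   as two hex digits.  In user-mode emulation we additionally wait for
   gdb's '+' acknowledgement before considering the packet delivered. */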
443 /* return -1 if error, 0 if OK */
444 static int put_packet_binary(GDBState *s, const char *buf, int len)
446 int csum, i;
447 uint8_t *p;
449 for(;;) {
450 p = s->last_packet;
451 *(p++) = '$';
452 memcpy(p, buf, len);
453 p += len;
454 csum = 0;
455 for(i = 0; i < len; i++) {
456 csum += buf[i];
458 *(p++) = '#';
459 *(p++) = tohex((csum >> 4) & 0xf);
460 *(p++) = tohex((csum) & 0xf);
462 s->last_packet_len = p - s->last_packet;
463 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
465 #ifdef CONFIG_USER_ONLY
466 i = get_char(s);
467 if (i < 0)
468 return -1;
469 if (i == '+')
470 break;
471 #else
472 break;
473 #endif
475 return 0;
478 /* return -1 if error, 0 if OK */
479 static int put_packet(GDBState *s, const char *buf)
481 #ifdef DEBUG_GDB
482 printf("reply='%s'\n", buf);
483 #endif
485 return put_packet_binary(s, buf, strlen(buf));
488 /* The GDB remote protocol transfers values in target byte order. This means
489 we can use the raw memory access routines to access the value buffer.
490 Conveniently, these also handle the case where the buffer is mis-aligned.
492 #define GET_REG8(val) do { \
493 stb_p(mem_buf, val); \
494 return 1; \
495 } while(0)
496 #define GET_REG16(val) do { \
497 stw_p(mem_buf, val); \
498 return 2; \
499 } while(0)
500 #define GET_REG32(val) do { \
501 stl_p(mem_buf, val); \
502 return 4; \
503 } while(0)
504 #define GET_REG64(val) do { \
505 stq_p(mem_buf, val); \
506 return 8; \
507 } while(0)
509 #if TARGET_LONG_BITS == 64
510 #define GET_REGL(val) GET_REG64(val)
511 #define ldtul_p(addr) ldq_p(addr)
512 #else
513 #define GET_REGL(val) GET_REG32(val)
514 #define ldtul_p(addr) ldl_p(addr)
515 #endif
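/* Each GET_REG* macro stores a value into mem_buf in target byte order
   and returns the number of bytes written; that return value is how the
   per-target cpu_gdb_read_register() helpers report register width
   (a return of 0 means "unknown register"). */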
517 #if defined(TARGET_I386)
519 #ifdef TARGET_X86_64
520 static const int gpr_map[16] = {
521 R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
522 8, 9, 10, 11, 12, 13, 14, 15
524 #else
525 #define gpr_map gpr_map32
526 #endif
527 static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
529 #define NUM_CORE_REGS (CPU_NB_REGS * 2 + 25)
531 #define IDX_IP_REG CPU_NB_REGS
532 #define IDX_FLAGS_REG (IDX_IP_REG + 1)
533 #define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
534 #define IDX_FP_REGS (IDX_SEG_REGS + 6)
535 #define IDX_XMM_REGS (IDX_FP_REGS + 16)
536 #define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
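/* gdb register layout for i386/x86_64: the general-purpose registers
   come first, then EIP/RIP, EFLAGS, the six segment selectors, the x87
   stack plus its control/status words, the XMM registers and finally
   MXCSR -- hence the NUM_CORE_REGS count above. */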
538 static int cpu_gdb_read_register(CPUX86State *env, uint8_t *mem_buf, int n)
540 if (n < CPU_NB_REGS) {
541 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
542 GET_REG64(env->regs[gpr_map[n]]);
543 } else if (n < CPU_NB_REGS32) {
544 GET_REG32(env->regs[gpr_map32[n]]);
546 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
547 #ifdef USE_X86LDOUBLE
548 /* FIXME: byteswap float values - after fixing fpregs layout. */
549 memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
550 #else
551 memset(mem_buf, 0, 10);
552 #endif
553 return 10;
554 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
555 n -= IDX_XMM_REGS;
556 if (n < CPU_NB_REGS32 ||
557 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
558 stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
559 stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
560 return 16;
562 } else {
563 switch (n) {
564 case IDX_IP_REG:
565 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
566 GET_REG64(env->eip);
567 } else {
568 GET_REG32(env->eip);
570 case IDX_FLAGS_REG: GET_REG32(env->eflags);
572 case IDX_SEG_REGS: GET_REG32(env->segs[R_CS].selector);
573 case IDX_SEG_REGS + 1: GET_REG32(env->segs[R_SS].selector);
574 case IDX_SEG_REGS + 2: GET_REG32(env->segs[R_DS].selector);
575 case IDX_SEG_REGS + 3: GET_REG32(env->segs[R_ES].selector);
576 case IDX_SEG_REGS + 4: GET_REG32(env->segs[R_FS].selector);
577 case IDX_SEG_REGS + 5: GET_REG32(env->segs[R_GS].selector);
579 case IDX_FP_REGS + 8: GET_REG32(env->fpuc);
580 case IDX_FP_REGS + 9: GET_REG32((env->fpus & ~0x3800) |
581 (env->fpstt & 0x7) << 11);
582 case IDX_FP_REGS + 10: GET_REG32(0); /* ftag */
583 case IDX_FP_REGS + 11: GET_REG32(0); /* fiseg */
584 case IDX_FP_REGS + 12: GET_REG32(0); /* fioff */
585 case IDX_FP_REGS + 13: GET_REG32(0); /* foseg */
586 case IDX_FP_REGS + 14: GET_REG32(0); /* fooff */
587 case IDX_FP_REGS + 15: GET_REG32(0); /* fop */
589 case IDX_MXCSR_REG: GET_REG32(env->mxcsr);
592 return 0;
595 static int cpu_x86_gdb_load_seg(CPUX86State *env, int sreg, uint8_t *mem_buf)
597 uint16_t selector = ldl_p(mem_buf);
599 if (selector != env->segs[sreg].selector) {
600 #if defined(CONFIG_USER_ONLY)
601 cpu_x86_load_seg(env, sreg, selector);
602 #else
603 unsigned int limit, flags;
604 target_ulong base;
606 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
607 base = selector << 4;
608 limit = 0xffff;
609 flags = 0;
610 } else {
611 if (!cpu_x86_get_descr_debug(env, selector, &base, &limit, &flags))
612 return 4;
614 cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
615 #endif
617 return 4;
620 static int cpu_gdb_write_register(CPUX86State *env, uint8_t *mem_buf, int n)
622 uint32_t tmp;
624 if (n < CPU_NB_REGS) {
625 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
626 env->regs[gpr_map[n]] = ldtul_p(mem_buf);
627 return sizeof(target_ulong);
628 } else if (n < CPU_NB_REGS32) {
629 n = gpr_map32[n];
630 env->regs[n] &= ~0xffffffffUL;
631 env->regs[n] |= (uint32_t)ldl_p(mem_buf);
632 return 4;
634 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
635 #ifdef USE_X86LDOUBLE
636 /* FIXME: byteswap float values - after fixing fpregs layout. */
637 memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
638 #endif
639 return 10;
640 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
641 n -= IDX_XMM_REGS;
642 if (n < CPU_NB_REGS32 ||
643 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
644 env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
645 env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
646 return 16;
648 } else {
649 switch (n) {
650 case IDX_IP_REG:
651 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
652 env->eip = ldq_p(mem_buf);
653 return 8;
654 } else {
655 env->eip &= ~0xffffffffUL;
656 env->eip |= (uint32_t)ldl_p(mem_buf);
657 return 4;
659 case IDX_FLAGS_REG:
660 env->eflags = ldl_p(mem_buf);
661 return 4;
663 case IDX_SEG_REGS: return cpu_x86_gdb_load_seg(env, R_CS, mem_buf);
664 case IDX_SEG_REGS + 1: return cpu_x86_gdb_load_seg(env, R_SS, mem_buf);
665 case IDX_SEG_REGS + 2: return cpu_x86_gdb_load_seg(env, R_DS, mem_buf);
666 case IDX_SEG_REGS + 3: return cpu_x86_gdb_load_seg(env, R_ES, mem_buf);
667 case IDX_SEG_REGS + 4: return cpu_x86_gdb_load_seg(env, R_FS, mem_buf);
668 case IDX_SEG_REGS + 5: return cpu_x86_gdb_load_seg(env, R_GS, mem_buf);
670 case IDX_FP_REGS + 8:
671 env->fpuc = ldl_p(mem_buf);
672 return 4;
673 case IDX_FP_REGS + 9:
674 tmp = ldl_p(mem_buf);
675 env->fpstt = (tmp >> 11) & 7;
676 env->fpus = tmp & ~0x3800;
677 return 4;
678 case IDX_FP_REGS + 10: /* ftag */ return 4;
679 case IDX_FP_REGS + 11: /* fiseg */ return 4;
680 case IDX_FP_REGS + 12: /* fioff */ return 4;
681 case IDX_FP_REGS + 13: /* foseg */ return 4;
682 case IDX_FP_REGS + 14: /* fooff */ return 4;
683 case IDX_FP_REGS + 15: /* fop */ return 4;
685 case IDX_MXCSR_REG:
686 env->mxcsr = ldl_p(mem_buf);
687 return 4;
690 /* Unrecognised register. */
691 return 0;
694 #elif defined (TARGET_PPC)
696 /* Old gdb always expects FP registers. Newer (xml-aware) gdb only
697 expects whatever the target description contains. Due to a
698 historical mishap the FP registers appear in between core integer
699 regs and PC, MSR, CR, and so forth. We hack round this by giving the
700 FP regs zero size when talking to a newer gdb. */
701 #define NUM_CORE_REGS 71
702 #if defined (TARGET_PPC64)
703 #define GDB_CORE_XML "power64-core.xml"
704 #else
705 #define GDB_CORE_XML "power-core.xml"
706 #endif
708 static int cpu_gdb_read_register(CPUPPCState *env, uint8_t *mem_buf, int n)
710 if (n < 32) {
711 /* gprs */
712 GET_REGL(env->gpr[n]);
713 } else if (n < 64) {
714 /* fprs */
715 if (gdb_has_xml)
716 return 0;
717 stfq_p(mem_buf, env->fpr[n-32]);
718 return 8;
719 } else {
720 switch (n) {
721 case 64: GET_REGL(env->nip);
722 case 65: GET_REGL(env->msr);
723 case 66:
725 uint32_t cr = 0;
726 int i;
727 for (i = 0; i < 8; i++)
728 cr |= env->crf[i] << (32 - ((i + 1) * 4));
729 GET_REG32(cr);
731 case 67: GET_REGL(env->lr);
732 case 68: GET_REGL(env->ctr);
733 case 69: GET_REGL(env->xer);
734 case 70:
736 if (gdb_has_xml)
737 return 0;
738 GET_REG32(env->fpscr);
742 return 0;
745 static int cpu_gdb_write_register(CPUPPCState *env, uint8_t *mem_buf, int n)
747 if (n < 32) {
748 /* gprs */
749 env->gpr[n] = ldtul_p(mem_buf);
750 return sizeof(target_ulong);
751 } else if (n < 64) {
752 /* fprs */
753 if (gdb_has_xml)
754 return 0;
755 env->fpr[n-32] = ldfq_p(mem_buf);
756 return 8;
757 } else {
758 switch (n) {
759 case 64:
760 env->nip = ldtul_p(mem_buf);
761 return sizeof(target_ulong);
762 case 65:
763 ppc_store_msr(env, ldtul_p(mem_buf));
764 return sizeof(target_ulong);
765 case 66:
767 uint32_t cr = ldl_p(mem_buf);
768 int i;
769 for (i = 0; i < 8; i++)
770 env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
771 return 4;
773 case 67:
774 env->lr = ldtul_p(mem_buf);
775 return sizeof(target_ulong);
776 case 68:
777 env->ctr = ldtul_p(mem_buf);
778 return sizeof(target_ulong);
779 case 69:
780 env->xer = ldtul_p(mem_buf);
781 return sizeof(target_ulong);
782 case 70:
783 /* fpscr */
784 if (gdb_has_xml)
785 return 0;
786 store_fpscr(env, ldtul_p(mem_buf), 0xffffffff);
787 return sizeof(target_ulong);
790 return 0;
793 #elif defined (TARGET_SPARC)
795 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
796 #define NUM_CORE_REGS 86
797 #else
798 #define NUM_CORE_REGS 72
799 #endif
801 #ifdef TARGET_ABI32
802 #define GET_REGA(val) GET_REG32(val)
803 #else
804 #define GET_REGA(val) GET_REGL(val)
805 #endif
807 static int cpu_gdb_read_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
809 if (n < 8) {
810 /* g0..g7 */
811 GET_REGA(env->gregs[n]);
813 if (n < 32) {
814 /* register window */
815 GET_REGA(env->regwptr[n - 8]);
817 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
818 if (n < 64) {
819 /* fprs */
820 if (n & 1) {
821 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
822 } else {
823 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
826 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
827 switch (n) {
828 case 64: GET_REGA(env->y);
829 case 65: GET_REGA(cpu_get_psr(env));
830 case 66: GET_REGA(env->wim);
831 case 67: GET_REGA(env->tbr);
832 case 68: GET_REGA(env->pc);
833 case 69: GET_REGA(env->npc);
834 case 70: GET_REGA(env->fsr);
835 case 71: GET_REGA(0); /* csr */
836 default: GET_REGA(0);
838 #else
839 if (n < 64) {
840 /* f0-f31 */
841 if (n & 1) {
842 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
843 } else {
844 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
847 if (n < 80) {
848 /* f32-f62 (double width, even numbers only) */
849 GET_REG64(env->fpr[(n - 32) / 2].ll);
851 switch (n) {
852 case 80: GET_REGL(env->pc);
853 case 81: GET_REGL(env->npc);
854 case 82: GET_REGL((cpu_get_ccr(env) << 32) |
855 ((env->asi & 0xff) << 24) |
856 ((env->pstate & 0xfff) << 8) |
857 cpu_get_cwp64(env));
858 case 83: GET_REGL(env->fsr);
859 case 84: GET_REGL(env->fprs);
860 case 85: GET_REGL(env->y);
862 #endif
863 return 0;
866 static int cpu_gdb_write_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
868 #if defined(TARGET_ABI32)
869 abi_ulong tmp;
871 tmp = ldl_p(mem_buf);
872 #else
873 target_ulong tmp;
875 tmp = ldtul_p(mem_buf);
876 #endif
878 if (n < 8) {
879 /* g0..g7 */
880 env->gregs[n] = tmp;
881 } else if (n < 32) {
882 /* register window */
883 env->regwptr[n - 8] = tmp;
885 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
886 else if (n < 64) {
887 /* fprs */
888 /* f0-f31 */
889 if (n & 1) {
890 env->fpr[(n - 32) / 2].l.lower = tmp;
891 } else {
892 env->fpr[(n - 32) / 2].l.upper = tmp;
894 } else {
895 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
896 switch (n) {
897 case 64: env->y = tmp; break;
898 case 65: cpu_put_psr(env, tmp); break;
899 case 66: env->wim = tmp; break;
900 case 67: env->tbr = tmp; break;
901 case 68: env->pc = tmp; break;
902 case 69: env->npc = tmp; break;
903 case 70: env->fsr = tmp; break;
904 default: return 0;
907 return 4;
908 #else
909 else if (n < 64) {
910 /* f0-f31 */
911 tmp = ldl_p(mem_buf);
912 if (n & 1) {
913 env->fpr[(n - 32) / 2].l.lower = tmp;
914 } else {
915 env->fpr[(n - 32) / 2].l.upper = tmp;
917 return 4;
918 } else if (n < 80) {
919 /* f32-f62 (double width, even numbers only) */
920 env->fpr[(n - 32) / 2].ll = tmp;
921 } else {
922 switch (n) {
923 case 80: env->pc = tmp; break;
924 case 81: env->npc = tmp; break;
925 case 82:
926 cpu_put_ccr(env, tmp >> 32);
927 env->asi = (tmp >> 24) & 0xff;
928 env->pstate = (tmp >> 8) & 0xfff;
929 cpu_put_cwp64(env, tmp & 0xff);
930 break;
931 case 83: env->fsr = tmp; break;
932 case 84: env->fprs = tmp; break;
933 case 85: env->y = tmp; break;
934 default: return 0;
937 return 8;
938 #endif
940 #elif defined (TARGET_ARM)
942 /* Old gdb always expects FPA registers. Newer (xml-aware) gdb only expects
943 whatever the target description contains. Due to a historical mishap
944 the FPA registers appear in between core integer regs and the CPSR.
945 We hack round this by giving the FPA regs zero size when talking to a
946 newer gdb. */
947 #define NUM_CORE_REGS 26
948 #define GDB_CORE_XML "arm-core.xml"
950 static int cpu_gdb_read_register(CPUARMState *env, uint8_t *mem_buf, int n)
952 if (n < 16) {
953 /* Core integer register. */
954 GET_REG32(env->regs[n]);
956 if (n < 24) {
957 /* FPA registers. */
958 if (gdb_has_xml)
959 return 0;
960 memset(mem_buf, 0, 12);
961 return 12;
963 switch (n) {
964 case 24:
965 /* FPA status register. */
966 if (gdb_has_xml)
967 return 0;
968 GET_REG32(0);
969 case 25:
970 /* CPSR */
971 GET_REG32(cpsr_read(env));
973 /* Unknown register. */
974 return 0;
977 static int cpu_gdb_write_register(CPUARMState *env, uint8_t *mem_buf, int n)
979 uint32_t tmp;
981 tmp = ldl_p(mem_buf);
983 /* Mask out low bit of PC to work around gdb bugs. This will probably
984 cause problems if we ever implement the Jazelle DBX extensions. */
985 if (n == 15)
986 tmp &= ~1;
988 if (n < 16) {
989 /* Core integer register. */
990 env->regs[n] = tmp;
991 return 4;
993 if (n < 24) { /* 16-23 */
994 /* FPA registers (ignored). */
995 if (gdb_has_xml)
996 return 0;
997 return 12;
999 switch (n) {
1000 case 24:
1001 /* FPA status register (ignored). */
1002 if (gdb_has_xml)
1003 return 0;
1004 return 4;
1005 case 25:
1006 /* CPSR */
1007 cpsr_write (env, tmp, 0xffffffff);
1008 return 4;
1010 /* Unknown register. */
1011 return 0;
1014 #elif defined (TARGET_M68K)
1016 #define NUM_CORE_REGS 18
1018 #define GDB_CORE_XML "cf-core.xml"
1020 static int cpu_gdb_read_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1022 if (n < 8) {
1023 /* D0-D7 */
1024 GET_REG32(env->dregs[n]);
1025 } else if (n < 16) {
1026 /* A0-A7 */
1027 GET_REG32(env->aregs[n - 8]);
1028 } else {
1029 switch (n) {
1030 case 16: GET_REG32(env->sr);
1031 case 17: GET_REG32(env->pc);
1034 /* FP registers not included here because they vary between
1035 ColdFire and m68k. Use XML bits for these. */
1036 return 0;
1039 static int cpu_gdb_write_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1041 uint32_t tmp;
1043 tmp = ldl_p(mem_buf);
1045 if (n < 8) {
1046 /* D0-D7 */
1047 env->dregs[n] = tmp;
1048 } else if (n < 16) {
1049 /* A0-A7 */
1050 env->aregs[n - 8] = tmp;
1051 } else {
1052 switch (n) {
1053 case 16: env->sr = tmp; break;
1054 case 17: env->pc = tmp; break;
1055 default: return 0;
1058 return 4;
1060 #elif defined (TARGET_MIPS)
1062 #define NUM_CORE_REGS 73
1064 static int cpu_gdb_read_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1066 if (n < 32) {
1067 GET_REGL(env->active_tc.gpr[n]);
1069 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
1070 if (n >= 38 && n < 70) {
1071 if (env->CP0_Status & (1 << CP0St_FR))
1072 GET_REGL(env->active_fpu.fpr[n - 38].d);
1073 else
1074 GET_REGL(env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
1076 switch (n) {
1077 case 70: GET_REGL((int32_t)env->active_fpu.fcr31);
1078 case 71: GET_REGL((int32_t)env->active_fpu.fcr0);
1081 switch (n) {
1082 case 32: GET_REGL((int32_t)env->CP0_Status);
1083 case 33: GET_REGL(env->active_tc.LO[0]);
1084 case 34: GET_REGL(env->active_tc.HI[0]);
1085 case 35: GET_REGL(env->CP0_BadVAddr);
1086 case 36: GET_REGL((int32_t)env->CP0_Cause);
1087 case 37: GET_REGL(env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16));
1088 case 72: GET_REGL(0); /* fp */
1089 case 89: GET_REGL((int32_t)env->CP0_PRid);
1091 if (n >= 73 && n <= 88) {
1092 /* 16 embedded regs. */
1093 GET_REGL(0);
1096 return 0;
1099 /* convert MIPS rounding mode in FCR31 to IEEE library */
1100 static unsigned int ieee_rm[] =
1102 float_round_nearest_even,
1103 float_round_to_zero,
1104 float_round_up,
1105 float_round_down
1107 #define RESTORE_ROUNDING_MODE \
1108 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
1110 static int cpu_gdb_write_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1112 target_ulong tmp;
1114 tmp = ldtul_p(mem_buf);
1116 if (n < 32) {
1117 env->active_tc.gpr[n] = tmp;
1118 return sizeof(target_ulong);
1120 if (env->CP0_Config1 & (1 << CP0C1_FP)
1121 && n >= 38 && n < 73) {
1122 if (n < 70) {
1123 if (env->CP0_Status & (1 << CP0St_FR))
1124 env->active_fpu.fpr[n - 38].d = tmp;
1125 else
1126 env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
1128 switch (n) {
1129 case 70:
1130 env->active_fpu.fcr31 = tmp & 0xFF83FFFF;
1131 /* set rounding mode */
1132 RESTORE_ROUNDING_MODE;
1133 break;
1134 case 71: env->active_fpu.fcr0 = tmp; break;
1136 return sizeof(target_ulong);
1138 switch (n) {
1139 case 32: env->CP0_Status = tmp; break;
1140 case 33: env->active_tc.LO[0] = tmp; break;
1141 case 34: env->active_tc.HI[0] = tmp; break;
1142 case 35: env->CP0_BadVAddr = tmp; break;
1143 case 36: env->CP0_Cause = tmp; break;
1144 case 37:
1145 env->active_tc.PC = tmp & ~(target_ulong)1;
1146 if (tmp & 1) {
1147 env->hflags |= MIPS_HFLAG_M16;
1148 } else {
1149 env->hflags &= ~(MIPS_HFLAG_M16);
1151 break;
1152 case 72: /* fp, ignored */ break;
1153 default:
1154 if (n > 89)
1155 return 0;
1156 /* Other registers are readonly. Ignore writes. */
1157 break;
1160 return sizeof(target_ulong);
1162 #elif defined(TARGET_OPENRISC)
1164 #define NUM_CORE_REGS (32 + 3)
1166 static int cpu_gdb_read_register(CPUOpenRISCState *env, uint8_t *mem_buf, int n)
1168 if (n < 32) {
1169 GET_REG32(env->gpr[n]);
1170 } else {
1171 switch (n) {
1172 case 32: /* PPC */
1173 GET_REG32(env->ppc);
1174 break;
1176 case 33: /* NPC */
1177 GET_REG32(env->npc);
1178 break;
1180 case 34: /* SR */
1181 GET_REG32(env->sr);
1182 break;
1184 default:
1185 break;
1188 return 0;
1191 static int cpu_gdb_write_register(CPUOpenRISCState *env,
1192 uint8_t *mem_buf, int n)
1194 uint32_t tmp;
1196 if (n > NUM_CORE_REGS) {
1197 return 0;
1200 tmp = ldl_p(mem_buf);
1202 if (n < 32) {
1203 env->gpr[n] = tmp;
1204 } else {
1205 switch (n) {
1206 case 32: /* PPC */
1207 env->ppc = tmp;
1208 break;
1210 case 33: /* NPC */
1211 env->npc = tmp;
1212 break;
1214 case 34: /* SR */
1215 env->sr = tmp;
1216 break;
1218 default:
1219 break;
1222 return 4;
1224 #elif defined (TARGET_SH4)
1226 /* Hint: Use "set architecture sh4" in GDB to see fpu registers */
1227 /* FIXME: We should use XML for this. */
1229 #define NUM_CORE_REGS 59
1231 static int cpu_gdb_read_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1233 switch (n) {
1234 case 0 ... 7:
1235 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1236 GET_REGL(env->gregs[n + 16]);
1237 } else {
1238 GET_REGL(env->gregs[n]);
1240 case 8 ... 15:
1241 GET_REGL(env->gregs[n]);
1242 case 16:
1243 GET_REGL(env->pc);
1244 case 17:
1245 GET_REGL(env->pr);
1246 case 18:
1247 GET_REGL(env->gbr);
1248 case 19:
1249 GET_REGL(env->vbr);
1250 case 20:
1251 GET_REGL(env->mach);
1252 case 21:
1253 GET_REGL(env->macl);
1254 case 22:
1255 GET_REGL(env->sr);
1256 case 23:
1257 GET_REGL(env->fpul);
1258 case 24:
1259 GET_REGL(env->fpscr);
1260 case 25 ... 40:
1261 if (env->fpscr & FPSCR_FR) {
1262 stfl_p(mem_buf, env->fregs[n - 9]);
1263 } else {
1264 stfl_p(mem_buf, env->fregs[n - 25]);
1266 return 4;
1267 case 41:
1268 GET_REGL(env->ssr);
1269 case 42:
1270 GET_REGL(env->spc);
1271 case 43 ... 50:
1272 GET_REGL(env->gregs[n - 43]);
1273 case 51 ... 58:
1274 GET_REGL(env->gregs[n - (51 - 16)]);
1277 return 0;
1280 static int cpu_gdb_write_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1282 switch (n) {
1283 case 0 ... 7:
1284 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1285 env->gregs[n + 16] = ldl_p(mem_buf);
1286 } else {
1287 env->gregs[n] = ldl_p(mem_buf);
1289 break;
1290 case 8 ... 15:
1291 env->gregs[n] = ldl_p(mem_buf);
1292 break;
1293 case 16:
1294 env->pc = ldl_p(mem_buf);
1295 break;
1296 case 17:
1297 env->pr = ldl_p(mem_buf);
1298 break;
1299 case 18:
1300 env->gbr = ldl_p(mem_buf);
1301 break;
1302 case 19:
1303 env->vbr = ldl_p(mem_buf);
1304 break;
1305 case 20:
1306 env->mach = ldl_p(mem_buf);
1307 break;
1308 case 21:
1309 env->macl = ldl_p(mem_buf);
1310 break;
1311 case 22:
1312 env->sr = ldl_p(mem_buf);
1313 break;
1314 case 23:
1315 env->fpul = ldl_p(mem_buf);
1316 break;
1317 case 24:
1318 env->fpscr = ldl_p(mem_buf);
1319 break;
1320 case 25 ... 40:
1321 if (env->fpscr & FPSCR_FR) {
1322 env->fregs[n - 9] = ldfl_p(mem_buf);
1323 } else {
1324 env->fregs[n - 25] = ldfl_p(mem_buf);
1326 break;
1327 case 41:
1328 env->ssr = ldl_p(mem_buf);
1329 break;
1330 case 42:
1331 env->spc = ldl_p(mem_buf);
1332 break;
1333 case 43 ... 50:
1334 env->gregs[n - 43] = ldl_p(mem_buf);
1335 break;
1336 case 51 ... 58:
1337 env->gregs[n - (51 - 16)] = ldl_p(mem_buf);
1338 break;
1339 default: return 0;
1342 return 4;
1344 #elif defined (TARGET_MICROBLAZE)
1346 #define NUM_CORE_REGS (32 + 5)
1348 static int cpu_gdb_read_register(CPUMBState *env, uint8_t *mem_buf, int n)
1350 if (n < 32) {
1351 GET_REG32(env->regs[n]);
1352 } else {
1353 GET_REG32(env->sregs[n - 32]);
1355 return 0;
1358 static int cpu_gdb_write_register(CPUMBState *env, uint8_t *mem_buf, int n)
1360 uint32_t tmp;
1362 if (n > NUM_CORE_REGS)
1363 return 0;
1365 tmp = ldl_p(mem_buf);
1367 if (n < 32) {
1368 env->regs[n] = tmp;
1369 } else {
1370 env->sregs[n - 32] = tmp;
1372 return 4;
1374 #elif defined (TARGET_CRIS)
1376 #define NUM_CORE_REGS 49
1378 static int
1379 read_register_crisv10(CPUCRISState *env, uint8_t *mem_buf, int n)
1381 if (n < 15) {
1382 GET_REG32(env->regs[n]);
1385 if (n == 15) {
1386 GET_REG32(env->pc);
1389 if (n < 32) {
1390 switch (n) {
1391 case 16:
1392 GET_REG8(env->pregs[n - 16]);
1393 break;
1394 case 17:
1395 GET_REG8(env->pregs[n - 16]);
1396 break;
1397 case 20:
1398 case 21:
1399 GET_REG16(env->pregs[n - 16]);
1400 break;
1401 default:
1402 if (n >= 23) {
1403 GET_REG32(env->pregs[n - 16]);
1405 break;
1408 return 0;
1411 static int cpu_gdb_read_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1413 uint8_t srs;
1415 if (env->pregs[PR_VR] < 32)
1416 return read_register_crisv10(env, mem_buf, n);
1418 srs = env->pregs[PR_SRS];
1419 if (n < 16) {
1420 GET_REG32(env->regs[n]);
1423 if (n >= 21 && n < 32) {
1424 GET_REG32(env->pregs[n - 16]);
1426 if (n >= 33 && n < 49) {
1427 GET_REG32(env->sregs[srs][n - 33]);
1429 switch (n) {
1430 case 16: GET_REG8(env->pregs[0]);
1431 case 17: GET_REG8(env->pregs[1]);
1432 case 18: GET_REG32(env->pregs[2]);
1433 case 19: GET_REG8(srs);
1434 case 20: GET_REG16(env->pregs[4]);
1435 case 32: GET_REG32(env->pc);
1438 return 0;
1441 static int cpu_gdb_write_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1443 uint32_t tmp;
1445 if (n > 49)
1446 return 0;
1448 tmp = ldl_p(mem_buf);
1450 if (n < 16) {
1451 env->regs[n] = tmp;
1454 if (n >= 21 && n < 32) {
1455 env->pregs[n - 16] = tmp;
1458 /* FIXME: Should support function regs be writable? */
1459 switch (n) {
1460 case 16: return 1;
1461 case 17: return 1;
1462 case 18: env->pregs[PR_PID] = tmp; break;
1463 case 19: return 1;
1464 case 20: return 2;
1465 case 32: env->pc = tmp; break;
1468 return 4;
1470 #elif defined (TARGET_ALPHA)
1472 #define NUM_CORE_REGS 67
1474 static int cpu_gdb_read_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1476 uint64_t val;
1477 CPU_DoubleU d;
1479 switch (n) {
1480 case 0 ... 30:
1481 val = env->ir[n];
1482 break;
1483 case 32 ... 62:
1484 d.d = env->fir[n - 32];
1485 val = d.ll;
1486 break;
1487 case 63:
1488 val = cpu_alpha_load_fpcr(env);
1489 break;
1490 case 64:
1491 val = env->pc;
1492 break;
1493 case 66:
1494 val = env->unique;
1495 break;
1496 case 31:
1497 case 65:
1498 /* 31 really is the zero register; 65 is unassigned in the
1499 gdb protocol, but is still required to occupy 8 bytes. */
1500 val = 0;
1501 break;
1502 default:
1503 return 0;
1505 GET_REGL(val);
1508 static int cpu_gdb_write_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1510 target_ulong tmp = ldtul_p(mem_buf);
1511 CPU_DoubleU d;
1513 switch (n) {
1514 case 0 ... 30:
1515 env->ir[n] = tmp;
1516 break;
1517 case 32 ... 62:
1518 d.ll = tmp;
1519 env->fir[n - 32] = d.d;
1520 break;
1521 case 63:
1522 cpu_alpha_store_fpcr(env, tmp);
1523 break;
1524 case 64:
1525 env->pc = tmp;
1526 break;
1527 case 66:
1528 env->unique = tmp;
1529 break;
1530 case 31:
1531 case 65:
1532 /* 31 really is the zero register; 65 is unassigned in the
1533 gdb protocol, but is still required to occupy 8 bytes. */
1534 break;
1535 default:
1536 return 0;
1538 return 8;
1540 #elif defined (TARGET_S390X)
1542 #define NUM_CORE_REGS S390_NUM_REGS
1544 static int cpu_gdb_read_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1546 uint64_t val;
1547 int cc_op;
1549 switch (n) {
1550 case S390_PSWM_REGNUM:
1551 cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr);
1552 val = deposit64(env->psw.mask, 44, 2, cc_op);
1553 GET_REGL(val);
1554 break;
1555 case S390_PSWA_REGNUM:
1556 GET_REGL(env->psw.addr);
1557 break;
1558 case S390_R0_REGNUM ... S390_R15_REGNUM:
1559 GET_REGL(env->regs[n-S390_R0_REGNUM]);
1560 break;
1561 case S390_A0_REGNUM ... S390_A15_REGNUM:
1562 GET_REG32(env->aregs[n-S390_A0_REGNUM]);
1563 break;
1564 case S390_FPC_REGNUM:
1565 GET_REG32(env->fpc);
1566 break;
1567 case S390_F0_REGNUM ... S390_F15_REGNUM:
1568 GET_REG64(env->fregs[n-S390_F0_REGNUM].ll);
1569 break;
1572 return 0;
1575 static int cpu_gdb_write_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1577 target_ulong tmpl;
1578 uint32_t tmp32;
1579 int r = 8;
1580 tmpl = ldtul_p(mem_buf);
1581 tmp32 = ldl_p(mem_buf);
1583 switch (n) {
1584 case S390_PSWM_REGNUM:
1585 env->psw.mask = tmpl;
1586 env->cc_op = extract64(tmpl, 44, 2);
1587 break;
1588 case S390_PSWA_REGNUM:
1589 env->psw.addr = tmpl;
1590 break;
1591 case S390_R0_REGNUM ... S390_R15_REGNUM:
1592 env->regs[n-S390_R0_REGNUM] = tmpl;
1593 break;
1594 case S390_A0_REGNUM ... S390_A15_REGNUM:
1595 env->aregs[n-S390_A0_REGNUM] = tmp32;
1596 r = 4;
1597 break;
1598 case S390_FPC_REGNUM:
1599 env->fpc = tmp32;
1600 r = 4;
1601 break;
1602 case S390_F0_REGNUM ... S390_F15_REGNUM:
1603 env->fregs[n-S390_F0_REGNUM].ll = tmpl;
1604 break;
1605 default:
1606 return 0;
1608 return r;
1610 #elif defined (TARGET_LM32)
1612 #include "hw/lm32/lm32_pic.h"
1613 #define NUM_CORE_REGS (32 + 7)
1615 static int cpu_gdb_read_register(CPULM32State *env, uint8_t *mem_buf, int n)
1617 if (n < 32) {
1618 GET_REG32(env->regs[n]);
1619 } else {
1620 switch (n) {
1621 case 32:
1622 GET_REG32(env->pc);
1623 break;
1624 /* FIXME: put in right exception ID */
1625 case 33:
1626 GET_REG32(0);
1627 break;
1628 case 34:
1629 GET_REG32(env->eba);
1630 break;
1631 case 35:
1632 GET_REG32(env->deba);
1633 break;
1634 case 36:
1635 GET_REG32(env->ie);
1636 break;
1637 case 37:
1638 GET_REG32(lm32_pic_get_im(env->pic_state));
1639 break;
1640 case 38:
1641 GET_REG32(lm32_pic_get_ip(env->pic_state));
1642 break;
1645 return 0;
1648 static int cpu_gdb_write_register(CPULM32State *env, uint8_t *mem_buf, int n)
1650 uint32_t tmp;
1652 if (n > NUM_CORE_REGS) {
1653 return 0;
1656 tmp = ldl_p(mem_buf);
1658 if (n < 32) {
1659 env->regs[n] = tmp;
1660 } else {
1661 switch (n) {
1662 case 32:
1663 env->pc = tmp;
1664 break;
1665 case 34:
1666 env->eba = tmp;
1667 break;
1668 case 35:
1669 env->deba = tmp;
1670 break;
1671 case 36:
1672 env->ie = tmp;
1673 break;
1674 case 37:
1675 lm32_pic_set_im(env->pic_state, tmp);
1676 break;
1677 case 38:
1678 lm32_pic_set_ip(env->pic_state, tmp);
1679 break;
1682 return 4;
1684 #elif defined(TARGET_XTENSA)
1686 /* Use num_core_regs to see only non-privileged registers in an unmodified gdb.
1687 * Use num_regs to see all registers. gdb modification is required for that:
1688 * reset bit 0 in the 'flags' field of the register definitions in
1689 * gdb/xtensa-config.c, either in the gdb source tree or in the gdb overlay.
1691 #define NUM_CORE_REGS (env->config->gdb_regmap.num_regs)
1692 #define num_g_regs NUM_CORE_REGS
1694 static int cpu_gdb_read_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1696 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1698 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1699 return 0;
1702 switch (reg->type) {
1703 case 9: /*pc*/
1704 GET_REG32(env->pc);
1705 break;
1707 case 1: /*ar*/
1708 xtensa_sync_phys_from_window(env);
1709 GET_REG32(env->phys_regs[(reg->targno & 0xff) % env->config->nareg]);
1710 break;
1712 case 2: /*SR*/
1713 GET_REG32(env->sregs[reg->targno & 0xff]);
1714 break;
1716 case 3: /*UR*/
1717 GET_REG32(env->uregs[reg->targno & 0xff]);
1718 break;
1720 case 4: /*f*/
1721 GET_REG32(float32_val(env->fregs[reg->targno & 0x0f]));
1722 break;
1724 case 8: /*a*/
1725 GET_REG32(env->regs[reg->targno & 0x0f]);
1726 break;
1728 default:
1729 qemu_log("%s from reg %d of unsupported type %d\n",
1730 __func__, n, reg->type);
1731 return 0;
1735 static int cpu_gdb_write_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1737 uint32_t tmp;
1738 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1740 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1741 return 0;
1744 tmp = ldl_p(mem_buf);
1746 switch (reg->type) {
1747 case 9: /*pc*/
1748 env->pc = tmp;
1749 break;
1751 case 1: /*ar*/
1752 env->phys_regs[(reg->targno & 0xff) % env->config->nareg] = tmp;
1753 xtensa_sync_window_from_phys(env);
1754 break;
1756 case 2: /*SR*/
1757 env->sregs[reg->targno & 0xff] = tmp;
1758 break;
1760 case 3: /*UR*/
1761 env->uregs[reg->targno & 0xff] = tmp;
1762 break;
1764 case 4: /*f*/
1765 env->fregs[reg->targno & 0x0f] = make_float32(tmp);
1766 break;
1768 case 8: /*a*/
1769 env->regs[reg->targno & 0x0f] = tmp;
1770 break;
1772 default:
1773 qemu_log("%s to reg %d of unsupported type %d\n",
1774 __func__, n, reg->type);
1775 return 0;
1778 return 4;
1780 #else
1782 #define NUM_CORE_REGS 0
1784 static int cpu_gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int n)
1786 return 0;
1789 static int cpu_gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int n)
1791 return 0;
1794 #endif
1796 #if !defined(TARGET_XTENSA)
1797 static int num_g_regs = NUM_CORE_REGS;
1798 #endif
1800 #ifdef GDB_CORE_XML
1801 /* Encode data using the encoding for 'x' packets. */
1802 static int memtox(char *buf, const char *mem, int len)
1804 char *p = buf;
1805 char c;
1807 while (len--) {
1808 c = *(mem++);
1809 switch (c) {
1810 case '#': case '$': case '*': case '}':
1811 *(p++) = '}';
1812 *(p++) = c ^ 0x20;
1813 break;
1814 default:
1815 *(p++) = c;
1816 break;
1819 return p - buf;
1822 static const char *get_feature_xml(const char *p, const char **newp)
1824 size_t len;
1825 int i;
1826 const char *name;
1827 static char target_xml[1024];
1829 len = 0;
1830 while (p[len] && p[len] != ':')
1831 len++;
1832 *newp = p + len;
1834 name = NULL;
1835 if (strncmp(p, "target.xml", len) == 0) {
1836 /* Generate the XML description for this CPU. */
1837 if (!target_xml[0]) {
1838 GDBRegisterState *r;
1840 snprintf(target_xml, sizeof(target_xml),
1841 "<?xml version=\"1.0\"?>"
1842 "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
1843 "<target>"
1844 "<xi:include href=\"%s\"/>",
1845 GDB_CORE_XML);
1847 for (r = first_cpu->gdb_regs; r; r = r->next) {
1848 pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
1849 pstrcat(target_xml, sizeof(target_xml), r->xml);
1850 pstrcat(target_xml, sizeof(target_xml), "\"/>");
1852 pstrcat(target_xml, sizeof(target_xml), "</target>");
1854 return target_xml;
1856 for (i = 0; ; i++) {
1857 name = xml_builtin[i][0];
1858 if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len))
1859 break;
1861 return name ? xml_builtin[i][1] : NULL;
1863 #endif
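/* Dispatch a register read: numbers below NUM_CORE_REGS go to the
   per-target cpu_gdb_read_register(), anything above is looked up in
   the coprocessor register blocks added by gdb_register_coprocessor().
   gdb_write_register() below mirrors this for writes. */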
1865 static int gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int reg)
1867 GDBRegisterState *r;
1869 if (reg < NUM_CORE_REGS)
1870 return cpu_gdb_read_register(env, mem_buf, reg);
1872 for (r = env->gdb_regs; r; r = r->next) {
1873 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1874 return r->get_reg(env, mem_buf, reg - r->base_reg);
1877 return 0;
1880 static int gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int reg)
1882 GDBRegisterState *r;
1884 if (reg < NUM_CORE_REGS)
1885 return cpu_gdb_write_register(env, mem_buf, reg);
1887 for (r = env->gdb_regs; r; r = r->next) {
1888 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1889 return r->set_reg(env, mem_buf, reg - r->base_reg);
1892 return 0;
1895 #if !defined(TARGET_XTENSA)
1896 /* Register a supplemental set of CPU registers. If g_pos is nonzero it
1897 specifies the first register number and these registers are included in
1898 a standard "g" packet. Direction is relative to gdb, i.e. get_reg is
1899 gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
1902 void gdb_register_coprocessor(CPUArchState * env,
1903 gdb_reg_cb get_reg, gdb_reg_cb set_reg,
1904 int num_regs, const char *xml, int g_pos)
1906 GDBRegisterState *s;
1907 GDBRegisterState **p;
1908 static int last_reg = NUM_CORE_REGS;
1910 p = &env->gdb_regs;
1911 while (*p) {
1912 /* Check for duplicates. */
1913 if (strcmp((*p)->xml, xml) == 0)
1914 return;
1915 p = &(*p)->next;
1918 s = g_new0(GDBRegisterState, 1);
1919 s->base_reg = last_reg;
1920 s->num_regs = num_regs;
1921 s->get_reg = get_reg;
1922 s->set_reg = set_reg;
1923 s->xml = xml;
1925 /* Add to end of list. */
1926 last_reg += num_regs;
1927 *p = s;
1928 if (g_pos) {
1929 if (g_pos != s->base_reg) {
1930 fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n"
1931 "Expected %d got %d\n", xml, g_pos, s->base_reg);
1932 } else {
1933 num_g_regs = last_reg;
1937 #endif
1939 #ifndef CONFIG_USER_ONLY
1940 static const int xlat_gdb_type[] = {
1941 [GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE,
1942 [GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ,
1943 [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
1945 #endif
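/* Breakpoint/watchpoint handling: under KVM the request is forwarded to
   the kernel via kvm_insert_breakpoint()/kvm_remove_breakpoint(); under
   TCG the breakpoint is applied to every CPU, bailing out on the first
   error.  Watchpoints only exist in system emulation. */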
1947 static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
1949 CPUArchState *env;
1950 int err = 0;
1952 if (kvm_enabled())
1953 return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1955 switch (type) {
1956 case GDB_BREAKPOINT_SW:
1957 case GDB_BREAKPOINT_HW:
1958 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1959 err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
1960 if (err)
1961 break;
1963 return err;
1964 #ifndef CONFIG_USER_ONLY
1965 case GDB_WATCHPOINT_WRITE:
1966 case GDB_WATCHPOINT_READ:
1967 case GDB_WATCHPOINT_ACCESS:
1968 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1969 err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
1970 NULL);
1971 if (err)
1972 break;
1974 return err;
1975 #endif
1976 default:
1977 return -ENOSYS;
1981 static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
1983 CPUArchState *env;
1984 int err = 0;
1986 if (kvm_enabled())
1987 return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1989 switch (type) {
1990 case GDB_BREAKPOINT_SW:
1991 case GDB_BREAKPOINT_HW:
1992 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1993 err = cpu_breakpoint_remove(env, addr, BP_GDB);
1994 if (err)
1995 break;
1997 return err;
1998 #ifndef CONFIG_USER_ONLY
1999 case GDB_WATCHPOINT_WRITE:
2000 case GDB_WATCHPOINT_READ:
2001 case GDB_WATCHPOINT_ACCESS:
2002 for (env = first_cpu; env != NULL; env = env->next_cpu) {
2003 err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
2004 if (err)
2005 break;
2007 return err;
2008 #endif
2009 default:
2010 return -ENOSYS;
2014 static void gdb_breakpoint_remove_all(void)
2016 CPUArchState *env;
2018 if (kvm_enabled()) {
2019 kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
2020 return;
2023 for (env = first_cpu; env != NULL; env = env->next_cpu) {
2024 cpu_breakpoint_remove_all(env, BP_GDB);
2025 #ifndef CONFIG_USER_ONLY
2026 cpu_watchpoint_remove_all(env, BP_GDB);
2027 #endif
2031 static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
2033 cpu_synchronize_state(s->c_cpu);
2034 #if defined(TARGET_I386)
2035 s->c_cpu->eip = pc;
2036 #elif defined (TARGET_PPC)
2037 s->c_cpu->nip = pc;
2038 #elif defined (TARGET_SPARC)
2039 s->c_cpu->pc = pc;
2040 s->c_cpu->npc = pc + 4;
2041 #elif defined (TARGET_ARM)
2042 s->c_cpu->regs[15] = pc;
2043 #elif defined (TARGET_SH4)
2044 s->c_cpu->pc = pc;
2045 #elif defined (TARGET_MIPS)
2046 s->c_cpu->active_tc.PC = pc & ~(target_ulong)1;
2047 if (pc & 1) {
2048 s->c_cpu->hflags |= MIPS_HFLAG_M16;
2049 } else {
2050 s->c_cpu->hflags &= ~(MIPS_HFLAG_M16);
2052 #elif defined (TARGET_MICROBLAZE)
2053 s->c_cpu->sregs[SR_PC] = pc;
2054 #elif defined(TARGET_OPENRISC)
2055 s->c_cpu->pc = pc;
2056 #elif defined (TARGET_CRIS)
2057 s->c_cpu->pc = pc;
2058 #elif defined (TARGET_ALPHA)
2059 s->c_cpu->pc = pc;
2060 #elif defined (TARGET_S390X)
2061 s->c_cpu->psw.addr = pc;
2062 #elif defined (TARGET_LM32)
2063 s->c_cpu->pc = pc;
2064 #elif defined(TARGET_XTENSA)
2065 s->c_cpu->pc = pc;
2066 #endif
2069 static CPUArchState *find_cpu(uint32_t thread_id)
2071 CPUArchState *env;
2072 CPUState *cpu;
2074 for (env = first_cpu; env != NULL; env = env->next_cpu) {
2075 cpu = ENV_GET_CPU(env);
2076 if (cpu_index(cpu) == thread_id) {
2077 return env;
2081 return NULL;
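/* Main packet dispatcher.  Handles the usual remote-protocol requests:
   '?' (stop reason), 'c'/'C'/'s'/'vCont' (resume/step), 'g'/'G' and
   'p'/'P' (register access), 'm'/'M' (memory access), 'Z'/'z'
   (breakpoints and watchpoints), 'H'/'T' (thread selection/liveness),
   'k'/'D' (kill/detach), 'F' (file-I/O reply) and the 'q'/'Q' query
   family.  Unrecognised packets get an empty response. */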
2084 static int gdb_handle_packet(GDBState *s, const char *line_buf)
2086 CPUArchState *env;
2087 const char *p;
2088 uint32_t thread;
2089 int ch, reg_size, type, res;
2090 char buf[MAX_PACKET_LENGTH];
2091 uint8_t mem_buf[MAX_PACKET_LENGTH];
2092 uint8_t *registers;
2093 target_ulong addr, len;
2095 #ifdef DEBUG_GDB
2096 printf("command='%s'\n", line_buf);
2097 #endif
2098 p = line_buf;
2099 ch = *p++;
2100 switch(ch) {
2101 case '?':
2102 /* TODO: Make this return the correct value for user-mode. */
2103 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", GDB_SIGNAL_TRAP,
2104 cpu_index(ENV_GET_CPU(s->c_cpu)));
2105 put_packet(s, buf);
2106 /* Remove all the breakpoints when this query is issued,
2107          * because gdb is doing an initial connect and the state
2108 * should be cleaned up.
2110 gdb_breakpoint_remove_all();
2111 break;
2112 case 'c':
2113 if (*p != '\0') {
2114 addr = strtoull(p, (char **)&p, 16);
2115 gdb_set_cpu_pc(s, addr);
2117 s->signal = 0;
2118 gdb_continue(s);
2119 return RS_IDLE;
2120 case 'C':
2121 s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16));
2122 if (s->signal == -1)
2123 s->signal = 0;
2124 gdb_continue(s);
2125 return RS_IDLE;
2126 case 'v':
2127 if (strncmp(p, "Cont", 4) == 0) {
2128 int res_signal, res_thread;
2130 p += 4;
2131 if (*p == '?') {
2132 put_packet(s, "vCont;c;C;s;S");
2133 break;
2135 res = 0;
2136 res_signal = 0;
2137 res_thread = 0;
2138 while (*p) {
2139 int action, signal;
2141 if (*p++ != ';') {
2142 res = 0;
2143 break;
2145 action = *p++;
2146 signal = 0;
2147 if (action == 'C' || action == 'S') {
2148 signal = strtoul(p, (char **)&p, 16);
2149 } else if (action != 'c' && action != 's') {
2150 res = 0;
2151 break;
2153 thread = 0;
2154 if (*p == ':') {
2155 thread = strtoull(p+1, (char **)&p, 16);
2157 action = tolower(action);
2158 if (res == 0 || (res == 'c' && action == 's')) {
2159 res = action;
2160 res_signal = signal;
2161 res_thread = thread;
2164 if (res) {
2165 if (res_thread != -1 && res_thread != 0) {
2166 env = find_cpu(res_thread);
2167 if (env == NULL) {
2168 put_packet(s, "E22");
2169 break;
2171 s->c_cpu = env;
2173 if (res == 's') {
2174 cpu_single_step(s->c_cpu, sstep_flags);
2176 s->signal = res_signal;
2177 gdb_continue(s);
2178 return RS_IDLE;
2180 break;
2181 } else {
2182 goto unknown_command;
2184 case 'k':
2185 #ifdef CONFIG_USER_ONLY
2186 /* Kill the target */
2187 fprintf(stderr, "\nQEMU: Terminated via GDBstub\n");
2188 exit(0);
2189 #endif
2190 case 'D':
2191 /* Detach packet */
2192 gdb_breakpoint_remove_all();
2193 gdb_syscall_mode = GDB_SYS_DISABLED;
2194 gdb_continue(s);
2195 put_packet(s, "OK");
2196 break;
2197 case 's':
2198 if (*p != '\0') {
2199 addr = strtoull(p, (char **)&p, 16);
2200 gdb_set_cpu_pc(s, addr);
2202 cpu_single_step(s->c_cpu, sstep_flags);
2203 gdb_continue(s);
2204 return RS_IDLE;
2205 case 'F':
2207 target_ulong ret;
2208 target_ulong err;
2210 ret = strtoull(p, (char **)&p, 16);
2211 if (*p == ',') {
2212 p++;
2213 err = strtoull(p, (char **)&p, 16);
2214 } else {
2215 err = 0;
2217 if (*p == ',')
2218 p++;
2219 type = *p;
2220 if (s->current_syscall_cb) {
2221 s->current_syscall_cb(s->c_cpu, ret, err);
2222 s->current_syscall_cb = NULL;
2224 if (type == 'C') {
2225 put_packet(s, "T02");
2226 } else {
2227 gdb_continue(s);
2230 break;
2231 case 'g':
2232 cpu_synchronize_state(s->g_cpu);
2233 env = s->g_cpu;
2234 len = 0;
2235 for (addr = 0; addr < num_g_regs; addr++) {
2236 reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr);
2237 len += reg_size;
2239 memtohex(buf, mem_buf, len);
2240 put_packet(s, buf);
2241 break;
2242 case 'G':
2243 cpu_synchronize_state(s->g_cpu);
2244 env = s->g_cpu;
2245 registers = mem_buf;
2246 len = strlen(p) / 2;
2247 hextomem((uint8_t *)registers, p, len);
2248 for (addr = 0; addr < num_g_regs && len > 0; addr++) {
2249 reg_size = gdb_write_register(s->g_cpu, registers, addr);
2250 len -= reg_size;
2251 registers += reg_size;
2253 put_packet(s, "OK");
2254 break;
2255 case 'm':
2256 addr = strtoull(p, (char **)&p, 16);
2257 if (*p == ',')
2258 p++;
2259 len = strtoull(p, NULL, 16);
2260 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 0) != 0) {
2261 put_packet (s, "E14");
2262 } else {
2263 memtohex(buf, mem_buf, len);
2264 put_packet(s, buf);
2266 break;
2267 case 'M':
2268 addr = strtoull(p, (char **)&p, 16);
2269 if (*p == ',')
2270 p++;
2271 len = strtoull(p, (char **)&p, 16);
2272 if (*p == ':')
2273 p++;
2274 hextomem(mem_buf, p, len);
2275 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 1) != 0) {
2276 put_packet(s, "E14");
2277 } else {
2278 put_packet(s, "OK");
2280 break;
2281 case 'p':
2282         /* Older gdb are really dumb, and don't use 'g' if 'p' is available.
2283 This works, but can be very slow. Anything new enough to
2284 understand XML also knows how to use this properly. */
2285 if (!gdb_has_xml)
2286 goto unknown_command;
2287 addr = strtoull(p, (char **)&p, 16);
2288 reg_size = gdb_read_register(s->g_cpu, mem_buf, addr);
2289 if (reg_size) {
2290 memtohex(buf, mem_buf, reg_size);
2291 put_packet(s, buf);
2292 } else {
2293 put_packet(s, "E14");
2295 break;
2296 case 'P':
2297 if (!gdb_has_xml)
2298 goto unknown_command;
2299 addr = strtoull(p, (char **)&p, 16);
2300 if (*p == '=')
2301 p++;
2302 reg_size = strlen(p) / 2;
2303 hextomem(mem_buf, p, reg_size);
2304 gdb_write_register(s->g_cpu, mem_buf, addr);
2305 put_packet(s, "OK");
2306 break;
2307 case 'Z':
2308 case 'z':
2309 type = strtoul(p, (char **)&p, 16);
2310 if (*p == ',')
2311 p++;
2312 addr = strtoull(p, (char **)&p, 16);
2313 if (*p == ',')
2314 p++;
2315 len = strtoull(p, (char **)&p, 16);
2316 if (ch == 'Z')
2317 res = gdb_breakpoint_insert(addr, len, type);
2318 else
2319 res = gdb_breakpoint_remove(addr, len, type);
2320 if (res >= 0)
2321 put_packet(s, "OK");
2322 else if (res == -ENOSYS)
2323 put_packet(s, "");
2324 else
2325 put_packet(s, "E22");
2326 break;
2327 case 'H':
2328 type = *p++;
2329 thread = strtoull(p, (char **)&p, 16);
2330 if (thread == -1 || thread == 0) {
2331 put_packet(s, "OK");
2332 break;
2334 env = find_cpu(thread);
2335 if (env == NULL) {
2336 put_packet(s, "E22");
2337 break;
2339 switch (type) {
2340 case 'c':
2341 s->c_cpu = env;
2342 put_packet(s, "OK");
2343 break;
2344 case 'g':
2345 s->g_cpu = env;
2346 put_packet(s, "OK");
2347 break;
2348 default:
2349 put_packet(s, "E22");
2350 break;
2352 break;
2353 case 'T':
2354 thread = strtoull(p, (char **)&p, 16);
2355 env = find_cpu(thread);
2357 if (env != NULL) {
2358 put_packet(s, "OK");
2359 } else {
2360 put_packet(s, "E22");
2362 break;
2363 case 'q':
2364 case 'Q':
2365 /* parse any 'q' packets here */
2366 if (!strcmp(p,"qemu.sstepbits")) {
2367 /* Query Breakpoint bit definitions */
2368 snprintf(buf, sizeof(buf), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
2369 SSTEP_ENABLE,
2370 SSTEP_NOIRQ,
2371 SSTEP_NOTIMER);
2372 put_packet(s, buf);
2373 break;
2374 } else if (strncmp(p,"qemu.sstep",10) == 0) {
2375 /* Display or change the sstep_flags */
2376 p += 10;
2377 if (*p != '=') {
2378 /* Display current setting */
2379 snprintf(buf, sizeof(buf), "0x%x", sstep_flags);
2380 put_packet(s, buf);
2381 break;
2383 p++;
2384 type = strtoul(p, (char **)&p, 16);
2385 sstep_flags = type;
2386 put_packet(s, "OK");
2387 break;
2388 } else if (strcmp(p,"C") == 0) {
2389 /* "Current thread" remains vague in the spec, so always return
2390 * the first CPU (gdb returns the first thread). */
2391 put_packet(s, "QC1");
2392 break;
2393 } else if (strcmp(p,"fThreadInfo") == 0) {
2394 s->query_cpu = first_cpu;
2395 goto report_cpuinfo;
2396 } else if (strcmp(p,"sThreadInfo") == 0) {
2397 report_cpuinfo:
2398 if (s->query_cpu) {
2399 snprintf(buf, sizeof(buf), "m%x",
2400 cpu_index(ENV_GET_CPU(s->query_cpu)));
2401 put_packet(s, buf);
2402 s->query_cpu = s->query_cpu->next_cpu;
2403 } else
2404 put_packet(s, "l");
2405 break;
2406 } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
2407 thread = strtoull(p+16, (char **)&p, 16);
2408 env = find_cpu(thread);
2409 if (env != NULL) {
2410 CPUState *cpu = ENV_GET_CPU(env);
2411 cpu_synchronize_state(env);
2412 len = snprintf((char *)mem_buf, sizeof(mem_buf),
2413 "CPU#%d [%s]", cpu->cpu_index,
2414 cpu->halted ? "halted " : "running");
2415 memtohex(buf, mem_buf, len);
2416 put_packet(s, buf);
2418 break;
2420 #ifdef CONFIG_USER_ONLY
2421 else if (strncmp(p, "Offsets", 7) == 0) {
2422 TaskState *ts = s->c_cpu->opaque;
2424 snprintf(buf, sizeof(buf),
2425 "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
2426 ";Bss=" TARGET_ABI_FMT_lx,
2427 ts->info->code_offset,
2428 ts->info->data_offset,
2429 ts->info->data_offset);
2430 put_packet(s, buf);
2431 break;
2433 #else /* !CONFIG_USER_ONLY */
2434 else if (strncmp(p, "Rcmd,", 5) == 0) {
2435 int len = strlen(p + 5);
2437 if ((len % 2) != 0) {
2438 put_packet(s, "E01");
2439 break;
2441 hextomem(mem_buf, p + 5, len);
2442 len = len / 2;
2443 mem_buf[len++] = 0;
2444 qemu_chr_be_write(s->mon_chr, mem_buf, len);
2445 put_packet(s, "OK");
2446 break;
2448 #endif /* !CONFIG_USER_ONLY */
2449 if (strncmp(p, "Supported", 9) == 0) {
2450 snprintf(buf, sizeof(buf), "PacketSize=%x", MAX_PACKET_LENGTH);
2451 #ifdef GDB_CORE_XML
2452 pstrcat(buf, sizeof(buf), ";qXfer:features:read+");
2453 #endif
2454 put_packet(s, buf);
2455 break;
2457 #ifdef GDB_CORE_XML
2458 if (strncmp(p, "Xfer:features:read:", 19) == 0) {
2459 const char *xml;
2460 target_ulong total_len;
2462 gdb_has_xml = 1;
2463 p += 19;
2464 xml = get_feature_xml(p, &p);
2465 if (!xml) {
2466 snprintf(buf, sizeof(buf), "E00");
2467 put_packet(s, buf);
2468 break;
2469 }
2471 if (*p == ':')
2472 p++;
2473 addr = strtoul(p, (char **)&p, 16);
2474 if (*p == ',')
2475 p++;
2476 len = strtoul(p, (char **)&p, 16);
2478 total_len = strlen(xml);
2479 if (addr > total_len) {
2480 snprintf(buf, sizeof(buf), "E00");
2481 put_packet(s, buf);
2482 break;
2483 }
2484 if (len > (MAX_PACKET_LENGTH - 5) / 2)
2485 len = (MAX_PACKET_LENGTH - 5) / 2;
2486 if (len < total_len - addr) {
2487 buf[0] = 'm';
2488 len = memtox(buf + 1, xml + addr, len);
2489 } else {
2490 buf[0] = 'l';
2491 len = memtox(buf + 1, xml + addr, total_len - addr);
2492 }
2493 put_packet_binary(s, buf, len + 1);
2494 break;
2495 }
2496 #endif
2497 /* Unrecognised 'q' command. */
2498 goto unknown_command;
2500 default:
2501 unknown_command:
2502 /* put empty packet */
2503 buf[0] = '\0';
2504 put_packet(s, buf);
2505 break;
2506 }
2507 return RS_IDLE;
2508 }
2510 void gdb_set_stop_cpu(CPUArchState *env)
2511 {
2512 gdbserver_state->c_cpu = env;
2513 gdbserver_state->g_cpu = env;
2514 }
2516 #ifndef CONFIG_USER_ONLY
2517 static void gdb_vm_state_change(void *opaque, int running, RunState state)
2518 {
2519 GDBState *s = gdbserver_state;
2520 CPUArchState *env = s->c_cpu;
2521 CPUState *cpu = ENV_GET_CPU(env);
2522 char buf[256];
2523 const char *type;
2524 int ret;
2526 if (running || s->state == RS_INACTIVE) {
2527 return;
2528 }
2529 /* Is there a GDB syscall waiting to be sent? */
2530 if (s->current_syscall_cb) {
2531 put_packet(s, s->syscall_buf);
2532 return;
2533 }
2534 switch (state) {
2535 case RUN_STATE_DEBUG:
2536 if (env->watchpoint_hit) {
2537 switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
2538 case BP_MEM_READ:
2539 type = "r";
2540 break;
2541 case BP_MEM_ACCESS:
2542 type = "a";
2543 break;
2544 default:
2545 type = "";
2546 break;
2547 }
2548 snprintf(buf, sizeof(buf),
2549 "T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
2550 GDB_SIGNAL_TRAP, cpu_index(cpu), type,
2551 env->watchpoint_hit->vaddr);
2552 env->watchpoint_hit = NULL;
2553 goto send_packet;
2554 }
2555 tb_flush(env);
2556 ret = GDB_SIGNAL_TRAP;
2557 break;
2558 case RUN_STATE_PAUSED:
2559 ret = GDB_SIGNAL_INT;
2560 break;
2561 case RUN_STATE_SHUTDOWN:
2562 ret = GDB_SIGNAL_QUIT;
2563 break;
2564 case RUN_STATE_IO_ERROR:
2565 ret = GDB_SIGNAL_IO;
2566 break;
2567 case RUN_STATE_WATCHDOG:
2568 ret = GDB_SIGNAL_ALRM;
2569 break;
2570 case RUN_STATE_INTERNAL_ERROR:
2571 ret = GDB_SIGNAL_ABRT;
2572 break;
2573 case RUN_STATE_SAVE_VM:
2574 case RUN_STATE_RESTORE_VM:
2575 return;
2576 case RUN_STATE_FINISH_MIGRATE:
2577 ret = GDB_SIGNAL_XCPU;
2578 break;
2579 default:
2580 ret = GDB_SIGNAL_UNKNOWN;
2581 break;
2582 }
2583 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", ret, cpu_index(cpu));
2585 send_packet:
2586 put_packet(s, buf);
2588 /* disable single step if it was enabled */
2589 cpu_single_step(env, 0);
2590 }
2591 #endif
2593 /* Send a gdb syscall request.
2594 This accepts limited printf-style format specifiers, specifically:
2595 %x - target_ulong argument printed in hex.
2596 %lx - 64-bit argument printed in hex.
2597 %s - string pointer (target_ulong) and length (int) pair. */
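/* Example (illustrative; path_addr/path_len/flags/mode are placeholder
names): a semihosting-style caller could issue
gdb_do_syscall(cb, "open,%s,%x,%x", path_addr, path_len, flags, mode);
which builds a packet of the form "Fopen,<addr>/<len>,<flags>,<mode>"
and reports the result through the completion callback cb. */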
2598 void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
2599 {
2600 va_list va;
2601 char *p;
2602 char *p_end;
2603 target_ulong addr;
2604 uint64_t i64;
2605 GDBState *s;
2607 s = gdbserver_state;
2608 if (!s)
2609 return;
2610 s->current_syscall_cb = cb;
2611 #ifndef CONFIG_USER_ONLY
2612 vm_stop(RUN_STATE_DEBUG);
2613 #endif
2614 va_start(va, fmt);
2615 p = s->syscall_buf;
2616 p_end = &s->syscall_buf[sizeof(s->syscall_buf)];
2617 *(p++) = 'F';
2618 while (*fmt) {
2619 if (*fmt == '%') {
2620 fmt++;
2621 switch (*fmt++) {
2622 case 'x':
2623 addr = va_arg(va, target_ulong);
2624 p += snprintf(p, p_end - p, TARGET_FMT_lx, addr);
2625 break;
2626 case 'l':
2627 if (*(fmt++) != 'x')
2628 goto bad_format;
2629 i64 = va_arg(va, uint64_t);
2630 p += snprintf(p, p_end - p, "%" PRIx64, i64);
2631 break;
2632 case 's':
2633 addr = va_arg(va, target_ulong);
2634 p += snprintf(p, p_end - p, TARGET_FMT_lx "/%x",
2635 addr, va_arg(va, int));
2636 break;
2637 default:
2638 bad_format:
2639 fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n",
2640 fmt - 1);
2641 break;
2642 }
2643 } else {
2644 *(p++) = *(fmt++);
2645 }
2646 }
2647 *p = 0;
2648 va_end(va);
2649 #ifdef CONFIG_USER_ONLY
2650 put_packet(s, s->syscall_buf);
2651 gdb_handlesig(s->c_cpu, 0);
2652 #else
2653 /* In this case wait to send the syscall packet until notification that
2654 the CPU has stopped. This must be done because if the packet is sent
2655 now the reply from the syscall request could be received while the CPU
2656 is still in the running state, which can cause packets to be dropped
2657 and state transition 'T' packets to be sent while the syscall is still
2658 being processed. */
2659 cpu_exit(s->c_cpu);
2660 #endif
2661 }
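/* Illustrative summary of the framing parsed below (not in the original
source): a remote-protocol packet is "$<data>#<nn>", where <nn> is the
two-hex-digit modulo-256 sum of the data bytes (e.g. "$g#67"); the
receiver answers '+' to acknowledge or '-' to request a retransmission. */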
2663 static void gdb_read_byte(GDBState *s, int ch)
2664 {
2665 int i, csum;
2666 uint8_t reply;
2668 #ifndef CONFIG_USER_ONLY
2669 if (s->last_packet_len) {
2670 /* Waiting for a response to the last packet. If we see the start
2671 of a new command then abandon the previous response. */
2672 if (ch == '-') {
2673 #ifdef DEBUG_GDB
2674 printf("Got NACK, retransmitting\n");
2675 #endif
2676 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
2677 }
2678 #ifdef DEBUG_GDB
2679 else if (ch == '+')
2680 printf("Got ACK\n");
2681 else
2682 printf("Got '%c' when expecting ACK/NACK\n", ch);
2683 #endif
2684 if (ch == '+' || ch == '$')
2685 s->last_packet_len = 0;
2686 if (ch != '$')
2687 return;
2688 }
2689 if (runstate_is_running()) {
2690 /* when the CPU is running, we cannot do anything except stop
2691 it when receiving a char */
2692 vm_stop(RUN_STATE_PAUSED);
2693 } else
2694 #endif
2695 {
2696 switch(s->state) {
2697 case RS_IDLE:
2698 if (ch == '$') {
2699 s->line_buf_index = 0;
2700 s->state = RS_GETLINE;
2701 }
2702 break;
2703 case RS_GETLINE:
2704 if (ch == '#') {
2705 s->state = RS_CHKSUM1;
2706 } else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
2707 s->state = RS_IDLE;
2708 } else {
2709 s->line_buf[s->line_buf_index++] = ch;
2710 }
2711 break;
2712 case RS_CHKSUM1:
2713 s->line_buf[s->line_buf_index] = '\0';
2714 s->line_csum = fromhex(ch) << 4;
2715 s->state = RS_CHKSUM2;
2716 break;
2717 case RS_CHKSUM2:
2718 s->line_csum |= fromhex(ch);
2719 csum = 0;
2720 for(i = 0; i < s->line_buf_index; i++) {
2721 csum += s->line_buf[i];
2722 }
2723 if (s->line_csum != (csum & 0xff)) {
2724 reply = '-';
2725 put_buffer(s, &reply, 1);
2726 s->state = RS_IDLE;
2727 } else {
2728 reply = '+';
2729 put_buffer(s, &reply, 1);
2730 s->state = gdb_handle_packet(s, s->line_buf);
2731 }
2732 break;
2733 default:
2734 abort();
2735 }
2736 }
2737 }
2739 /* Tell the remote gdb that the process has exited. */
2740 void gdb_exit(CPUArchState *env, int code)
2741 {
2742 GDBState *s;
2743 char buf[4];
2745 s = gdbserver_state;
2746 if (!s) {
2747 return;
2748 }
2749 #ifdef CONFIG_USER_ONLY
2750 if (gdbserver_fd < 0 || s->fd < 0) {
2751 return;
2752 }
2753 #endif
2755 snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
2756 put_packet(s, buf);
2758 #ifndef CONFIG_USER_ONLY
2759 if (s->chr) {
2760 qemu_chr_delete(s->chr);
2761 }
2762 #endif
2763 }
2765 #ifdef CONFIG_USER_ONLY
2766 int
2767 gdb_queuesig (void)
2768 {
2769 GDBState *s;
2771 s = gdbserver_state;
2773 if (gdbserver_fd < 0 || s->fd < 0)
2774 return 0;
2775 else
2776 return 1;
2777 }
2779 int
2780 gdb_handlesig (CPUArchState *env, int sig)
2781 {
2782 GDBState *s;
2783 char buf[256];
2784 int n;
2786 s = gdbserver_state;
2787 if (gdbserver_fd < 0 || s->fd < 0)
2788 return sig;
2790 /* disable single step if it was enabled */
2791 cpu_single_step(env, 0);
2792 tb_flush(env);
2794 if (sig != 0)
2795 {
2796 snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb (sig));
2797 put_packet(s, buf);
2798 }
2799 /* put_packet() might have detected that the peer terminated the
2800 connection. */
2801 if (s->fd < 0)
2802 return sig;
2804 sig = 0;
2805 s->state = RS_IDLE;
2806 s->running_state = 0;
2807 while (s->running_state == 0) {
2808 n = read (s->fd, buf, 256);
2809 if (n > 0)
2810 {
2811 int i;
2813 for (i = 0; i < n; i++)
2814 gdb_read_byte (s, buf[i]);
2815 }
2816 else if (n == 0 || errno != EAGAIN)
2817 {
2818 /* XXX: Connection closed. Should probably wait for another
2819 connection before continuing. */
2820 return sig;
2821 }
2822 }
2823 sig = s->signal;
2824 s->signal = 0;
2825 return sig;
2826 }
2828 /* Tell the remote gdb that the process has exited due to SIG. */
2829 void gdb_signalled(CPUArchState *env, int sig)
2830 {
2831 GDBState *s;
2832 char buf[4];
2834 s = gdbserver_state;
2835 if (gdbserver_fd < 0 || s->fd < 0)
2836 return;
2838 snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb (sig));
2839 put_packet(s, buf);
2840 }
2842 static void gdb_accept(void)
2843 {
2844 GDBState *s;
2845 struct sockaddr_in sockaddr;
2846 socklen_t len;
2847 int fd;
2849 for(;;) {
2850 len = sizeof(sockaddr);
2851 fd = accept(gdbserver_fd, (struct sockaddr *)&sockaddr, &len);
2852 if (fd < 0 && errno != EINTR) {
2853 perror("accept");
2854 return;
2855 } else if (fd >= 0) {
2856 #ifndef _WIN32
2857 fcntl(fd, F_SETFD, FD_CLOEXEC);
2858 #endif
2859 break;
2860 }
2861 }
2863 /* set short latency */
2864 socket_set_nodelay(fd);
2866 s = g_malloc0(sizeof(GDBState));
2867 s->c_cpu = first_cpu;
2868 s->g_cpu = first_cpu;
2869 s->fd = fd;
2870 gdb_has_xml = 0;
2872 gdbserver_state = s;
2874 fcntl(fd, F_SETFL, O_NONBLOCK);
2875 }
2877 static int gdbserver_open(int port)
2878 {
2879 struct sockaddr_in sockaddr;
2880 int fd, val, ret;
2882 fd = socket(PF_INET, SOCK_STREAM, 0);
2883 if (fd < 0) {
2884 perror("socket");
2885 return -1;
2886 }
2887 #ifndef _WIN32
2888 fcntl(fd, F_SETFD, FD_CLOEXEC);
2889 #endif
2891 /* allow fast reuse */
2892 val = 1;
2893 qemu_setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
2895 sockaddr.sin_family = AF_INET;
2896 sockaddr.sin_port = htons(port);
2897 sockaddr.sin_addr.s_addr = 0;
2898 ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
2899 if (ret < 0) {
2900 perror("bind");
2901 close(fd);
2902 return -1;
2903 }
2904 ret = listen(fd, 0);
2905 if (ret < 0) {
2906 perror("listen");
2907 close(fd);
2908 return -1;
2909 }
2910 return fd;
2911 }
2913 int gdbserver_start(int port)
2914 {
2915 gdbserver_fd = gdbserver_open(port);
2916 if (gdbserver_fd < 0)
2917 return -1;
2918 /* accept connections */
2919 gdb_accept();
2920 return 0;
2921 }
2923 /* Disable gdb stub for child processes. */
2924 void gdbserver_fork(CPUArchState *env)
2925 {
2926 GDBState *s = gdbserver_state;
2927 if (gdbserver_fd < 0 || s->fd < 0)
2928 return;
2929 close(s->fd);
2930 s->fd = -1;
2931 cpu_breakpoint_remove_all(env, BP_GDB);
2932 cpu_watchpoint_remove_all(env, BP_GDB);
2933 }
2934 #else
2935 static int gdb_chr_can_receive(void *opaque)
2936 {
2937 /* We can handle an arbitrarily large amount of data.
2938 Pick the maximum packet size, which is as good as anything. */
2939 return MAX_PACKET_LENGTH;
2940 }
2942 static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
2943 {
2944 int i;
2946 for (i = 0; i < size; i++) {
2947 gdb_read_byte(gdbserver_state, buf[i]);
2948 }
2949 }
2951 static void gdb_chr_event(void *opaque, int event)
2952 {
2953 switch (event) {
2954 case CHR_EVENT_OPENED:
2955 vm_stop(RUN_STATE_PAUSED);
2956 gdb_has_xml = 0;
2957 break;
2958 default:
2959 break;
2960 }
2961 }
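/* Example (illustrative, not part of the original source): monitor text
is forwarded to gdb as 'O' packets carrying hex-encoded bytes, so
"Hello\n" is sent as "O48656c6c6f0a". */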
2963 static void gdb_monitor_output(GDBState *s, const char *msg, int len)
2964 {
2965 char buf[MAX_PACKET_LENGTH];
2967 buf[0] = 'O';
2968 if (len > (MAX_PACKET_LENGTH/2) - 1)
2969 len = (MAX_PACKET_LENGTH/2) - 1;
2970 memtohex(buf + 1, (uint8_t *)msg, len);
2971 put_packet(s, buf);
2972 }
2974 static int gdb_monitor_write(CharDriverState *chr, const uint8_t *buf, int len)
2975 {
2976 const char *p = (const char *)buf;
2977 int max_sz;
2979 max_sz = (sizeof(gdbserver_state->last_packet) - 2) / 2;
2980 for (;;) {
2981 if (len <= max_sz) {
2982 gdb_monitor_output(gdbserver_state, p, len);
2983 break;
2984 }
2985 gdb_monitor_output(gdbserver_state, p, max_sz);
2986 p += max_sz;
2987 len -= max_sz;
2988 }
2989 return len;
2990 }
2992 #ifndef _WIN32
2993 static void gdb_sigterm_handler(int signal)
2994 {
2995 if (runstate_is_running()) {
2996 vm_stop(RUN_STATE_PAUSED);
2997 }
2998 }
2999 #endif
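/* Example (illustrative; standard QEMU invocation assumed): this entry
point is reached for command lines such as "-gdb tcp::1234" (or the
"-s" shorthand) and "-gdb stdio"; "tcp:" devices get
",nowait,nodelay,server" appended below before the character device is
opened. */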
3001 int gdbserver_start(const char *device)
3002 {
3003 GDBState *s;
3004 char gdbstub_device_name[128];
3005 CharDriverState *chr = NULL;
3006 CharDriverState *mon_chr;
3008 if (!device)
3009 return -1;
3010 if (strcmp(device, "none") != 0) {
3011 if (strstart(device, "tcp:", NULL)) {
3012 /* enforce required TCP attributes */
3013 snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
3014 "%s,nowait,nodelay,server", device);
3015 device = gdbstub_device_name;
3016 }
3017 #ifndef _WIN32
3018 else if (strcmp(device, "stdio") == 0) {
3019 struct sigaction act;
3021 memset(&act, 0, sizeof(act));
3022 act.sa_handler = gdb_sigterm_handler;
3023 sigaction(SIGINT, &act, NULL);
3024 }
3025 #endif
3026 chr = qemu_chr_new("gdb", device, NULL);
3027 if (!chr)
3028 return -1;
3030 qemu_chr_fe_claim_no_fail(chr);
3031 qemu_chr_add_handlers(chr, gdb_chr_can_receive, gdb_chr_receive,
3032 gdb_chr_event, NULL);
3033 }
3035 s = gdbserver_state;
3036 if (!s) {
3037 s = g_malloc0(sizeof(GDBState));
3038 gdbserver_state = s;
3040 qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);
3042 /* Initialize a monitor terminal for gdb */
3043 mon_chr = g_malloc0(sizeof(*mon_chr));
3044 mon_chr->chr_write = gdb_monitor_write;
3045 monitor_init(mon_chr, 0);
3046 } else {
3047 if (s->chr)
3048 qemu_chr_delete(s->chr);
3049 mon_chr = s->mon_chr;
3050 memset(s, 0, sizeof(GDBState));
3051 }
3052 s->c_cpu = first_cpu;
3053 s->g_cpu = first_cpu;
3054 s->chr = chr;
3055 s->state = chr ? RS_IDLE : RS_INACTIVE;
3056 s->mon_chr = mon_chr;
3057 s->current_syscall_cb = NULL;
3059 return 0;
3060 }
3061 #endif