1 /*
2 * gdb server stub
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #include "qemu-common.h"
21 #ifdef CONFIG_USER_ONLY
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <stdarg.h>
25 #include <string.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
30 #include "qemu.h"
31 #else
32 #include "monitor/monitor.h"
33 #include "char/char.h"
34 #include "sysemu/sysemu.h"
35 #include "exec/gdbstub.h"
36 #endif
38 #define MAX_PACKET_LENGTH 4096
40 #include "cpu.h"
41 #include "qemu/sockets.h"
42 #include "sysemu/kvm.h"
44 #ifndef TARGET_CPU_MEMORY_RW_DEBUG
45 static inline int target_memory_rw_debug(CPUArchState *env, target_ulong addr,
46 uint8_t *buf, int len, int is_write)
48 return cpu_memory_rw_debug(env, addr, buf, len, is_write);
50 #else
51 /* target_memory_rw_debug() defined in cpu.h */
52 #endif
54 enum {
55 GDB_SIGNAL_0 = 0,
56 GDB_SIGNAL_INT = 2,
57 GDB_SIGNAL_QUIT = 3,
58 GDB_SIGNAL_TRAP = 5,
59 GDB_SIGNAL_ABRT = 6,
60 GDB_SIGNAL_ALRM = 14,
61 GDB_SIGNAL_IO = 23,
62 GDB_SIGNAL_XCPU = 24,
63 GDB_SIGNAL_UNKNOWN = 143
66 #ifdef CONFIG_USER_ONLY
68 /* Map target signal numbers to GDB protocol signal numbers and vice
69 * versa. For user emulation's currently supported systems, we can
70 * assume most signals are defined.
73 static int gdb_signal_table[] = {
75 TARGET_SIGHUP,
76 TARGET_SIGINT,
77 TARGET_SIGQUIT,
78 TARGET_SIGILL,
79 TARGET_SIGTRAP,
80 TARGET_SIGABRT,
81 -1, /* SIGEMT */
82 TARGET_SIGFPE,
83 TARGET_SIGKILL,
84 TARGET_SIGBUS,
85 TARGET_SIGSEGV,
86 TARGET_SIGSYS,
87 TARGET_SIGPIPE,
88 TARGET_SIGALRM,
89 TARGET_SIGTERM,
90 TARGET_SIGURG,
91 TARGET_SIGSTOP,
92 TARGET_SIGTSTP,
93 TARGET_SIGCONT,
94 TARGET_SIGCHLD,
95 TARGET_SIGTTIN,
96 TARGET_SIGTTOU,
97 TARGET_SIGIO,
98 TARGET_SIGXCPU,
99 TARGET_SIGXFSZ,
100 TARGET_SIGVTALRM,
101 TARGET_SIGPROF,
102 TARGET_SIGWINCH,
103 -1, /* SIGLOST */
104 TARGET_SIGUSR1,
105 TARGET_SIGUSR2,
106 #ifdef TARGET_SIGPWR
107 TARGET_SIGPWR,
108 #else
110 #endif
111 -1, /* SIGPOLL */
123 #ifdef __SIGRTMIN
124 __SIGRTMIN + 1,
125 __SIGRTMIN + 2,
126 __SIGRTMIN + 3,
127 __SIGRTMIN + 4,
128 __SIGRTMIN + 5,
129 __SIGRTMIN + 6,
130 __SIGRTMIN + 7,
131 __SIGRTMIN + 8,
132 __SIGRTMIN + 9,
133 __SIGRTMIN + 10,
134 __SIGRTMIN + 11,
135 __SIGRTMIN + 12,
136 __SIGRTMIN + 13,
137 __SIGRTMIN + 14,
138 __SIGRTMIN + 15,
139 __SIGRTMIN + 16,
140 __SIGRTMIN + 17,
141 __SIGRTMIN + 18,
142 __SIGRTMIN + 19,
143 __SIGRTMIN + 20,
144 __SIGRTMIN + 21,
145 __SIGRTMIN + 22,
146 __SIGRTMIN + 23,
147 __SIGRTMIN + 24,
148 __SIGRTMIN + 25,
149 __SIGRTMIN + 26,
150 __SIGRTMIN + 27,
151 __SIGRTMIN + 28,
152 __SIGRTMIN + 29,
153 __SIGRTMIN + 30,
154 __SIGRTMIN + 31,
155 -1, /* SIGCANCEL */
156 __SIGRTMIN,
157 __SIGRTMIN + 32,
158 __SIGRTMIN + 33,
159 __SIGRTMIN + 34,
160 __SIGRTMIN + 35,
161 __SIGRTMIN + 36,
162 __SIGRTMIN + 37,
163 __SIGRTMIN + 38,
164 __SIGRTMIN + 39,
165 __SIGRTMIN + 40,
166 __SIGRTMIN + 41,
167 __SIGRTMIN + 42,
168 __SIGRTMIN + 43,
169 __SIGRTMIN + 44,
170 __SIGRTMIN + 45,
171 __SIGRTMIN + 46,
172 __SIGRTMIN + 47,
173 __SIGRTMIN + 48,
174 __SIGRTMIN + 49,
175 __SIGRTMIN + 50,
176 __SIGRTMIN + 51,
177 __SIGRTMIN + 52,
178 __SIGRTMIN + 53,
179 __SIGRTMIN + 54,
180 __SIGRTMIN + 55,
181 __SIGRTMIN + 56,
182 __SIGRTMIN + 57,
183 __SIGRTMIN + 58,
184 __SIGRTMIN + 59,
185 __SIGRTMIN + 60,
186 __SIGRTMIN + 61,
187 __SIGRTMIN + 62,
188 __SIGRTMIN + 63,
189 __SIGRTMIN + 64,
190 __SIGRTMIN + 65,
191 __SIGRTMIN + 66,
192 __SIGRTMIN + 67,
193 __SIGRTMIN + 68,
194 __SIGRTMIN + 69,
195 __SIGRTMIN + 70,
196 __SIGRTMIN + 71,
197 __SIGRTMIN + 72,
198 __SIGRTMIN + 73,
199 __SIGRTMIN + 74,
200 __SIGRTMIN + 75,
201 __SIGRTMIN + 76,
202 __SIGRTMIN + 77,
203 __SIGRTMIN + 78,
204 __SIGRTMIN + 79,
205 __SIGRTMIN + 80,
206 __SIGRTMIN + 81,
207 __SIGRTMIN + 82,
208 __SIGRTMIN + 83,
209 __SIGRTMIN + 84,
210 __SIGRTMIN + 85,
211 __SIGRTMIN + 86,
212 __SIGRTMIN + 87,
213 __SIGRTMIN + 88,
214 __SIGRTMIN + 89,
215 __SIGRTMIN + 90,
216 __SIGRTMIN + 91,
217 __SIGRTMIN + 92,
218 __SIGRTMIN + 93,
219 __SIGRTMIN + 94,
220 __SIGRTMIN + 95,
221 -1, /* SIGINFO */
222 -1, /* UNKNOWN */
223 -1, /* DEFAULT */
230 #endif
232 #else
233 /* In system mode we only need SIGINT and SIGTRAP; other signals
234 are not yet supported. */
236 enum {
237 TARGET_SIGINT = 2,
238 TARGET_SIGTRAP = 5
241 static int gdb_signal_table[] = {
244 TARGET_SIGINT,
247 TARGET_SIGTRAP
249 #endif
251 #ifdef CONFIG_USER_ONLY
252 static int target_signal_to_gdb (int sig)
254 int i;
255 for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++)
256 if (gdb_signal_table[i] == sig)
257 return i;
258 return GDB_SIGNAL_UNKNOWN;
260 #endif
262 static int gdb_signal_to_target (int sig)
264 if (sig < ARRAY_SIZE (gdb_signal_table))
265 return gdb_signal_table[sig];
266 else
267 return -1;
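/* Illustrative behaviour: gdb_signal_to_target(GDB_SIGNAL_TRAP) returns the
 * target's SIGTRAP number via the table above, any signal number past the end
 * of gdb_signal_table maps to -1, and (in user mode) target_signal_to_gdb()
 * falls back to GDB_SIGNAL_UNKNOWN when no table entry matches. */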
270 //#define DEBUG_GDB
272 typedef struct GDBRegisterState {
273 int base_reg;
274 int num_regs;
275 gdb_reg_cb get_reg;
276 gdb_reg_cb set_reg;
277 const char *xml;
278 struct GDBRegisterState *next;
279 } GDBRegisterState;
281 enum RSState {
282 RS_INACTIVE,
283 RS_IDLE,
284 RS_GETLINE,
285 RS_CHKSUM1,
286 RS_CHKSUM2,
288 typedef struct GDBState {
289 CPUArchState *c_cpu; /* current CPU for step/continue ops */
290 CPUArchState *g_cpu; /* current CPU for other ops */
291 CPUArchState *query_cpu; /* for q{f|s}ThreadInfo */
292 enum RSState state; /* parsing state */
293 char line_buf[MAX_PACKET_LENGTH];
294 int line_buf_index;
295 int line_csum;
296 uint8_t last_packet[MAX_PACKET_LENGTH + 4];
297 int last_packet_len;
298 int signal;
299 #ifdef CONFIG_USER_ONLY
300 int fd;
301 int running_state;
302 #else
303 CharDriverState *chr;
304 CharDriverState *mon_chr;
305 #endif
306 char syscall_buf[256];
307 gdb_syscall_complete_cb current_syscall_cb;
308 } GDBState;
310 /* By default use no IRQs and no timers while single stepping so as to
311 * make single stepping like an ICE HW step.
313 static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER;
315 static GDBState *gdbserver_state;
317 /* This is an ugly hack to cope with both new and old gdb.
318 If gdb sends qXfer:features:read then assume we're talking to a newish
319 gdb that understands target descriptions. */
320 static int gdb_has_xml;
322 #ifdef CONFIG_USER_ONLY
323 /* XXX: This is not thread safe. Do we care? */
324 static int gdbserver_fd = -1;
326 static int get_char(GDBState *s)
328 uint8_t ch;
329 int ret;
331 for(;;) {
332 ret = qemu_recv(s->fd, &ch, 1, 0);
333 if (ret < 0) {
334 if (errno == ECONNRESET)
335 s->fd = -1;
336 if (errno != EINTR && errno != EAGAIN)
337 return -1;
338 } else if (ret == 0) {
339 close(s->fd);
340 s->fd = -1;
341 return -1;
342 } else {
343 break;
346 return ch;
348 #endif
350 static enum {
351 GDB_SYS_UNKNOWN,
352 GDB_SYS_ENABLED,
353 GDB_SYS_DISABLED,
354 } gdb_syscall_mode;
356 /* If gdb is connected when the first semihosting syscall occurs then use
357 remote gdb syscalls. Otherwise use native file IO. */
358 int use_gdb_syscalls(void)
360 if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
361 gdb_syscall_mode = (gdbserver_state ? GDB_SYS_ENABLED
362 : GDB_SYS_DISABLED);
364 return gdb_syscall_mode == GDB_SYS_ENABLED;
367 /* Resume execution. */
368 static inline void gdb_continue(GDBState *s)
370 #ifdef CONFIG_USER_ONLY
371 s->running_state = 1;
372 #else
373 vm_start();
374 #endif
377 static void put_buffer(GDBState *s, const uint8_t *buf, int len)
379 #ifdef CONFIG_USER_ONLY
380 int ret;
382 while (len > 0) {
383 ret = send(s->fd, buf, len, 0);
384 if (ret < 0) {
385 if (errno != EINTR && errno != EAGAIN)
386 return;
387 } else {
388 buf += ret;
389 len -= ret;
392 #else
393 qemu_chr_fe_write(s->chr, buf, len);
394 #endif
397 static inline int fromhex(int v)
399 if (v >= '0' && v <= '9')
400 return v - '0';
401 else if (v >= 'A' && v <= 'F')
402 return v - 'A' + 10;
403 else if (v >= 'a' && v <= 'f')
404 return v - 'a' + 10;
405 else
406 return 0;
409 static inline int tohex(int v)
411 if (v < 10)
412 return v + '0';
413 else
414 return v - 10 + 'a';
417 static void memtohex(char *buf, const uint8_t *mem, int len)
419 int i, c;
420 char *q;
421 q = buf;
422 for(i = 0; i < len; i++) {
423 c = mem[i];
424 *q++ = tohex(c >> 4);
425 *q++ = tohex(c & 0xf);
427 *q = '\0';
430 static void hextomem(uint8_t *mem, const char *buf, int len)
432 int i;
434 for(i = 0; i < len; i++) {
435 mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
436 buf += 2;
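/* Example (illustrative): memtohex() turns the two bytes { 0xde, 0xad } into
 * the NUL-terminated string "dead", and hextomem() performs the inverse,
 * consuming two hex digits per output byte. */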
440 /* return -1 if error, 0 if OK */
441 static int put_packet_binary(GDBState *s, const char *buf, int len)
443 int csum, i;
444 uint8_t *p;
446 for(;;) {
447 p = s->last_packet;
448 *(p++) = '$';
449 memcpy(p, buf, len);
450 p += len;
451 csum = 0;
452 for(i = 0; i < len; i++) {
453 csum += buf[i];
455 *(p++) = '#';
456 *(p++) = tohex((csum >> 4) & 0xf);
457 *(p++) = tohex((csum) & 0xf);
459 s->last_packet_len = p - s->last_packet;
460 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
462 #ifdef CONFIG_USER_ONLY
463 i = get_char(s);
464 if (i < 0)
465 return -1;
466 if (i == '+')
467 break;
468 #else
469 break;
470 #endif
472 return 0;
475 /* return -1 if error, 0 if OK */
476 static int put_packet(GDBState *s, const char *buf)
478 #ifdef DEBUG_GDB
479 printf("reply='%s'\n", buf);
480 #endif
482 return put_packet_binary(s, buf, strlen(buf));
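/* Example (illustrative): put_packet(s, "OK") emits the framed reply
 * "$OK#9a" -- '$', the payload, '#', then the modulo-256 sum of the payload
 * bytes ('O' + 'K' == 0x9a) as two lowercase hex digits. */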
485 /* The GDB remote protocol transfers values in target byte order. This means
486 we can use the raw memory access routines to access the value buffer.
487 Conveniently, these also handle the case where the buffer is mis-aligned.
489 #define GET_REG8(val) do { \
490 stb_p(mem_buf, val); \
491 return 1; \
492 } while(0)
493 #define GET_REG16(val) do { \
494 stw_p(mem_buf, val); \
495 return 2; \
496 } while(0)
497 #define GET_REG32(val) do { \
498 stl_p(mem_buf, val); \
499 return 4; \
500 } while(0)
501 #define GET_REG64(val) do { \
502 stq_p(mem_buf, val); \
503 return 8; \
504 } while(0)
506 #if TARGET_LONG_BITS == 64
507 #define GET_REGL(val) GET_REG64(val)
508 #define ldtul_p(addr) ldq_p(addr)
509 #else
510 #define GET_REGL(val) GET_REG32(val)
511 #define ldtul_p(addr) ldl_p(addr)
512 #endif
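/* For instance, GET_REG32(env->eflags) stores a 4-byte value into mem_buf in
 * target byte order via stl_p() and returns 4 (the number of bytes produced)
 * from the enclosing read function; GET_REGL picks the 32- or 64-bit variant
 * to match target_ulong. */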
514 #if defined(TARGET_I386)
516 #ifdef TARGET_X86_64
517 static const int gpr_map[16] = {
518 R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
519 8, 9, 10, 11, 12, 13, 14, 15
521 #else
522 #define gpr_map gpr_map32
523 #endif
524 static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
526 #define NUM_CORE_REGS (CPU_NB_REGS * 2 + 25)
528 #define IDX_IP_REG CPU_NB_REGS
529 #define IDX_FLAGS_REG (IDX_IP_REG + 1)
530 #define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
531 #define IDX_FP_REGS (IDX_SEG_REGS + 6)
532 #define IDX_XMM_REGS (IDX_FP_REGS + 16)
533 #define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
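/* Worked layout (illustrative, 64-bit case where CPU_NB_REGS is 16):
 *   0-15  general purpose regs    16  rip         17  eflags
 *   18-23 segment selectors       24-31 st0-st7   32-39 fpu control/status
 *   40-55 xmm0-xmm15              56  mxcsr
 * which matches NUM_CORE_REGS == 16 * 2 + 25 == 57. */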
535 static int cpu_gdb_read_register(CPUX86State *env, uint8_t *mem_buf, int n)
537 if (n < CPU_NB_REGS) {
538 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
539 GET_REG64(env->regs[gpr_map[n]]);
540 } else if (n < CPU_NB_REGS32) {
541 GET_REG32(env->regs[gpr_map32[n]]);
543 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
544 #ifdef USE_X86LDOUBLE
545 /* FIXME: byteswap float values - after fixing fpregs layout. */
546 memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
547 #else
548 memset(mem_buf, 0, 10);
549 #endif
550 return 10;
551 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
552 n -= IDX_XMM_REGS;
553 if (n < CPU_NB_REGS32 ||
554 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
555 stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
556 stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
557 return 16;
559 } else {
560 switch (n) {
561 case IDX_IP_REG:
562 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
563 GET_REG64(env->eip);
564 } else {
565 GET_REG32(env->eip);
567 case IDX_FLAGS_REG: GET_REG32(env->eflags);
569 case IDX_SEG_REGS: GET_REG32(env->segs[R_CS].selector);
570 case IDX_SEG_REGS + 1: GET_REG32(env->segs[R_SS].selector);
571 case IDX_SEG_REGS + 2: GET_REG32(env->segs[R_DS].selector);
572 case IDX_SEG_REGS + 3: GET_REG32(env->segs[R_ES].selector);
573 case IDX_SEG_REGS + 4: GET_REG32(env->segs[R_FS].selector);
574 case IDX_SEG_REGS + 5: GET_REG32(env->segs[R_GS].selector);
576 case IDX_FP_REGS + 8: GET_REG32(env->fpuc);
577 case IDX_FP_REGS + 9: GET_REG32((env->fpus & ~0x3800) |
578 (env->fpstt & 0x7) << 11);
579 case IDX_FP_REGS + 10: GET_REG32(0); /* ftag */
580 case IDX_FP_REGS + 11: GET_REG32(0); /* fiseg */
581 case IDX_FP_REGS + 12: GET_REG32(0); /* fioff */
582 case IDX_FP_REGS + 13: GET_REG32(0); /* foseg */
583 case IDX_FP_REGS + 14: GET_REG32(0); /* fooff */
584 case IDX_FP_REGS + 15: GET_REG32(0); /* fop */
586 case IDX_MXCSR_REG: GET_REG32(env->mxcsr);
589 return 0;
592 static int cpu_x86_gdb_load_seg(CPUX86State *env, int sreg, uint8_t *mem_buf)
594 uint16_t selector = ldl_p(mem_buf);
596 if (selector != env->segs[sreg].selector) {
597 #if defined(CONFIG_USER_ONLY)
598 cpu_x86_load_seg(env, sreg, selector);
599 #else
600 unsigned int limit, flags;
601 target_ulong base;
603 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
604 base = selector << 4;
605 limit = 0xffff;
606 flags = 0;
607 } else {
608 if (!cpu_x86_get_descr_debug(env, selector, &base, &limit, &flags))
609 return 4;
611 cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
612 #endif
614 return 4;
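/* Example (illustrative): in real or VM86 mode a selector of 0x1234 written
 * by gdb yields base 0x12340 (selector << 4) with limit 0xffff, while in
 * protected mode the base/limit/flags are looked up from the descriptor
 * tables via cpu_x86_get_descr_debug(). */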
617 static int cpu_gdb_write_register(CPUX86State *env, uint8_t *mem_buf, int n)
619 uint32_t tmp;
621 if (n < CPU_NB_REGS) {
622 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
623 env->regs[gpr_map[n]] = ldtul_p(mem_buf);
624 return sizeof(target_ulong);
625 } else if (n < CPU_NB_REGS32) {
626 n = gpr_map32[n];
627 env->regs[n] &= ~0xffffffffUL;
628 env->regs[n] |= (uint32_t)ldl_p(mem_buf);
629 return 4;
631 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
632 #ifdef USE_X86LDOUBLE
633 /* FIXME: byteswap float values - after fixing fpregs layout. */
634 memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
635 #endif
636 return 10;
637 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
638 n -= IDX_XMM_REGS;
639 if (n < CPU_NB_REGS32 ||
640 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
641 env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
642 env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
643 return 16;
645 } else {
646 switch (n) {
647 case IDX_IP_REG:
648 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
649 env->eip = ldq_p(mem_buf);
650 return 8;
651 } else {
652 env->eip &= ~0xffffffffUL;
653 env->eip |= (uint32_t)ldl_p(mem_buf);
654 return 4;
656 case IDX_FLAGS_REG:
657 env->eflags = ldl_p(mem_buf);
658 return 4;
660 case IDX_SEG_REGS: return cpu_x86_gdb_load_seg(env, R_CS, mem_buf);
661 case IDX_SEG_REGS + 1: return cpu_x86_gdb_load_seg(env, R_SS, mem_buf);
662 case IDX_SEG_REGS + 2: return cpu_x86_gdb_load_seg(env, R_DS, mem_buf);
663 case IDX_SEG_REGS + 3: return cpu_x86_gdb_load_seg(env, R_ES, mem_buf);
664 case IDX_SEG_REGS + 4: return cpu_x86_gdb_load_seg(env, R_FS, mem_buf);
665 case IDX_SEG_REGS + 5: return cpu_x86_gdb_load_seg(env, R_GS, mem_buf);
667 case IDX_FP_REGS + 8:
668 env->fpuc = ldl_p(mem_buf);
669 return 4;
670 case IDX_FP_REGS + 9:
671 tmp = ldl_p(mem_buf);
672 env->fpstt = (tmp >> 11) & 7;
673 env->fpus = tmp & ~0x3800;
674 return 4;
675 case IDX_FP_REGS + 10: /* ftag */ return 4;
676 case IDX_FP_REGS + 11: /* fiseg */ return 4;
677 case IDX_FP_REGS + 12: /* fioff */ return 4;
678 case IDX_FP_REGS + 13: /* foseg */ return 4;
679 case IDX_FP_REGS + 14: /* fooff */ return 4;
680 case IDX_FP_REGS + 15: /* fop */ return 4;
682 case IDX_MXCSR_REG:
683 env->mxcsr = ldl_p(mem_buf);
684 return 4;
687 /* Unrecognised register. */
688 return 0;
691 #elif defined (TARGET_PPC)
693 /* Old gdb always expects FP registers. Newer (xml-aware) gdb only
694 expects whatever the target description contains. Due to a
695 historical mishap the FP registers appear in between core integer
696 regs and PC, MSR, CR, and so forth. We hack around this by giving the
697 FP regs zero size when talking to a newer gdb. */
698 #define NUM_CORE_REGS 71
699 #if defined (TARGET_PPC64)
700 #define GDB_CORE_XML "power64-core.xml"
701 #else
702 #define GDB_CORE_XML "power-core.xml"
703 #endif
705 static int cpu_gdb_read_register(CPUPPCState *env, uint8_t *mem_buf, int n)
707 if (n < 32) {
708 /* gprs */
709 GET_REGL(env->gpr[n]);
710 } else if (n < 64) {
711 /* fprs */
712 if (gdb_has_xml)
713 return 0;
714 stfq_p(mem_buf, env->fpr[n-32]);
715 return 8;
716 } else {
717 switch (n) {
718 case 64: GET_REGL(env->nip);
719 case 65: GET_REGL(env->msr);
720 case 66:
722 uint32_t cr = 0;
723 int i;
724 for (i = 0; i < 8; i++)
725 cr |= env->crf[i] << (32 - ((i + 1) * 4));
726 GET_REG32(cr);
728 case 67: GET_REGL(env->lr);
729 case 68: GET_REGL(env->ctr);
730 case 69: GET_REGL(env->xer);
731 case 70:
733 if (gdb_has_xml)
734 return 0;
735 GET_REG32(env->fpscr);
739 return 0;
742 static int cpu_gdb_write_register(CPUPPCState *env, uint8_t *mem_buf, int n)
744 if (n < 32) {
745 /* gprs */
746 env->gpr[n] = ldtul_p(mem_buf);
747 return sizeof(target_ulong);
748 } else if (n < 64) {
749 /* fprs */
750 if (gdb_has_xml)
751 return 0;
752 env->fpr[n-32] = ldfq_p(mem_buf);
753 return 8;
754 } else {
755 switch (n) {
756 case 64:
757 env->nip = ldtul_p(mem_buf);
758 return sizeof(target_ulong);
759 case 65:
760 ppc_store_msr(env, ldtul_p(mem_buf));
761 return sizeof(target_ulong);
762 case 66:
764 uint32_t cr = ldl_p(mem_buf);
765 int i;
766 for (i = 0; i < 8; i++)
767 env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
768 return 4;
770 case 67:
771 env->lr = ldtul_p(mem_buf);
772 return sizeof(target_ulong);
773 case 68:
774 env->ctr = ldtul_p(mem_buf);
775 return sizeof(target_ulong);
776 case 69:
777 env->xer = ldtul_p(mem_buf);
778 return sizeof(target_ulong);
779 case 70:
780 /* fpscr */
781 if (gdb_has_xml)
782 return 0;
783 return 4;
786 return 0;
789 #elif defined (TARGET_SPARC)
791 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
792 #define NUM_CORE_REGS 86
793 #else
794 #define NUM_CORE_REGS 72
795 #endif
797 #ifdef TARGET_ABI32
798 #define GET_REGA(val) GET_REG32(val)
799 #else
800 #define GET_REGA(val) GET_REGL(val)
801 #endif
803 static int cpu_gdb_read_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
805 if (n < 8) {
806 /* g0..g7 */
807 GET_REGA(env->gregs[n]);
809 if (n < 32) {
810 /* register window */
811 GET_REGA(env->regwptr[n - 8]);
813 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
814 if (n < 64) {
815 /* fprs */
816 if (n & 1) {
817 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
818 } else {
819 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
822 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
823 switch (n) {
824 case 64: GET_REGA(env->y);
825 case 65: GET_REGA(cpu_get_psr(env));
826 case 66: GET_REGA(env->wim);
827 case 67: GET_REGA(env->tbr);
828 case 68: GET_REGA(env->pc);
829 case 69: GET_REGA(env->npc);
830 case 70: GET_REGA(env->fsr);
831 case 71: GET_REGA(0); /* csr */
832 default: GET_REGA(0);
834 #else
835 if (n < 64) {
836 /* f0-f31 */
837 if (n & 1) {
838 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
839 } else {
840 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
843 if (n < 80) {
844 /* f32-f62 (double width, even numbers only) */
845 GET_REG64(env->fpr[(n - 32) / 2].ll);
847 switch (n) {
848 case 80: GET_REGL(env->pc);
849 case 81: GET_REGL(env->npc);
850 case 82: GET_REGL((cpu_get_ccr(env) << 32) |
851 ((env->asi & 0xff) << 24) |
852 ((env->pstate & 0xfff) << 8) |
853 cpu_get_cwp64(env));
854 case 83: GET_REGL(env->fsr);
855 case 84: GET_REGL(env->fprs);
856 case 85: GET_REGL(env->y);
858 #endif
859 return 0;
862 static int cpu_gdb_write_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
864 #if defined(TARGET_ABI32)
865 abi_ulong tmp;
867 tmp = ldl_p(mem_buf);
868 #else
869 target_ulong tmp;
871 tmp = ldtul_p(mem_buf);
872 #endif
874 if (n < 8) {
875 /* g0..g7 */
876 env->gregs[n] = tmp;
877 } else if (n < 32) {
878 /* register window */
879 env->regwptr[n - 8] = tmp;
881 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
882 else if (n < 64) {
883 /* fprs */
884 /* f0-f31 */
885 if (n & 1) {
886 env->fpr[(n - 32) / 2].l.lower = tmp;
887 } else {
888 env->fpr[(n - 32) / 2].l.upper = tmp;
890 } else {
891 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
892 switch (n) {
893 case 64: env->y = tmp; break;
894 case 65: cpu_put_psr(env, tmp); break;
895 case 66: env->wim = tmp; break;
896 case 67: env->tbr = tmp; break;
897 case 68: env->pc = tmp; break;
898 case 69: env->npc = tmp; break;
899 case 70: env->fsr = tmp; break;
900 default: return 0;
903 return 4;
904 #else
905 else if (n < 64) {
906 /* f0-f31 */
907 tmp = ldl_p(mem_buf);
908 if (n & 1) {
909 env->fpr[(n - 32) / 2].l.lower = tmp;
910 } else {
911 env->fpr[(n - 32) / 2].l.upper = tmp;
913 return 4;
914 } else if (n < 80) {
915 /* f32-f62 (double width, even numbers only) */
916 env->fpr[(n - 32) / 2].ll = tmp;
917 } else {
918 switch (n) {
919 case 80: env->pc = tmp; break;
920 case 81: env->npc = tmp; break;
921 case 82:
922 cpu_put_ccr(env, tmp >> 32);
923 env->asi = (tmp >> 24) & 0xff;
924 env->pstate = (tmp >> 8) & 0xfff;
925 cpu_put_cwp64(env, tmp & 0xff);
926 break;
927 case 83: env->fsr = tmp; break;
928 case 84: env->fprs = tmp; break;
929 case 85: env->y = tmp; break;
930 default: return 0;
933 return 8;
934 #endif
936 #elif defined (TARGET_ARM)
938 /* Old gdb always expects FPA registers. Newer (xml-aware) gdb only expects
939 whatever the target description contains. Due to a historical mishap
940 the FPA registers appear in between core integer regs and the CPSR.
941 We hack around this by giving the FPA regs zero size when talking to a
942 newer gdb. */
943 #define NUM_CORE_REGS 26
944 #define GDB_CORE_XML "arm-core.xml"
946 static int cpu_gdb_read_register(CPUARMState *env, uint8_t *mem_buf, int n)
948 if (n < 16) {
949 /* Core integer register. */
950 GET_REG32(env->regs[n]);
952 if (n < 24) {
953 /* FPA registers. */
954 if (gdb_has_xml)
955 return 0;
956 memset(mem_buf, 0, 12);
957 return 12;
959 switch (n) {
960 case 24:
961 /* FPA status register. */
962 if (gdb_has_xml)
963 return 0;
964 GET_REG32(0);
965 case 25:
966 /* CPSR */
967 GET_REG32(cpsr_read(env));
969 /* Unknown register. */
970 return 0;
973 static int cpu_gdb_write_register(CPUARMState *env, uint8_t *mem_buf, int n)
975 uint32_t tmp;
977 tmp = ldl_p(mem_buf);
979 /* Mask out low bit of PC to work around gdb bugs. This will probably
980 cause problems if we ever implement the Jazelle DBX extensions. */
981 if (n == 15)
982 tmp &= ~1;
984 if (n < 16) {
985 /* Core integer register. */
986 env->regs[n] = tmp;
987 return 4;
989 if (n < 24) { /* 16-23 */
990 /* FPA registers (ignored). */
991 if (gdb_has_xml)
992 return 0;
993 return 12;
995 switch (n) {
996 case 24:
997 /* FPA status register (ignored). */
998 if (gdb_has_xml)
999 return 0;
1000 return 4;
1001 case 25:
1002 /* CPSR */
1003 cpsr_write (env, tmp, 0xffffffff);
1004 return 4;
1006 /* Unknown register. */
1007 return 0;
1010 #elif defined (TARGET_M68K)
1012 #define NUM_CORE_REGS 18
1014 #define GDB_CORE_XML "cf-core.xml"
1016 static int cpu_gdb_read_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1018 if (n < 8) {
1019 /* D0-D7 */
1020 GET_REG32(env->dregs[n]);
1021 } else if (n < 16) {
1022 /* A0-A7 */
1023 GET_REG32(env->aregs[n - 8]);
1024 } else {
1025 switch (n) {
1026 case 16: GET_REG32(env->sr);
1027 case 17: GET_REG32(env->pc);
1030 /* FP registers not included here because they vary between
1031 ColdFire and m68k. Use XML bits for these. */
1032 return 0;
1035 static int cpu_gdb_write_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1037 uint32_t tmp;
1039 tmp = ldl_p(mem_buf);
1041 if (n < 8) {
1042 /* D0-D7 */
1043 env->dregs[n] = tmp;
1044 } else if (n < 16) {
1045 /* A0-A7 */
1046 env->aregs[n - 8] = tmp;
1047 } else {
1048 switch (n) {
1049 case 16: env->sr = tmp; break;
1050 case 17: env->pc = tmp; break;
1051 default: return 0;
1054 return 4;
1056 #elif defined (TARGET_MIPS)
1058 #define NUM_CORE_REGS 73
1060 static int cpu_gdb_read_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1062 if (n < 32) {
1063 GET_REGL(env->active_tc.gpr[n]);
1065 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
1066 if (n >= 38 && n < 70) {
1067 if (env->CP0_Status & (1 << CP0St_FR))
1068 GET_REGL(env->active_fpu.fpr[n - 38].d);
1069 else
1070 GET_REGL(env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
1072 switch (n) {
1073 case 70: GET_REGL((int32_t)env->active_fpu.fcr31);
1074 case 71: GET_REGL((int32_t)env->active_fpu.fcr0);
1077 switch (n) {
1078 case 32: GET_REGL((int32_t)env->CP0_Status);
1079 case 33: GET_REGL(env->active_tc.LO[0]);
1080 case 34: GET_REGL(env->active_tc.HI[0]);
1081 case 35: GET_REGL(env->CP0_BadVAddr);
1082 case 36: GET_REGL((int32_t)env->CP0_Cause);
1083 case 37: GET_REGL(env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16));
1084 case 72: GET_REGL(0); /* fp */
1085 case 89: GET_REGL((int32_t)env->CP0_PRid);
1087 if (n >= 73 && n <= 88) {
1088 /* 16 embedded regs. */
1089 GET_REGL(0);
1092 return 0;
1095 /* convert MIPS rounding mode in FCR31 to IEEE library */
1096 static unsigned int ieee_rm[] =
1098 float_round_nearest_even,
1099 float_round_to_zero,
1100 float_round_up,
1101 float_round_down
1103 #define RESTORE_ROUNDING_MODE \
1104 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
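/* The low two bits of FCR31 select the rounding mode: 0 = round to nearest
 * even, 1 = toward zero, 2 = toward +inf, 3 = toward -inf, which is exactly
 * the order of the ieee_rm[] table above. */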
1106 static int cpu_gdb_write_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1108 target_ulong tmp;
1110 tmp = ldtul_p(mem_buf);
1112 if (n < 32) {
1113 env->active_tc.gpr[n] = tmp;
1114 return sizeof(target_ulong);
1116 if (env->CP0_Config1 & (1 << CP0C1_FP)
1117 && n >= 38 && n < 73) {
1118 if (n < 70) {
1119 if (env->CP0_Status & (1 << CP0St_FR))
1120 env->active_fpu.fpr[n - 38].d = tmp;
1121 else
1122 env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
1124 switch (n) {
1125 case 70:
1126 env->active_fpu.fcr31 = tmp & 0xFF83FFFF;
1127 /* set rounding mode */
1128 RESTORE_ROUNDING_MODE;
1129 break;
1130 case 71: env->active_fpu.fcr0 = tmp; break;
1132 return sizeof(target_ulong);
1134 switch (n) {
1135 case 32: env->CP0_Status = tmp; break;
1136 case 33: env->active_tc.LO[0] = tmp; break;
1137 case 34: env->active_tc.HI[0] = tmp; break;
1138 case 35: env->CP0_BadVAddr = tmp; break;
1139 case 36: env->CP0_Cause = tmp; break;
1140 case 37:
1141 env->active_tc.PC = tmp & ~(target_ulong)1;
1142 if (tmp & 1) {
1143 env->hflags |= MIPS_HFLAG_M16;
1144 } else {
1145 env->hflags &= ~(MIPS_HFLAG_M16);
1147 break;
1148 case 72: /* fp, ignored */ break;
1149 default:
1150 if (n > 89)
1151 return 0;
1152 /* Other registers are readonly. Ignore writes. */
1153 break;
1156 return sizeof(target_ulong);
1158 #elif defined(TARGET_OPENRISC)
1160 #define NUM_CORE_REGS (32 + 3)
1162 static int cpu_gdb_read_register(CPUOpenRISCState *env, uint8_t *mem_buf, int n)
1164 if (n < 32) {
1165 GET_REG32(env->gpr[n]);
1166 } else {
1167 switch (n) {
1168 case 32: /* PPC */
1169 GET_REG32(env->ppc);
1170 break;
1172 case 33: /* NPC */
1173 GET_REG32(env->npc);
1174 break;
1176 case 34: /* SR */
1177 GET_REG32(env->sr);
1178 break;
1180 default:
1181 break;
1184 return 0;
1187 static int cpu_gdb_write_register(CPUOpenRISCState *env,
1188 uint8_t *mem_buf, int n)
1190 uint32_t tmp;
1192 if (n > NUM_CORE_REGS) {
1193 return 0;
1196 tmp = ldl_p(mem_buf);
1198 if (n < 32) {
1199 env->gpr[n] = tmp;
1200 } else {
1201 switch (n) {
1202 case 32: /* PPC */
1203 env->ppc = tmp;
1204 break;
1206 case 33: /* NPC */
1207 env->npc = tmp;
1208 break;
1210 case 34: /* SR */
1211 env->sr = tmp;
1212 break;
1214 default:
1215 break;
1218 return 4;
1220 #elif defined (TARGET_SH4)
1222 /* Hint: Use "set architecture sh4" in GDB to see fpu registers */
1223 /* FIXME: We should use XML for this. */
1225 #define NUM_CORE_REGS 59
1227 static int cpu_gdb_read_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1229 switch (n) {
1230 case 0 ... 7:
1231 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1232 GET_REGL(env->gregs[n + 16]);
1233 } else {
1234 GET_REGL(env->gregs[n]);
1236 case 8 ... 15:
1237 GET_REGL(env->gregs[n]);
1238 case 16:
1239 GET_REGL(env->pc);
1240 case 17:
1241 GET_REGL(env->pr);
1242 case 18:
1243 GET_REGL(env->gbr);
1244 case 19:
1245 GET_REGL(env->vbr);
1246 case 20:
1247 GET_REGL(env->mach);
1248 case 21:
1249 GET_REGL(env->macl);
1250 case 22:
1251 GET_REGL(env->sr);
1252 case 23:
1253 GET_REGL(env->fpul);
1254 case 24:
1255 GET_REGL(env->fpscr);
1256 case 25 ... 40:
1257 if (env->fpscr & FPSCR_FR) {
1258 stfl_p(mem_buf, env->fregs[n - 9]);
1259 } else {
1260 stfl_p(mem_buf, env->fregs[n - 25]);
1262 return 4;
1263 case 41:
1264 GET_REGL(env->ssr);
1265 case 42:
1266 GET_REGL(env->spc);
1267 case 43 ... 50:
1268 GET_REGL(env->gregs[n - 43]);
1269 case 51 ... 58:
1270 GET_REGL(env->gregs[n - (51 - 16)]);
1273 return 0;
1276 static int cpu_gdb_write_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1278 switch (n) {
1279 case 0 ... 7:
1280 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1281 env->gregs[n + 16] = ldl_p(mem_buf);
1282 } else {
1283 env->gregs[n] = ldl_p(mem_buf);
1285 break;
1286 case 8 ... 15:
1287 env->gregs[n] = ldl_p(mem_buf);
1288 break;
1289 case 16:
1290 env->pc = ldl_p(mem_buf);
1291 break;
1292 case 17:
1293 env->pr = ldl_p(mem_buf);
1294 break;
1295 case 18:
1296 env->gbr = ldl_p(mem_buf);
1297 break;
1298 case 19:
1299 env->vbr = ldl_p(mem_buf);
1300 break;
1301 case 20:
1302 env->mach = ldl_p(mem_buf);
1303 break;
1304 case 21:
1305 env->macl = ldl_p(mem_buf);
1306 break;
1307 case 22:
1308 env->sr = ldl_p(mem_buf);
1309 break;
1310 case 23:
1311 env->fpul = ldl_p(mem_buf);
1312 break;
1313 case 24:
1314 env->fpscr = ldl_p(mem_buf);
1315 break;
1316 case 25 ... 40:
1317 if (env->fpscr & FPSCR_FR) {
1318 env->fregs[n - 9] = ldfl_p(mem_buf);
1319 } else {
1320 env->fregs[n - 25] = ldfl_p(mem_buf);
1322 break;
1323 case 41:
1324 env->ssr = ldl_p(mem_buf);
1325 break;
1326 case 42:
1327 env->spc = ldl_p(mem_buf);
1328 break;
1329 case 43 ... 50:
1330 env->gregs[n - 43] = ldl_p(mem_buf);
1331 break;
1332 case 51 ... 58:
1333 env->gregs[n - (51 - 16)] = ldl_p(mem_buf);
1334 break;
1335 default: return 0;
1338 return 4;
1340 #elif defined (TARGET_MICROBLAZE)
1342 #define NUM_CORE_REGS (32 + 5)
1344 static int cpu_gdb_read_register(CPUMBState *env, uint8_t *mem_buf, int n)
1346 if (n < 32) {
1347 GET_REG32(env->regs[n]);
1348 } else {
1349 GET_REG32(env->sregs[n - 32]);
1351 return 0;
1354 static int cpu_gdb_write_register(CPUMBState *env, uint8_t *mem_buf, int n)
1356 uint32_t tmp;
1358 if (n > NUM_CORE_REGS)
1359 return 0;
1361 tmp = ldl_p(mem_buf);
1363 if (n < 32) {
1364 env->regs[n] = tmp;
1365 } else {
1366 env->sregs[n - 32] = tmp;
1368 return 4;
1370 #elif defined (TARGET_CRIS)
1372 #define NUM_CORE_REGS 49
1374 static int
1375 read_register_crisv10(CPUCRISState *env, uint8_t *mem_buf, int n)
1377 if (n < 15) {
1378 GET_REG32(env->regs[n]);
1381 if (n == 15) {
1382 GET_REG32(env->pc);
1385 if (n < 32) {
1386 switch (n) {
1387 case 16:
1388 GET_REG8(env->pregs[n - 16]);
1389 break;
1390 case 17:
1391 GET_REG8(env->pregs[n - 16]);
1392 break;
1393 case 20:
1394 case 21:
1395 GET_REG16(env->pregs[n - 16]);
1396 break;
1397 default:
1398 if (n >= 23) {
1399 GET_REG32(env->pregs[n - 16]);
1401 break;
1404 return 0;
1407 static int cpu_gdb_read_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1409 uint8_t srs;
1411 if (env->pregs[PR_VR] < 32)
1412 return read_register_crisv10(env, mem_buf, n);
1414 srs = env->pregs[PR_SRS];
1415 if (n < 16) {
1416 GET_REG32(env->regs[n]);
1419 if (n >= 21 && n < 32) {
1420 GET_REG32(env->pregs[n - 16]);
1422 if (n >= 33 && n < 49) {
1423 GET_REG32(env->sregs[srs][n - 33]);
1425 switch (n) {
1426 case 16: GET_REG8(env->pregs[0]);
1427 case 17: GET_REG8(env->pregs[1]);
1428 case 18: GET_REG32(env->pregs[2]);
1429 case 19: GET_REG8(srs);
1430 case 20: GET_REG16(env->pregs[4]);
1431 case 32: GET_REG32(env->pc);
1434 return 0;
1437 static int cpu_gdb_write_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1439 uint32_t tmp;
1441 if (n > 49)
1442 return 0;
1444 tmp = ldl_p(mem_buf);
1446 if (n < 16) {
1447 env->regs[n] = tmp;
1450 if (n >= 21 && n < 32) {
1451 env->pregs[n - 16] = tmp;
1454 /* FIXME: Should the support function regs be writable? */
1455 switch (n) {
1456 case 16: return 1;
1457 case 17: return 1;
1458 case 18: env->pregs[PR_PID] = tmp; break;
1459 case 19: return 1;
1460 case 20: return 2;
1461 case 32: env->pc = tmp; break;
1464 return 4;
1466 #elif defined (TARGET_ALPHA)
1468 #define NUM_CORE_REGS 67
1470 static int cpu_gdb_read_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1472 uint64_t val;
1473 CPU_DoubleU d;
1475 switch (n) {
1476 case 0 ... 30:
1477 val = env->ir[n];
1478 break;
1479 case 32 ... 62:
1480 d.d = env->fir[n - 32];
1481 val = d.ll;
1482 break;
1483 case 63:
1484 val = cpu_alpha_load_fpcr(env);
1485 break;
1486 case 64:
1487 val = env->pc;
1488 break;
1489 case 66:
1490 val = env->unique;
1491 break;
1492 case 31:
1493 case 65:
1494 /* 31 really is the zero register; 65 is unassigned in the
1495 gdb protocol, but is still required to occupy 8 bytes. */
1496 val = 0;
1497 break;
1498 default:
1499 return 0;
1501 GET_REGL(val);
1504 static int cpu_gdb_write_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1506 target_ulong tmp = ldtul_p(mem_buf);
1507 CPU_DoubleU d;
1509 switch (n) {
1510 case 0 ... 30:
1511 env->ir[n] = tmp;
1512 break;
1513 case 32 ... 62:
1514 d.ll = tmp;
1515 env->fir[n - 32] = d.d;
1516 break;
1517 case 63:
1518 cpu_alpha_store_fpcr(env, tmp);
1519 break;
1520 case 64:
1521 env->pc = tmp;
1522 break;
1523 case 66:
1524 env->unique = tmp;
1525 break;
1526 case 31:
1527 case 65:
1528 /* 31 really is the zero register; 65 is unassigned in the
1529 gdb protocol, but is still required to occupy 8 bytes. */
1530 break;
1531 default:
1532 return 0;
1534 return 8;
1536 #elif defined (TARGET_S390X)
1538 #define NUM_CORE_REGS S390_NUM_TOTAL_REGS
1540 static int cpu_gdb_read_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1542 switch (n) {
1543 case S390_PSWM_REGNUM: GET_REGL(env->psw.mask); break;
1544 case S390_PSWA_REGNUM: GET_REGL(env->psw.addr); break;
1545 case S390_R0_REGNUM ... S390_R15_REGNUM:
1546 GET_REGL(env->regs[n-S390_R0_REGNUM]); break;
1547 case S390_A0_REGNUM ... S390_A15_REGNUM:
1548 GET_REG32(env->aregs[n-S390_A0_REGNUM]); break;
1549 case S390_FPC_REGNUM: GET_REG32(env->fpc); break;
1550 case S390_F0_REGNUM ... S390_F15_REGNUM:
1551 /* XXX */
1552 break;
1553 case S390_PC_REGNUM: GET_REGL(env->psw.addr); break;
1554 case S390_CC_REGNUM:
1555 env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
1556 env->cc_vr);
1557 GET_REG32(env->cc_op);
1558 break;
1561 return 0;
1564 static int cpu_gdb_write_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1566 target_ulong tmpl;
1567 uint32_t tmp32;
1568 int r = 8;
1569 tmpl = ldtul_p(mem_buf);
1570 tmp32 = ldl_p(mem_buf);
1572 switch (n) {
1573 case S390_PSWM_REGNUM: env->psw.mask = tmpl; break;
1574 case S390_PSWA_REGNUM: env->psw.addr = tmpl; break;
1575 case S390_R0_REGNUM ... S390_R15_REGNUM:
1576 env->regs[n-S390_R0_REGNUM] = tmpl; break;
1577 case S390_A0_REGNUM ... S390_A15_REGNUM:
1578 env->aregs[n-S390_A0_REGNUM] = tmp32; r=4; break;
1579 case S390_FPC_REGNUM: env->fpc = tmp32; r=4; break;
1580 case S390_F0_REGNUM ... S390_F15_REGNUM:
1581 /* XXX */
1582 break;
1583 case S390_PC_REGNUM: env->psw.addr = tmpl; break;
1584 case S390_CC_REGNUM: env->cc_op = tmp32; r=4; break;
1587 return r;
1589 #elif defined (TARGET_LM32)
1591 #include "hw/lm32_pic.h"
1592 #define NUM_CORE_REGS (32 + 7)
1594 static int cpu_gdb_read_register(CPULM32State *env, uint8_t *mem_buf, int n)
1596 if (n < 32) {
1597 GET_REG32(env->regs[n]);
1598 } else {
1599 switch (n) {
1600 case 32:
1601 GET_REG32(env->pc);
1602 break;
1603 /* FIXME: put in right exception ID */
1604 case 33:
1605 GET_REG32(0);
1606 break;
1607 case 34:
1608 GET_REG32(env->eba);
1609 break;
1610 case 35:
1611 GET_REG32(env->deba);
1612 break;
1613 case 36:
1614 GET_REG32(env->ie);
1615 break;
1616 case 37:
1617 GET_REG32(lm32_pic_get_im(env->pic_state));
1618 break;
1619 case 38:
1620 GET_REG32(lm32_pic_get_ip(env->pic_state));
1621 break;
1624 return 0;
1627 static int cpu_gdb_write_register(CPULM32State *env, uint8_t *mem_buf, int n)
1629 uint32_t tmp;
1631 if (n > NUM_CORE_REGS) {
1632 return 0;
1635 tmp = ldl_p(mem_buf);
1637 if (n < 32) {
1638 env->regs[n] = tmp;
1639 } else {
1640 switch (n) {
1641 case 32:
1642 env->pc = tmp;
1643 break;
1644 case 34:
1645 env->eba = tmp;
1646 break;
1647 case 35:
1648 env->deba = tmp;
1649 break;
1650 case 36:
1651 env->ie = tmp;
1652 break;
1653 case 37:
1654 lm32_pic_set_im(env->pic_state, tmp);
1655 break;
1656 case 38:
1657 lm32_pic_set_ip(env->pic_state, tmp);
1658 break;
1661 return 4;
1663 #elif defined(TARGET_XTENSA)
1665 /* Use num_core_regs to see only non-privileged registers in an unmodified gdb.
1666 * Use num_regs to see all registers. gdb modification is required for that:
1667 * reset bit 0 in the 'flags' field of the register definitions in
1668 * gdb/xtensa-config.c inside the gdb source tree or inside a gdb overlay.
1670 #define NUM_CORE_REGS (env->config->gdb_regmap.num_regs)
1671 #define num_g_regs NUM_CORE_REGS
1673 static int cpu_gdb_read_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1675 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1677 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1678 return 0;
1681 switch (reg->type) {
1682 case 9: /*pc*/
1683 GET_REG32(env->pc);
1684 break;
1686 case 1: /*ar*/
1687 xtensa_sync_phys_from_window(env);
1688 GET_REG32(env->phys_regs[(reg->targno & 0xff) % env->config->nareg]);
1689 break;
1691 case 2: /*SR*/
1692 GET_REG32(env->sregs[reg->targno & 0xff]);
1693 break;
1695 case 3: /*UR*/
1696 GET_REG32(env->uregs[reg->targno & 0xff]);
1697 break;
1699 case 4: /*f*/
1700 GET_REG32(float32_val(env->fregs[reg->targno & 0x0f]));
1701 break;
1703 case 8: /*a*/
1704 GET_REG32(env->regs[reg->targno & 0x0f]);
1705 break;
1707 default:
1708 qemu_log("%s from reg %d of unsupported type %d\n",
1709 __func__, n, reg->type);
1710 return 0;
1714 static int cpu_gdb_write_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1716 uint32_t tmp;
1717 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1719 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1720 return 0;
1723 tmp = ldl_p(mem_buf);
1725 switch (reg->type) {
1726 case 9: /*pc*/
1727 env->pc = tmp;
1728 break;
1730 case 1: /*ar*/
1731 env->phys_regs[(reg->targno & 0xff) % env->config->nareg] = tmp;
1732 xtensa_sync_window_from_phys(env);
1733 break;
1735 case 2: /*SR*/
1736 env->sregs[reg->targno & 0xff] = tmp;
1737 break;
1739 case 3: /*UR*/
1740 env->uregs[reg->targno & 0xff] = tmp;
1741 break;
1743 case 4: /*f*/
1744 env->fregs[reg->targno & 0x0f] = make_float32(tmp);
1745 break;
1747 case 8: /*a*/
1748 env->regs[reg->targno & 0x0f] = tmp;
1749 break;
1751 default:
1752 qemu_log("%s to reg %d of unsupported type %d\n",
1753 __func__, n, reg->type);
1754 return 0;
1757 return 4;
1759 #else
1761 #define NUM_CORE_REGS 0
1763 static int cpu_gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int n)
1765 return 0;
1768 static int cpu_gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int n)
1770 return 0;
1773 #endif
1775 #if !defined(TARGET_XTENSA)
1776 static int num_g_regs = NUM_CORE_REGS;
1777 #endif
1779 #ifdef GDB_CORE_XML
1780 /* Encode data using the encoding for 'x' packets. */
1781 static int memtox(char *buf, const char *mem, int len)
1783 char *p = buf;
1784 char c;
1786 while (len--) {
1787 c = *(mem++);
1788 switch (c) {
1789 case '#': case '$': case '*': case '}':
1790 *(p++) = '}';
1791 *(p++) = c ^ 0x20;
1792 break;
1793 default:
1794 *(p++) = c;
1795 break;
1798 return p - buf;
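/* Example (illustrative): memtox() copies most bytes verbatim but escapes the
 * four protocol metacharacters, so '#' (0x23) becomes the pair '}' 0x03
 * (0x23 ^ 0x20), keeping the escaped payload unambiguous. */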
1801 static const char *get_feature_xml(const char *p, const char **newp)
1803 size_t len;
1804 int i;
1805 const char *name;
1806 static char target_xml[1024];
1808 len = 0;
1809 while (p[len] && p[len] != ':')
1810 len++;
1811 *newp = p + len;
1813 name = NULL;
1814 if (strncmp(p, "target.xml", len) == 0) {
1815 /* Generate the XML description for this CPU. */
1816 if (!target_xml[0]) {
1817 GDBRegisterState *r;
1819 snprintf(target_xml, sizeof(target_xml),
1820 "<?xml version=\"1.0\"?>"
1821 "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
1822 "<target>"
1823 "<xi:include href=\"%s\"/>",
1824 GDB_CORE_XML);
1826 for (r = first_cpu->gdb_regs; r; r = r->next) {
1827 pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
1828 pstrcat(target_xml, sizeof(target_xml), r->xml);
1829 pstrcat(target_xml, sizeof(target_xml), "\"/>");
1831 pstrcat(target_xml, sizeof(target_xml), "</target>");
1833 return target_xml;
1835 for (i = 0; ; i++) {
1836 name = xml_builtin[i][0];
1837 if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len))
1838 break;
1840 return name ? xml_builtin[i][1] : NULL;
1842 #endif
1844 static int gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int reg)
1846 GDBRegisterState *r;
1848 if (reg < NUM_CORE_REGS)
1849 return cpu_gdb_read_register(env, mem_buf, reg);
1851 for (r = env->gdb_regs; r; r = r->next) {
1852 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1853 return r->get_reg(env, mem_buf, reg - r->base_reg);
1856 return 0;
1859 static int gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int reg)
1861 GDBRegisterState *r;
1863 if (reg < NUM_CORE_REGS)
1864 return cpu_gdb_write_register(env, mem_buf, reg);
1866 for (r = env->gdb_regs; r; r = r->next) {
1867 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1868 return r->set_reg(env, mem_buf, reg - r->base_reg);
1871 return 0;
1874 #if !defined(TARGET_XTENSA)
1875 /* Register a supplemental set of CPU registers. If g_pos is nonzero it
1876 specifies the first register number and these registers are included in
1877 a standard "g" packet. Direction is relative to gdb, i.e. get_reg is
1878 gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
1881 void gdb_register_coprocessor(CPUArchState * env,
1882 gdb_reg_cb get_reg, gdb_reg_cb set_reg,
1883 int num_regs, const char *xml, int g_pos)
1885 GDBRegisterState *s;
1886 GDBRegisterState **p;
1887 static int last_reg = NUM_CORE_REGS;
1889 p = &env->gdb_regs;
1890 while (*p) {
1891 /* Check for duplicates. */
1892 if (strcmp((*p)->xml, xml) == 0)
1893 return;
1894 p = &(*p)->next;
1897 s = g_new0(GDBRegisterState, 1);
1898 s->base_reg = last_reg;
1899 s->num_regs = num_regs;
1900 s->get_reg = get_reg;
1901 s->set_reg = set_reg;
1902 s->xml = xml;
1904 /* Add to end of list. */
1905 last_reg += num_regs;
1906 *p = s;
1907 if (g_pos) {
1908 if (g_pos != s->base_reg) {
1909 fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n"
1910 "Expected %d got %d\n", xml, g_pos, s->base_reg);
1911 } else {
1912 num_g_regs = last_reg;
1916 #endif
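/* Hypothetical usage sketch (names illustrative, not part of this file):
 * a target's CPU init code could expose extra registers to gdb with
 *     gdb_register_coprocessor(env, my_fpu_read, my_fpu_write,
 *                              32, "my-fpu.xml", 0);
 * where my_fpu_read/my_fpu_write follow the gdb_reg_cb signature and
 * "my-fpu.xml" names a feature description served by get_feature_xml(). */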
1918 #ifndef CONFIG_USER_ONLY
1919 static const int xlat_gdb_type[] = {
1920 [GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE,
1921 [GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ,
1922 [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
1924 #endif
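/* Protocol note (illustrative): the 'Z'/'z' packet type field follows the gdb
 * remote protocol numbering -- 0/1 are software/hardware breakpoints handled
 * below, 2/3/4 are write/read/access watchpoints, which this table translates
 * into the BP_MEM_* flags used by cpu_watchpoint_insert(). */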
1926 static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
1928 CPUArchState *env;
1929 int err = 0;
1931 if (kvm_enabled())
1932 return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1934 switch (type) {
1935 case GDB_BREAKPOINT_SW:
1936 case GDB_BREAKPOINT_HW:
1937 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1938 err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
1939 if (err)
1940 break;
1942 return err;
1943 #ifndef CONFIG_USER_ONLY
1944 case GDB_WATCHPOINT_WRITE:
1945 case GDB_WATCHPOINT_READ:
1946 case GDB_WATCHPOINT_ACCESS:
1947 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1948 err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
1949 NULL);
1950 if (err)
1951 break;
1953 return err;
1954 #endif
1955 default:
1956 return -ENOSYS;
1960 static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
1962 CPUArchState *env;
1963 int err = 0;
1965 if (kvm_enabled())
1966 return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1968 switch (type) {
1969 case GDB_BREAKPOINT_SW:
1970 case GDB_BREAKPOINT_HW:
1971 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1972 err = cpu_breakpoint_remove(env, addr, BP_GDB);
1973 if (err)
1974 break;
1976 return err;
1977 #ifndef CONFIG_USER_ONLY
1978 case GDB_WATCHPOINT_WRITE:
1979 case GDB_WATCHPOINT_READ:
1980 case GDB_WATCHPOINT_ACCESS:
1981 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1982 err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
1983 if (err)
1984 break;
1986 return err;
1987 #endif
1988 default:
1989 return -ENOSYS;
1993 static void gdb_breakpoint_remove_all(void)
1995 CPUArchState *env;
1997 if (kvm_enabled()) {
1998 kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
1999 return;
2002 for (env = first_cpu; env != NULL; env = env->next_cpu) {
2003 cpu_breakpoint_remove_all(env, BP_GDB);
2004 #ifndef CONFIG_USER_ONLY
2005 cpu_watchpoint_remove_all(env, BP_GDB);
2006 #endif
2010 static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
2012 cpu_synchronize_state(s->c_cpu);
2013 #if defined(TARGET_I386)
2014 s->c_cpu->eip = pc;
2015 #elif defined (TARGET_PPC)
2016 s->c_cpu->nip = pc;
2017 #elif defined (TARGET_SPARC)
2018 s->c_cpu->pc = pc;
2019 s->c_cpu->npc = pc + 4;
2020 #elif defined (TARGET_ARM)
2021 s->c_cpu->regs[15] = pc;
2022 #elif defined (TARGET_SH4)
2023 s->c_cpu->pc = pc;
2024 #elif defined (TARGET_MIPS)
2025 s->c_cpu->active_tc.PC = pc & ~(target_ulong)1;
2026 if (pc & 1) {
2027 s->c_cpu->hflags |= MIPS_HFLAG_M16;
2028 } else {
2029 s->c_cpu->hflags &= ~(MIPS_HFLAG_M16);
2031 #elif defined (TARGET_MICROBLAZE)
2032 s->c_cpu->sregs[SR_PC] = pc;
2033 #elif defined(TARGET_OPENRISC)
2034 s->c_cpu->pc = pc;
2035 #elif defined (TARGET_CRIS)
2036 s->c_cpu->pc = pc;
2037 #elif defined (TARGET_ALPHA)
2038 s->c_cpu->pc = pc;
2039 #elif defined (TARGET_S390X)
2040 s->c_cpu->psw.addr = pc;
2041 #elif defined (TARGET_LM32)
2042 s->c_cpu->pc = pc;
2043 #elif defined(TARGET_XTENSA)
2044 s->c_cpu->pc = pc;
2045 #endif
2048 static CPUArchState *find_cpu(uint32_t thread_id)
2050 CPUArchState *env;
2052 for (env = first_cpu; env != NULL; env = env->next_cpu) {
2053 if (cpu_index(env) == thread_id) {
2054 return env;
2058 return NULL;
2061 static int gdb_handle_packet(GDBState *s, const char *line_buf)
2063 CPUArchState *env;
2064 const char *p;
2065 uint32_t thread;
2066 int ch, reg_size, type, res;
2067 char buf[MAX_PACKET_LENGTH];
2068 uint8_t mem_buf[MAX_PACKET_LENGTH];
2069 uint8_t *registers;
2070 target_ulong addr, len;
2072 #ifdef DEBUG_GDB
2073 printf("command='%s'\n", line_buf);
2074 #endif
2075 p = line_buf;
2076 ch = *p++;
2077 switch(ch) {
2078 case '?':
2079 /* TODO: Make this return the correct value for user-mode. */
2080 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", GDB_SIGNAL_TRAP,
2081 cpu_index(s->c_cpu));
2082 put_packet(s, buf);
2083 /* Remove all the breakpoints when this query is issued,
2084 * because gdb is doing an initial connect and the state
2085 * should be cleaned up.
2087 gdb_breakpoint_remove_all();
2088 break;
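/* Illustrative reply: with GDB_SIGNAL_TRAP (5) and CPU index 1 the packet
 * body built above is "T05thread:01;". */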
2089 case 'c':
2090 if (*p != '\0') {
2091 addr = strtoull(p, (char **)&p, 16);
2092 gdb_set_cpu_pc(s, addr);
2094 s->signal = 0;
2095 gdb_continue(s);
2096 return RS_IDLE;
2097 case 'C':
2098 s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16));
2099 if (s->signal == -1)
2100 s->signal = 0;
2101 gdb_continue(s);
2102 return RS_IDLE;
2103 case 'v':
2104 if (strncmp(p, "Cont", 4) == 0) {
2105 int res_signal, res_thread;
2107 p += 4;
2108 if (*p == '?') {
2109 put_packet(s, "vCont;c;C;s;S");
2110 break;
2112 res = 0;
2113 res_signal = 0;
2114 res_thread = 0;
2115 while (*p) {
2116 int action, signal;
2118 if (*p++ != ';') {
2119 res = 0;
2120 break;
2122 action = *p++;
2123 signal = 0;
2124 if (action == 'C' || action == 'S') {
2125 signal = strtoul(p, (char **)&p, 16);
2126 } else if (action != 'c' && action != 's') {
2127 res = 0;
2128 break;
2130 thread = 0;
2131 if (*p == ':') {
2132 thread = strtoull(p+1, (char **)&p, 16);
2134 action = tolower(action);
2135 if (res == 0 || (res == 'c' && action == 's')) {
2136 res = action;
2137 res_signal = signal;
2138 res_thread = thread;
2141 if (res) {
2142 if (res_thread != -1 && res_thread != 0) {
2143 env = find_cpu(res_thread);
2144 if (env == NULL) {
2145 put_packet(s, "E22");
2146 break;
2148 s->c_cpu = env;
2150 if (res == 's') {
2151 cpu_single_step(s->c_cpu, sstep_flags);
2153 s->signal = res_signal;
2154 gdb_continue(s);
2155 return RS_IDLE;
2157 break;
2158 } else {
2159 goto unknown_command;
2161 case 'k':
2162 #ifdef CONFIG_USER_ONLY
2163 /* Kill the target */
2164 fprintf(stderr, "\nQEMU: Terminated via GDBstub\n");
2165 exit(0);
2166 #endif
2167 case 'D':
2168 /* Detach packet */
2169 gdb_breakpoint_remove_all();
2170 gdb_syscall_mode = GDB_SYS_DISABLED;
2171 gdb_continue(s);
2172 put_packet(s, "OK");
2173 break;
2174 case 's':
2175 if (*p != '\0') {
2176 addr = strtoull(p, (char **)&p, 16);
2177 gdb_set_cpu_pc(s, addr);
2179 cpu_single_step(s->c_cpu, sstep_flags);
2180 gdb_continue(s);
2181 return RS_IDLE;
2182 case 'F':
2184 target_ulong ret;
2185 target_ulong err;
2187 ret = strtoull(p, (char **)&p, 16);
2188 if (*p == ',') {
2189 p++;
2190 err = strtoull(p, (char **)&p, 16);
2191 } else {
2192 err = 0;
2194 if (*p == ',')
2195 p++;
2196 type = *p;
2197 if (s->current_syscall_cb) {
2198 s->current_syscall_cb(s->c_cpu, ret, err);
2199 s->current_syscall_cb = NULL;
2201 if (type == 'C') {
2202 put_packet(s, "T02");
2203 } else {
2204 gdb_continue(s);
2207 break;
2208 case 'g':
2209 cpu_synchronize_state(s->g_cpu);
2210 env = s->g_cpu;
2211 len = 0;
2212 for (addr = 0; addr < num_g_regs; addr++) {
2213 reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr);
2214 len += reg_size;
2216 memtohex(buf, mem_buf, len);
2217 put_packet(s, buf);
2218 break;
2219 case 'G':
2220 cpu_synchronize_state(s->g_cpu);
2221 env = s->g_cpu;
2222 registers = mem_buf;
2223 len = strlen(p) / 2;
2224 hextomem((uint8_t *)registers, p, len);
2225 for (addr = 0; addr < num_g_regs && len > 0; addr++) {
2226 reg_size = gdb_write_register(s->g_cpu, registers, addr);
2227 len -= reg_size;
2228 registers += reg_size;
2230 put_packet(s, "OK");
2231 break;
2232 case 'm':
2233 addr = strtoull(p, (char **)&p, 16);
2234 if (*p == ',')
2235 p++;
2236 len = strtoull(p, NULL, 16);
2237 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 0) != 0) {
2238 put_packet (s, "E14");
2239 } else {
2240 memtohex(buf, mem_buf, len);
2241 put_packet(s, buf);
2243 break;
2244 case 'M':
2245 addr = strtoull(p, (char **)&p, 16);
2246 if (*p == ',')
2247 p++;
2248 len = strtoull(p, (char **)&p, 16);
2249 if (*p == ':')
2250 p++;
2251 hextomem(mem_buf, p, len);
2252 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 1) != 0) {
2253 put_packet(s, "E14");
2254 } else {
2255 put_packet(s, "OK");
2257 break;
2258 case 'p':
2259 /* Older gdb are really dumb, and don't use 'g' if 'p' is available.
2260 This works, but can be very slow. Anything new enough to
2261 understand XML also knows how to use this properly. */
2262 if (!gdb_has_xml)
2263 goto unknown_command;
2264 addr = strtoull(p, (char **)&p, 16);
2265 reg_size = gdb_read_register(s->g_cpu, mem_buf, addr);
2266 if (reg_size) {
2267 memtohex(buf, mem_buf, reg_size);
2268 put_packet(s, buf);
2269 } else {
2270 put_packet(s, "E14");
2272 break;
2273 case 'P':
2274 if (!gdb_has_xml)
2275 goto unknown_command;
2276 addr = strtoull(p, (char **)&p, 16);
2277 if (*p == '=')
2278 p++;
2279 reg_size = strlen(p) / 2;
2280 hextomem(mem_buf, p, reg_size);
2281 gdb_write_register(s->g_cpu, mem_buf, addr);
2282 put_packet(s, "OK");
2283 break;
2284 case 'Z':
2285 case 'z':
2286 type = strtoul(p, (char **)&p, 16);
2287 if (*p == ',')
2288 p++;
2289 addr = strtoull(p, (char **)&p, 16);
2290 if (*p == ',')
2291 p++;
2292 len = strtoull(p, (char **)&p, 16);
2293 if (ch == 'Z')
2294 res = gdb_breakpoint_insert(addr, len, type);
2295 else
2296 res = gdb_breakpoint_remove(addr, len, type);
2297 if (res >= 0)
2298 put_packet(s, "OK");
2299 else if (res == -ENOSYS)
2300 put_packet(s, "");
2301 else
2302 put_packet(s, "E22");
2303 break;
2304 case 'H':
2305 type = *p++;
2306 thread = strtoull(p, (char **)&p, 16);
2307 if (thread == -1 || thread == 0) {
2308 put_packet(s, "OK");
2309 break;
2311 env = find_cpu(thread);
2312 if (env == NULL) {
2313 put_packet(s, "E22");
2314 break;
2316 switch (type) {
2317 case 'c':
2318 s->c_cpu = env;
2319 put_packet(s, "OK");
2320 break;
2321 case 'g':
2322 s->g_cpu = env;
2323 put_packet(s, "OK");
2324 break;
2325 default:
2326 put_packet(s, "E22");
2327 break;
2329 break;
2330 case 'T':
2331 thread = strtoull(p, (char **)&p, 16);
2332 env = find_cpu(thread);
2334 if (env != NULL) {
2335 put_packet(s, "OK");
2336 } else {
2337 put_packet(s, "E22");
2339 break;
2340 case 'q':
2341 case 'Q':
2342 /* parse any 'q' packets here */
2343 if (!strcmp(p,"qemu.sstepbits")) {
2344 /* Query Breakpoint bit definitions */
2345 snprintf(buf, sizeof(buf), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
2346 SSTEP_ENABLE,
2347 SSTEP_NOIRQ,
2348 SSTEP_NOTIMER);
2349 put_packet(s, buf);
2350 break;
2351 } else if (strncmp(p,"qemu.sstep",10) == 0) {
2352 /* Display or change the sstep_flags */
2353 p += 10;
2354 if (*p != '=') {
2355 /* Display current setting */
2356 snprintf(buf, sizeof(buf), "0x%x", sstep_flags);
2357 put_packet(s, buf);
2358 break;
2360 p++;
2361 type = strtoul(p, (char **)&p, 16);
2362 sstep_flags = type;
2363 put_packet(s, "OK");
2364 break;
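/* Illustrative: from a connected gdb these can be driven by hand, e.g.
 * "maintenance packet qqemu.sstep" shows the current mask and
 * "maintenance packet qqemu.sstep=0x5" would enable stepping with timers
 * still frozen (SSTEP_ENABLE | SSTEP_NOTIMER), assuming the usual bit
 * values reported by qemu.sstepbits. */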
2365 } else if (strcmp(p,"C") == 0) {
2366 /* "Current thread" remains vague in the spec, so always return
2367 * the first CPU (gdb returns the first thread). */
2368 put_packet(s, "QC1");
2369 break;
2370 } else if (strcmp(p,"fThreadInfo") == 0) {
2371 s->query_cpu = first_cpu;
2372 goto report_cpuinfo;
2373 } else if (strcmp(p,"sThreadInfo") == 0) {
2374 report_cpuinfo:
2375 if (s->query_cpu) {
2376 snprintf(buf, sizeof(buf), "m%x", cpu_index(s->query_cpu));
2377 put_packet(s, buf);
2378 s->query_cpu = s->query_cpu->next_cpu;
2379 } else
2380 put_packet(s, "l");
2381 break;
2382 } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
2383 thread = strtoull(p+16, (char **)&p, 16);
2384 env = find_cpu(thread);
2385 if (env != NULL) {
2386 cpu_synchronize_state(env);
2387 len = snprintf((char *)mem_buf, sizeof(mem_buf),
2388 "CPU#%d [%s]", env->cpu_index,
2389 env->halted ? "halted " : "running");
2390 memtohex(buf, mem_buf, len);
2391 put_packet(s, buf);
2393 break;
2395 #ifdef CONFIG_USER_ONLY
2396 else if (strncmp(p, "Offsets", 7) == 0) {
2397 TaskState *ts = s->c_cpu->opaque;
2399 snprintf(buf, sizeof(buf),
2400 "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
2401 ";Bss=" TARGET_ABI_FMT_lx,
2402 ts->info->code_offset,
2403 ts->info->data_offset,
2404 ts->info->data_offset);
2405 put_packet(s, buf);
2406 break;
2408 #else /* !CONFIG_USER_ONLY */
2409 else if (strncmp(p, "Rcmd,", 5) == 0) {
2410 int len = strlen(p + 5);
2412 if ((len % 2) != 0) {
2413 put_packet(s, "E01");
2414 break;
2416 hextomem(mem_buf, p + 5, len);
2417 len = len / 2;
2418 mem_buf[len++] = 0;
2419 qemu_chr_be_write(s->mon_chr, mem_buf, len);
2420 put_packet(s, "OK");
2421 break;
2423 #endif /* !CONFIG_USER_ONLY */
2424 if (strncmp(p, "Supported", 9) == 0) {
2425 snprintf(buf, sizeof(buf), "PacketSize=%x", MAX_PACKET_LENGTH);
2426 #ifdef GDB_CORE_XML
2427 pstrcat(buf, sizeof(buf), ";qXfer:features:read+");
2428 #endif
2429 put_packet(s, buf);
2430 break;
2432 #ifdef GDB_CORE_XML
2433 if (strncmp(p, "Xfer:features:read:", 19) == 0) {
2434 const char *xml;
2435 target_ulong total_len;
2437 gdb_has_xml = 1;
2438 p += 19;
2439 xml = get_feature_xml(p, &p);
2440 if (!xml) {
2441 snprintf(buf, sizeof(buf), "E00");
2442 put_packet(s, buf);
2443 break;
2446 if (*p == ':')
2447 p++;
2448 addr = strtoul(p, (char **)&p, 16);
2449 if (*p == ',')
2450 p++;
2451 len = strtoul(p, (char **)&p, 16);
2453 total_len = strlen(xml);
2454 if (addr > total_len) {
2455 snprintf(buf, sizeof(buf), "E00");
2456 put_packet(s, buf);
2457 break;
2458 }
2459 if (len > (MAX_PACKET_LENGTH - 5) / 2)
2460 len = (MAX_PACKET_LENGTH - 5) / 2;
2461 if (len < total_len - addr) {
2462 buf[0] = 'm';
2463 len = memtox(buf + 1, xml + addr, len);
2464 } else {
2465 buf[0] = 'l';
2466 len = memtox(buf + 1, xml + addr, total_len - addr);
2467 }
2468 put_packet_binary(s, buf, len + 1);
2469 break;
2470 }
2471 #endif
2472 /* Unrecognised 'q' command. */
2473 goto unknown_command;
2475 default:
2476 unknown_command:
2477 /* put empty packet */
2478 buf[0] = '\0';
2479 put_packet(s, buf);
2480 break;
2481 }
2482 return RS_IDLE;
2483 }
2485 void gdb_set_stop_cpu(CPUArchState *env)
2486 {
2487 gdbserver_state->c_cpu = env;
2488 gdbserver_state->g_cpu = env;
2489 }
2491 #ifndef CONFIG_USER_ONLY
2492 static void gdb_vm_state_change(void *opaque, int running, RunState state)
2493 {
2494 GDBState *s = gdbserver_state;
2495 CPUArchState *env = s->c_cpu;
2496 char buf[256];
2497 const char *type;
2498 int ret;
2500 if (running || s->state == RS_INACTIVE) {
2501 return;
2502 }
2503 /* Is there a GDB syscall waiting to be sent? */
2504 if (s->current_syscall_cb) {
2505 put_packet(s, s->syscall_buf);
2506 return;
2507 }
2508 switch (state) {
2509 case RUN_STATE_DEBUG:
2510 if (env->watchpoint_hit) {
2511 switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
2512 case BP_MEM_READ:
2513 type = "r";
2514 break;
2515 case BP_MEM_ACCESS:
2516 type = "a";
2517 break;
2518 default:
2519 type = "";
2520 break;
2521 }
2522 snprintf(buf, sizeof(buf),
2523 "T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
2524 GDB_SIGNAL_TRAP, cpu_index(env), type,
2525 env->watchpoint_hit->vaddr);
2526 env->watchpoint_hit = NULL;
2527 goto send_packet;
2528 }
2529 tb_flush(env);
2530 ret = GDB_SIGNAL_TRAP;
2531 break;
2532 case RUN_STATE_PAUSED:
2533 ret = GDB_SIGNAL_INT;
2534 break;
2535 case RUN_STATE_SHUTDOWN:
2536 ret = GDB_SIGNAL_QUIT;
2537 break;
2538 case RUN_STATE_IO_ERROR:
2539 ret = GDB_SIGNAL_IO;
2540 break;
2541 case RUN_STATE_WATCHDOG:
2542 ret = GDB_SIGNAL_ALRM;
2543 break;
2544 case RUN_STATE_INTERNAL_ERROR:
2545 ret = GDB_SIGNAL_ABRT;
2546 break;
2547 case RUN_STATE_SAVE_VM:
2548 case RUN_STATE_RESTORE_VM:
2549 return;
2550 case RUN_STATE_FINISH_MIGRATE:
2551 ret = GDB_SIGNAL_XCPU;
2552 break;
2553 default:
2554 ret = GDB_SIGNAL_UNKNOWN;
2555 break;
2556 }
2557 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", ret, cpu_index(env));
2559 send_packet:
2560 put_packet(s, buf);
2562 /* disable single step if it was enabled */
2563 cpu_single_step(env, 0);
2564 }
2565 #endif
2567 /* Send a gdb syscall request.
2568 This accepts limited printf-style format specifiers, specifically:
2569 %x - target_ulong argument printed in hex.
2570 %lx - 64-bit argument printed in hex.
2571 %s - string pointer (target_ulong) and length (int) pair. */
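/* Illustrative example (not part of this file; values are made up):
 * semihosting code can request a host write roughly like
 *     gdb_do_syscall(cb, "write,%x,%x,%x", fd, buf_addr, len);
 * which is encoded below as an 'F' packet such as "Fwrite,2,10074,1a"
 * and answered by gdb with an 'F' reply that invokes cb. */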
2572 void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
2573 {
2574 va_list va;
2575 char *p;
2576 char *p_end;
2577 target_ulong addr;
2578 uint64_t i64;
2579 GDBState *s;
2581 s = gdbserver_state;
2582 if (!s)
2583 return;
2584 s->current_syscall_cb = cb;
2585 #ifndef CONFIG_USER_ONLY
2586 vm_stop(RUN_STATE_DEBUG);
2587 #endif
2588 va_start(va, fmt);
2589 p = s->syscall_buf;
2590 p_end = &s->syscall_buf[sizeof(s->syscall_buf)];
2591 *(p++) = 'F';
2592 while (*fmt) {
2593 if (*fmt == '%') {
2594 fmt++;
2595 switch (*fmt++) {
2596 case 'x':
2597 addr = va_arg(va, target_ulong);
2598 p += snprintf(p, p_end - p, TARGET_FMT_lx, addr);
2599 break;
2600 case 'l':
2601 if (*(fmt++) != 'x')
2602 goto bad_format;
2603 i64 = va_arg(va, uint64_t);
2604 p += snprintf(p, p_end - p, "%" PRIx64, i64);
2605 break;
2606 case 's':
2607 addr = va_arg(va, target_ulong);
2608 p += snprintf(p, p_end - p, TARGET_FMT_lx "/%x",
2609 addr, va_arg(va, int));
2610 break;
2611 default:
2612 bad_format:
2613 fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n",
2614 fmt - 1);
2615 break;
2616 }
2617 } else {
2618 *(p++) = *(fmt++);
2619 }
2620 }
2621 *p = 0;
2622 va_end(va);
2623 #ifdef CONFIG_USER_ONLY
2624 put_packet(s, s->syscall_buf);
2625 gdb_handlesig(s->c_cpu, 0);
2626 #else
2627 /* In this case wait to send the syscall packet until notification that
2628 the CPU has stopped. This must be done because if the packet is sent
2629 now the reply from the syscall request could be received while the CPU
2630 is still in the running state, which can cause packets to be dropped
2631 and state transition 'T' packets to be sent while the syscall is still
2632 being processed. */
2633 cpu_exit(s->c_cpu);
2634 #endif
2635 }
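/* Feed one byte of remote-protocol input to the packet state machine.
 * Packets look like "$<data>#<two-hex-digit checksum>"; we answer with
 * '+' (ACK) or '-' (request retransmission) before handing a complete,
 * checksum-verified packet to gdb_handle_packet(). */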
2637 static void gdb_read_byte(GDBState *s, int ch)
2638 {
2639 int i, csum;
2640 uint8_t reply;
2642 #ifndef CONFIG_USER_ONLY
2643 if (s->last_packet_len) {
2644 /* Waiting for a response to the last packet. If we see the start
2645 of a new command then abandon the previous response. */
2646 if (ch == '-') {
2647 #ifdef DEBUG_GDB
2648 printf("Got NACK, retransmitting\n");
2649 #endif
2650 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
2651 }
2652 #ifdef DEBUG_GDB
2653 else if (ch == '+')
2654 printf("Got ACK\n");
2655 else
2656 printf("Got '%c' when expecting ACK/NACK\n", ch);
2657 #endif
2658 if (ch == '+' || ch == '$')
2659 s->last_packet_len = 0;
2660 if (ch != '$')
2661 return;
2662 }
2663 if (runstate_is_running()) {
2664 /* when the CPU is running, we cannot do anything except stop
2665 it when receiving a char */
2666 vm_stop(RUN_STATE_PAUSED);
2667 } else
2668 #endif
2669 {
2670 switch(s->state) {
2671 case RS_IDLE:
2672 if (ch == '$') {
2673 s->line_buf_index = 0;
2674 s->state = RS_GETLINE;
2675 }
2676 break;
2677 case RS_GETLINE:
2678 if (ch == '#') {
2679 s->state = RS_CHKSUM1;
2680 } else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
2681 s->state = RS_IDLE;
2682 } else {
2683 s->line_buf[s->line_buf_index++] = ch;
2684 }
2685 break;
2686 case RS_CHKSUM1:
2687 s->line_buf[s->line_buf_index] = '\0';
2688 s->line_csum = fromhex(ch) << 4;
2689 s->state = RS_CHKSUM2;
2690 break;
2691 case RS_CHKSUM2:
2692 s->line_csum |= fromhex(ch);
2693 csum = 0;
2694 for(i = 0; i < s->line_buf_index; i++) {
2695 csum += s->line_buf[i];
2696 }
2697 if (s->line_csum != (csum & 0xff)) {
2698 reply = '-';
2699 put_buffer(s, &reply, 1);
2700 s->state = RS_IDLE;
2701 } else {
2702 reply = '+';
2703 put_buffer(s, &reply, 1);
2704 s->state = gdb_handle_packet(s, s->line_buf);
2705 }
2706 break;
2707 default:
2708 abort();
2709 }
2710 }
2711 }
2713 /* Tell the remote gdb that the process has exited. */
2714 void gdb_exit(CPUArchState *env, int code)
2715 {
2716 GDBState *s;
2717 char buf[4];
2719 s = gdbserver_state;
2720 if (!s) {
2721 return;
2722 }
2723 #ifdef CONFIG_USER_ONLY
2724 if (gdbserver_fd < 0 || s->fd < 0) {
2725 return;
2726 }
2727 #endif
2729 snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
2730 put_packet(s, buf);
2732 #ifndef CONFIG_USER_ONLY
2733 if (s->chr) {
2734 qemu_chr_delete(s->chr);
2735 }
2736 #endif
2737 }
2739 #ifdef CONFIG_USER_ONLY
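/* Return non-zero if a gdb client is attached and signals can be
 * reported to it (user-mode emulation only). */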
2740 int
2741 gdb_queuesig (void)
2742 {
2743 GDBState *s;
2745 s = gdbserver_state;
2747 if (gdbserver_fd < 0 || s->fd < 0)
2748 return 0;
2749 else
2750 return 1;
2751 }
2753 int
2754 gdb_handlesig (CPUArchState *env, int sig)
2755 {
2756 GDBState *s;
2757 char buf[256];
2758 int n;
2760 s = gdbserver_state;
2761 if (gdbserver_fd < 0 || s->fd < 0)
2762 return sig;
2764 /* disable single step if it was enabled */
2765 cpu_single_step(env, 0);
2766 tb_flush(env);
2768 if (sig != 0)
2769 {
2770 snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb (sig));
2771 put_packet(s, buf);
2772 }
2773 /* put_packet() might have detected that the peer terminated the
2774 connection. */
2775 if (s->fd < 0)
2776 return sig;
2778 sig = 0;
2779 s->state = RS_IDLE;
2780 s->running_state = 0;
2781 while (s->running_state == 0) {
2782 n = read (s->fd, buf, 256);
2783 if (n > 0)
2784 {
2785 int i;
2787 for (i = 0; i < n; i++)
2788 gdb_read_byte (s, buf[i]);
2789 }
2790 else if (n == 0 || errno != EAGAIN)
2791 {
2792 /* XXX: Connection closed. Should probably wait for another
2793 connection before continuing. */
2794 return sig;
2795 }
2796 }
2797 sig = s->signal;
2798 s->signal = 0;
2799 return sig;
2800 }
2802 /* Tell the remote gdb that the process has exited due to SIG. */
2803 void gdb_signalled(CPUArchState *env, int sig)
2804 {
2805 GDBState *s;
2806 char buf[4];
2808 s = gdbserver_state;
2809 if (gdbserver_fd < 0 || s->fd < 0)
2810 return;
2812 snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb (sig));
2813 put_packet(s, buf);
2814 }
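/* Wait for a gdb client to connect to the listening socket and set up
 * the per-connection GDBState (user-mode TCP transport). */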
2816 static void gdb_accept(void)
2817 {
2818 GDBState *s;
2819 struct sockaddr_in sockaddr;
2820 socklen_t len;
2821 int val, fd;
2823 for(;;) {
2824 len = sizeof(sockaddr);
2825 fd = accept(gdbserver_fd, (struct sockaddr *)&sockaddr, &len);
2826 if (fd < 0 && errno != EINTR) {
2827 perror("accept");
2828 return;
2829 } else if (fd >= 0) {
2830 #ifndef _WIN32
2831 fcntl(fd, F_SETFD, FD_CLOEXEC);
2832 #endif
2833 break;
2834 }
2835 }
2837 /* reduce latency by disabling Nagle (TCP_NODELAY) */
2838 val = 1;
2839 setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *)&val, sizeof(val));
2841 s = g_malloc0(sizeof(GDBState));
2842 s->c_cpu = first_cpu;
2843 s->g_cpu = first_cpu;
2844 s->fd = fd;
2845 gdb_has_xml = 0;
2847 gdbserver_state = s;
2849 fcntl(fd, F_SETFL, O_NONBLOCK);
2850 }
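/* Create a TCP socket listening on the given port (all interfaces) and
 * return its file descriptor, or -1 on error. */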
2852 static int gdbserver_open(int port)
2853 {
2854 struct sockaddr_in sockaddr;
2855 int fd, val, ret;
2857 fd = socket(PF_INET, SOCK_STREAM, 0);
2858 if (fd < 0) {
2859 perror("socket");
2860 return -1;
2861 }
2862 #ifndef _WIN32
2863 fcntl(fd, F_SETFD, FD_CLOEXEC);
2864 #endif
2866 /* allow fast reuse */
2867 val = 1;
2868 setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *)&val, sizeof(val));
2870 sockaddr.sin_family = AF_INET;
2871 sockaddr.sin_port = htons(port);
2872 sockaddr.sin_addr.s_addr = 0;
2873 ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
2874 if (ret < 0) {
2875 perror("bind");
2876 close(fd);
2877 return -1;
2878 }
2879 ret = listen(fd, 0);
2880 if (ret < 0) {
2881 perror("listen");
2882 close(fd);
2883 return -1;
2884 }
2885 return fd;
2886 }
2888 int gdbserver_start(int port)
2889 {
2890 gdbserver_fd = gdbserver_open(port);
2891 if (gdbserver_fd < 0)
2892 return -1;
2893 /* accept connections */
2894 gdb_accept();
2895 return 0;
2896 }
2898 /* Disable gdb stub for child processes. */
2899 void gdbserver_fork(CPUArchState *env)
2900 {
2901 GDBState *s = gdbserver_state;
2902 if (gdbserver_fd < 0 || s->fd < 0)
2903 return;
2904 close(s->fd);
2905 s->fd = -1;
2906 cpu_breakpoint_remove_all(env, BP_GDB);
2907 cpu_watchpoint_remove_all(env, BP_GDB);
2908 }
2909 #else
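/* System emulation: the gdb stub is driven through a QEMU character
 * device, so the transport is handled by the chardev callbacks below. */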
2910 static int gdb_chr_can_receive(void *opaque)
2911 {
2912 /* We can handle an arbitrarily large amount of data.
2913 Pick the maximum packet size, which is as good as anything. */
2914 return MAX_PACKET_LENGTH;
2915 }
2917 static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
2918 {
2919 int i;
2921 for (i = 0; i < size; i++) {
2922 gdb_read_byte(gdbserver_state, buf[i]);
2923 }
2924 }
2926 static void gdb_chr_event(void *opaque, int event)
2927 {
2928 switch (event) {
2929 case CHR_EVENT_OPENED:
2930 vm_stop(RUN_STATE_PAUSED);
2931 gdb_has_xml = 0;
2932 break;
2933 default:
2934 break;
2935 }
2936 }
2938 static void gdb_monitor_output(GDBState *s, const char *msg, int len)
2939 {
2940 char buf[MAX_PACKET_LENGTH];
2942 buf[0] = 'O';
2943 if (len > (MAX_PACKET_LENGTH/2) - 1)
2944 len = (MAX_PACKET_LENGTH/2) - 1;
2945 memtohex(buf + 1, (uint8_t *)msg, len);
2946 put_packet(s, buf);
2947 }
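/* chr_write handler for the gdb monitor chardev: monitor output is
 * split into chunks small enough to fit in an 'O' (console output)
 * packet and forwarded to the debugger. */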
2949 static int gdb_monitor_write(CharDriverState *chr, const uint8_t *buf, int len)
2950 {
2951 const char *p = (const char *)buf;
2952 int max_sz;
2954 max_sz = (sizeof(gdbserver_state->last_packet) - 2) / 2;
2955 for (;;) {
2956 if (len <= max_sz) {
2957 gdb_monitor_output(gdbserver_state, p, len);
2958 break;
2960 gdb_monitor_output(gdbserver_state, p, max_sz);
2961 p += max_sz;
2962 len -= max_sz;
2963 }
2964 return len;
2965 }
2967 #ifndef _WIN32
2968 static void gdb_sigterm_handler(int signal)
2969 {
2970 if (runstate_is_running()) {
2971 vm_stop(RUN_STATE_PAUSED);
2972 }
2973 }
2974 #endif
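/* Start the system-emulation gdb stub on the given character device
 * specification (e.g. "tcp::1234" or "stdio"); "none" sets up the stub
 * state without attaching a backend. */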
2976 int gdbserver_start(const char *device)
2977 {
2978 GDBState *s;
2979 char gdbstub_device_name[128];
2980 CharDriverState *chr = NULL;
2981 CharDriverState *mon_chr;
2983 if (!device)
2984 return -1;
2985 if (strcmp(device, "none") != 0) {
2986 if (strstart(device, "tcp:", NULL)) {
2987 /* enforce required TCP attributes */
2988 snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
2989 "%s,nowait,nodelay,server", device);
2990 device = gdbstub_device_name;
2991 }
2992 #ifndef _WIN32
2993 else if (strcmp(device, "stdio") == 0) {
2994 struct sigaction act;
2996 memset(&act, 0, sizeof(act));
2997 act.sa_handler = gdb_sigterm_handler;
2998 sigaction(SIGINT, &act, NULL);
2999 }
3000 #endif
3001 chr = qemu_chr_new("gdb", device, NULL);
3002 if (!chr)
3003 return -1;
3005 qemu_chr_add_handlers(chr, gdb_chr_can_receive, gdb_chr_receive,
3006 gdb_chr_event, NULL);
3007 }
3009 s = gdbserver_state;
3010 if (!s) {
3011 s = g_malloc0(sizeof(GDBState));
3012 gdbserver_state = s;
3014 qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);
3016 /* Initialize a monitor terminal for gdb */
3017 mon_chr = g_malloc0(sizeof(*mon_chr));
3018 mon_chr->chr_write = gdb_monitor_write;
3019 monitor_init(mon_chr, 0);
3020 } else {
3021 if (s->chr)
3022 qemu_chr_delete(s->chr);
3023 mon_chr = s->mon_chr;
3024 memset(s, 0, sizeof(GDBState));
3025 }
3026 s->c_cpu = first_cpu;
3027 s->g_cpu = first_cpu;
3028 s->chr = chr;
3029 s->state = chr ? RS_IDLE : RS_INACTIVE;
3030 s->mon_chr = mon_chr;
3031 s->current_syscall_cb = NULL;
3033 return 0;
3034 }
3035 #endif