[qemu/ar7.git] / gdbstub.c
1 /*
2 * gdb server stub
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #include "qemu-common.h"
21 #ifdef CONFIG_USER_ONLY
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <stdarg.h>
25 #include <string.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
30 #include "qemu.h"
31 #else
32 #include "monitor/monitor.h"
33 #include "sysemu/char.h"
34 #include "sysemu/sysemu.h"
35 #include "exec/gdbstub.h"
36 #endif
38 #define MAX_PACKET_LENGTH 4096
40 #include "cpu.h"
41 #include "qemu/sockets.h"
42 #include "sysemu/kvm.h"
43 #include "qemu/bitops.h"
45 static inline int target_memory_rw_debug(CPUState *cpu, target_ulong addr,
46 uint8_t *buf, int len, bool is_write)
48 CPUClass *cc = CPU_GET_CLASS(cpu);
50 if (cc->memory_rw_debug) {
51 return cc->memory_rw_debug(cpu, addr, buf, len, is_write);
53 return cpu_memory_rw_debug(cpu, addr, buf, len, is_write);
56 enum {
57 GDB_SIGNAL_0 = 0,
58 GDB_SIGNAL_INT = 2,
59 GDB_SIGNAL_QUIT = 3,
60 GDB_SIGNAL_TRAP = 5,
61 GDB_SIGNAL_ABRT = 6,
62 GDB_SIGNAL_ALRM = 14,
63 GDB_SIGNAL_IO = 23,
64 GDB_SIGNAL_XCPU = 24,
65 GDB_SIGNAL_UNKNOWN = 143
68 #ifdef CONFIG_USER_ONLY
70 /* Map target signal numbers to GDB protocol signal numbers and vice
71 * versa. For user emulation's currently supported systems, we can
72 * assume most signals are defined.
75 static int gdb_signal_table[] = {
77 TARGET_SIGHUP,
78 TARGET_SIGINT,
79 TARGET_SIGQUIT,
80 TARGET_SIGILL,
81 TARGET_SIGTRAP,
82 TARGET_SIGABRT,
83 -1, /* SIGEMT */
84 TARGET_SIGFPE,
85 TARGET_SIGKILL,
86 TARGET_SIGBUS,
87 TARGET_SIGSEGV,
88 TARGET_SIGSYS,
89 TARGET_SIGPIPE,
90 TARGET_SIGALRM,
91 TARGET_SIGTERM,
92 TARGET_SIGURG,
93 TARGET_SIGSTOP,
94 TARGET_SIGTSTP,
95 TARGET_SIGCONT,
96 TARGET_SIGCHLD,
97 TARGET_SIGTTIN,
98 TARGET_SIGTTOU,
99 TARGET_SIGIO,
100 TARGET_SIGXCPU,
101 TARGET_SIGXFSZ,
102 TARGET_SIGVTALRM,
103 TARGET_SIGPROF,
104 TARGET_SIGWINCH,
105 -1, /* SIGLOST */
106 TARGET_SIGUSR1,
107 TARGET_SIGUSR2,
108 #ifdef TARGET_SIGPWR
109 TARGET_SIGPWR,
110 #else
112 #endif
113 -1, /* SIGPOLL */
125 #ifdef __SIGRTMIN
126 __SIGRTMIN + 1,
127 __SIGRTMIN + 2,
128 __SIGRTMIN + 3,
129 __SIGRTMIN + 4,
130 __SIGRTMIN + 5,
131 __SIGRTMIN + 6,
132 __SIGRTMIN + 7,
133 __SIGRTMIN + 8,
134 __SIGRTMIN + 9,
135 __SIGRTMIN + 10,
136 __SIGRTMIN + 11,
137 __SIGRTMIN + 12,
138 __SIGRTMIN + 13,
139 __SIGRTMIN + 14,
140 __SIGRTMIN + 15,
141 __SIGRTMIN + 16,
142 __SIGRTMIN + 17,
143 __SIGRTMIN + 18,
144 __SIGRTMIN + 19,
145 __SIGRTMIN + 20,
146 __SIGRTMIN + 21,
147 __SIGRTMIN + 22,
148 __SIGRTMIN + 23,
149 __SIGRTMIN + 24,
150 __SIGRTMIN + 25,
151 __SIGRTMIN + 26,
152 __SIGRTMIN + 27,
153 __SIGRTMIN + 28,
154 __SIGRTMIN + 29,
155 __SIGRTMIN + 30,
156 __SIGRTMIN + 31,
157 -1, /* SIGCANCEL */
158 __SIGRTMIN,
159 __SIGRTMIN + 32,
160 __SIGRTMIN + 33,
161 __SIGRTMIN + 34,
162 __SIGRTMIN + 35,
163 __SIGRTMIN + 36,
164 __SIGRTMIN + 37,
165 __SIGRTMIN + 38,
166 __SIGRTMIN + 39,
167 __SIGRTMIN + 40,
168 __SIGRTMIN + 41,
169 __SIGRTMIN + 42,
170 __SIGRTMIN + 43,
171 __SIGRTMIN + 44,
172 __SIGRTMIN + 45,
173 __SIGRTMIN + 46,
174 __SIGRTMIN + 47,
175 __SIGRTMIN + 48,
176 __SIGRTMIN + 49,
177 __SIGRTMIN + 50,
178 __SIGRTMIN + 51,
179 __SIGRTMIN + 52,
180 __SIGRTMIN + 53,
181 __SIGRTMIN + 54,
182 __SIGRTMIN + 55,
183 __SIGRTMIN + 56,
184 __SIGRTMIN + 57,
185 __SIGRTMIN + 58,
186 __SIGRTMIN + 59,
187 __SIGRTMIN + 60,
188 __SIGRTMIN + 61,
189 __SIGRTMIN + 62,
190 __SIGRTMIN + 63,
191 __SIGRTMIN + 64,
192 __SIGRTMIN + 65,
193 __SIGRTMIN + 66,
194 __SIGRTMIN + 67,
195 __SIGRTMIN + 68,
196 __SIGRTMIN + 69,
197 __SIGRTMIN + 70,
198 __SIGRTMIN + 71,
199 __SIGRTMIN + 72,
200 __SIGRTMIN + 73,
201 __SIGRTMIN + 74,
202 __SIGRTMIN + 75,
203 __SIGRTMIN + 76,
204 __SIGRTMIN + 77,
205 __SIGRTMIN + 78,
206 __SIGRTMIN + 79,
207 __SIGRTMIN + 80,
208 __SIGRTMIN + 81,
209 __SIGRTMIN + 82,
210 __SIGRTMIN + 83,
211 __SIGRTMIN + 84,
212 __SIGRTMIN + 85,
213 __SIGRTMIN + 86,
214 __SIGRTMIN + 87,
215 __SIGRTMIN + 88,
216 __SIGRTMIN + 89,
217 __SIGRTMIN + 90,
218 __SIGRTMIN + 91,
219 __SIGRTMIN + 92,
220 __SIGRTMIN + 93,
221 __SIGRTMIN + 94,
222 __SIGRTMIN + 95,
223 -1, /* SIGINFO */
224 -1, /* UNKNOWN */
225 -1, /* DEFAULT */
232 #endif
234 #else
235 /* In system mode we only need SIGINT and SIGTRAP; other signals
236 are not yet supported. */
238 enum {
239 TARGET_SIGINT = 2,
240 TARGET_SIGTRAP = 5
243 static int gdb_signal_table[] = {
246 TARGET_SIGINT,
249 TARGET_SIGTRAP
251 #endif
253 #ifdef CONFIG_USER_ONLY
254 static int target_signal_to_gdb (int sig)
256 int i;
257 for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++)
258 if (gdb_signal_table[i] == sig)
259 return i;
260 return GDB_SIGNAL_UNKNOWN;
262 #endif
264 static int gdb_signal_to_target (int sig)
266 if (sig < ARRAY_SIZE (gdb_signal_table))
267 return gdb_signal_table[sig];
268 else
269 return -1;
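/* Illustrative note (not part of the original file): gdb_signal_table is
 * indexed by GDB protocol signal number and holds the matching target
 * signal, so the two helpers are inverse lookups.  For example:
 *
 *     gdb_signal_to_target(GDB_SIGNAL_TRAP);   // -> TARGET_SIGTRAP
 *     target_signal_to_gdb(TARGET_SIGINT);     // -> GDB_SIGNAL_INT (user mode)
 *
 * Entries of -1 mark GDB signals with no target equivalent, and a target
 * signal that is absent from the table maps to GDB_SIGNAL_UNKNOWN.
 */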
272 //#define DEBUG_GDB
274 typedef struct GDBRegisterState {
275 int base_reg;
276 int num_regs;
277 gdb_reg_cb get_reg;
278 gdb_reg_cb set_reg;
279 const char *xml;
280 struct GDBRegisterState *next;
281 } GDBRegisterState;
283 enum RSState {
284 RS_INACTIVE,
285 RS_IDLE,
286 RS_GETLINE,
287 RS_CHKSUM1,
288 RS_CHKSUM2,
290 typedef struct GDBState {
291 CPUState *c_cpu; /* current CPU for step/continue ops */
292 CPUState *g_cpu; /* current CPU for other ops */
293 CPUState *query_cpu; /* for q{f|s}ThreadInfo */
294 enum RSState state; /* parsing state */
295 char line_buf[MAX_PACKET_LENGTH];
296 int line_buf_index;
297 int line_csum;
298 uint8_t last_packet[MAX_PACKET_LENGTH + 4];
299 int last_packet_len;
300 int signal;
301 #ifdef CONFIG_USER_ONLY
302 int fd;
303 int running_state;
304 #else
305 CharDriverState *chr;
306 CharDriverState *mon_chr;
307 #endif
308 char syscall_buf[256];
309 gdb_syscall_complete_cb current_syscall_cb;
310 } GDBState;
312 /* By default use no IRQs and no timers while single stepping so as to
313 * make single stepping like an ICE HW step.
315 static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER;
317 static GDBState *gdbserver_state;
319 /* This is an ugly hack to cope with both new and old gdb.
320 If gdb sends qXfer:features:read then assume we're talking to a newish
321 gdb that understands target descriptions. */
322 static int gdb_has_xml;
324 #ifdef CONFIG_USER_ONLY
325 /* XXX: This is not thread safe. Do we care? */
326 static int gdbserver_fd = -1;
328 static int get_char(GDBState *s)
330 uint8_t ch;
331 int ret;
333 for(;;) {
334 ret = qemu_recv(s->fd, &ch, 1, 0);
335 if (ret < 0) {
336 if (errno == ECONNRESET)
337 s->fd = -1;
338 if (errno != EINTR && errno != EAGAIN)
339 return -1;
340 } else if (ret == 0) {
341 close(s->fd);
342 s->fd = -1;
343 return -1;
344 } else {
345 break;
348 return ch;
350 #endif
352 static enum {
353 GDB_SYS_UNKNOWN,
354 GDB_SYS_ENABLED,
355 GDB_SYS_DISABLED,
356 } gdb_syscall_mode;
358 /* If gdb is connected when the first semihosting syscall occurs then use
359 remote gdb syscalls. Otherwise use native file IO. */
360 int use_gdb_syscalls(void)
362 if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
363 gdb_syscall_mode = (gdbserver_state ? GDB_SYS_ENABLED
364 : GDB_SYS_DISABLED);
366 return gdb_syscall_mode == GDB_SYS_ENABLED;
369 /* Resume execution. */
370 static inline void gdb_continue(GDBState *s)
372 #ifdef CONFIG_USER_ONLY
373 s->running_state = 1;
374 #else
375 if (runstate_check(RUN_STATE_GUEST_PANICKED)) {
376 runstate_set(RUN_STATE_DEBUG);
378 if (!runstate_needs_reset()) {
379 vm_start();
381 #endif
384 static void put_buffer(GDBState *s, const uint8_t *buf, int len)
386 #ifdef CONFIG_USER_ONLY
387 int ret;
389 while (len > 0) {
390 ret = send(s->fd, buf, len, 0);
391 if (ret < 0) {
392 if (errno != EINTR && errno != EAGAIN)
393 return;
394 } else {
395 buf += ret;
396 len -= ret;
399 #else
400 qemu_chr_fe_write(s->chr, buf, len);
401 #endif
404 static inline int fromhex(int v)
406 if (v >= '0' && v <= '9')
407 return v - '0';
408 else if (v >= 'A' && v <= 'F')
409 return v - 'A' + 10;
410 else if (v >= 'a' && v <= 'f')
411 return v - 'a' + 10;
412 else
413 return 0;
416 static inline int tohex(int v)
418 if (v < 10)
419 return v + '0';
420 else
421 return v - 10 + 'a';
424 static void memtohex(char *buf, const uint8_t *mem, int len)
426 int i, c;
427 char *q;
428 q = buf;
429 for(i = 0; i < len; i++) {
430 c = mem[i];
431 *q++ = tohex(c >> 4);
432 *q++ = tohex(c & 0xf);
434 *q = '\0';
437 static void hextomem(uint8_t *mem, const char *buf, int len)
439 int i;
441 for(i = 0; i < len; i++) {
442 mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
443 buf += 2;
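/* Illustrative example (not part of the original file): the remote
 * protocol transports raw bytes as pairs of lowercase hex digits, so the
 * two helpers above are exact inverses of each other:
 *
 *     uint8_t mem[2] = { 0xde, 0xad };
 *     char hex[5];
 *     memtohex(hex, mem, 2);        // hex == "dead"
 *     hextomem(mem, "beef", 2);     // mem == { 0xbe, 0xef }
 */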
447 /* return -1 if error, 0 if OK */
448 static int put_packet_binary(GDBState *s, const char *buf, int len)
450 int csum, i;
451 uint8_t *p;
453 for(;;) {
454 p = s->last_packet;
455 *(p++) = '$';
456 memcpy(p, buf, len);
457 p += len;
458 csum = 0;
459 for(i = 0; i < len; i++) {
460 csum += buf[i];
462 *(p++) = '#';
463 *(p++) = tohex((csum >> 4) & 0xf);
464 *(p++) = tohex((csum) & 0xf);
466 s->last_packet_len = p - s->last_packet;
467 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
469 #ifdef CONFIG_USER_ONLY
470 i = get_char(s);
471 if (i < 0)
472 return -1;
473 if (i == '+')
474 break;
475 #else
476 break;
477 #endif
479 return 0;
482 /* return -1 if error, 0 if OK */
483 static int put_packet(GDBState *s, const char *buf)
485 #ifdef DEBUG_GDB
486 printf("reply='%s'\n", buf);
487 #endif
489 return put_packet_binary(s, buf, strlen(buf));
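/* Illustrative example (not part of the original file): put_packet_binary
 * frames a payload as '$' <payload> '#' <two hex digits of the payload
 * byte sum modulo 256>.  For instance:
 *
 *     put_packet(s, "g");     // sends "$g#67"   ('g' == 0x67)
 *     put_packet(s, "OK");    // sends "$OK#9a"  (0x4f + 0x4b == 0x9a)
 *
 * In user-mode builds the stub then waits for gdb's '+' acknowledgement
 * before returning.
 */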
492 /* The GDB remote protocol transfers values in target byte order. This means
493 we can use the raw memory access routines to access the value buffer.
494 Conveniently, these also handle the case where the buffer is mis-aligned.
496 #define GET_REG8(val) do { \
497 stb_p(mem_buf, val); \
498 return 1; \
499 } while(0)
500 #define GET_REG16(val) do { \
501 stw_p(mem_buf, val); \
502 return 2; \
503 } while(0)
504 #define GET_REG32(val) do { \
505 stl_p(mem_buf, val); \
506 return 4; \
507 } while(0)
508 #define GET_REG64(val) do { \
509 stq_p(mem_buf, val); \
510 return 8; \
511 } while(0)
513 #if TARGET_LONG_BITS == 64
514 #define GET_REGL(val) GET_REG64(val)
515 #define ldtul_p(addr) ldq_p(addr)
516 #else
517 #define GET_REGL(val) GET_REG32(val)
518 #define ldtul_p(addr) ldl_p(addr)
519 #endif
521 #if defined(TARGET_I386)
523 #ifdef TARGET_X86_64
524 static const int gpr_map[16] = {
525 R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
526 8, 9, 10, 11, 12, 13, 14, 15
528 #else
529 #define gpr_map gpr_map32
530 #endif
531 static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
533 #define NUM_CORE_REGS (CPU_NB_REGS * 2 + 25)
535 #define IDX_IP_REG CPU_NB_REGS
536 #define IDX_FLAGS_REG (IDX_IP_REG + 1)
537 #define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
538 #define IDX_FP_REGS (IDX_SEG_REGS + 6)
539 #define IDX_XMM_REGS (IDX_FP_REGS + 16)
540 #define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
542 static int cpu_gdb_read_register(CPUX86State *env, uint8_t *mem_buf, int n)
544 if (n < CPU_NB_REGS) {
545 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
546 GET_REG64(env->regs[gpr_map[n]]);
547 } else if (n < CPU_NB_REGS32) {
548 GET_REG32(env->regs[gpr_map32[n]]);
550 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
551 #ifdef USE_X86LDOUBLE
552 /* FIXME: byteswap float values - after fixing fpregs layout. */
553 memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
554 #else
555 memset(mem_buf, 0, 10);
556 #endif
557 return 10;
558 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
559 n -= IDX_XMM_REGS;
560 if (n < CPU_NB_REGS32 ||
561 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
562 stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
563 stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
564 return 16;
566 } else {
567 switch (n) {
568 case IDX_IP_REG:
569 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
570 GET_REG64(env->eip);
571 } else {
572 GET_REG32(env->eip);
574 case IDX_FLAGS_REG: GET_REG32(env->eflags);
576 case IDX_SEG_REGS: GET_REG32(env->segs[R_CS].selector);
577 case IDX_SEG_REGS + 1: GET_REG32(env->segs[R_SS].selector);
578 case IDX_SEG_REGS + 2: GET_REG32(env->segs[R_DS].selector);
579 case IDX_SEG_REGS + 3: GET_REG32(env->segs[R_ES].selector);
580 case IDX_SEG_REGS + 4: GET_REG32(env->segs[R_FS].selector);
581 case IDX_SEG_REGS + 5: GET_REG32(env->segs[R_GS].selector);
583 case IDX_FP_REGS + 8: GET_REG32(env->fpuc);
584 case IDX_FP_REGS + 9: GET_REG32((env->fpus & ~0x3800) |
585 (env->fpstt & 0x7) << 11);
586 case IDX_FP_REGS + 10: GET_REG32(0); /* ftag */
587 case IDX_FP_REGS + 11: GET_REG32(0); /* fiseg */
588 case IDX_FP_REGS + 12: GET_REG32(0); /* fioff */
589 case IDX_FP_REGS + 13: GET_REG32(0); /* foseg */
590 case IDX_FP_REGS + 14: GET_REG32(0); /* fooff */
591 case IDX_FP_REGS + 15: GET_REG32(0); /* fop */
593 case IDX_MXCSR_REG: GET_REG32(env->mxcsr);
596 return 0;
599 static int cpu_x86_gdb_load_seg(CPUX86State *env, int sreg, uint8_t *mem_buf)
601 uint16_t selector = ldl_p(mem_buf);
603 if (selector != env->segs[sreg].selector) {
604 #if defined(CONFIG_USER_ONLY)
605 cpu_x86_load_seg(env, sreg, selector);
606 #else
607 unsigned int limit, flags;
608 target_ulong base;
610 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
611 base = selector << 4;
612 limit = 0xffff;
613 flags = 0;
614 } else {
615 if (!cpu_x86_get_descr_debug(env, selector, &base, &limit, &flags))
616 return 4;
618 cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
619 #endif
621 return 4;
624 static int cpu_gdb_write_register(CPUX86State *env, uint8_t *mem_buf, int n)
626 uint32_t tmp;
628 if (n < CPU_NB_REGS) {
629 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
630 env->regs[gpr_map[n]] = ldtul_p(mem_buf);
631 return sizeof(target_ulong);
632 } else if (n < CPU_NB_REGS32) {
633 n = gpr_map32[n];
634 env->regs[n] &= ~0xffffffffUL;
635 env->regs[n] |= (uint32_t)ldl_p(mem_buf);
636 return 4;
638 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
639 #ifdef USE_X86LDOUBLE
640 /* FIXME: byteswap float values - after fixing fpregs layout. */
641 memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
642 #endif
643 return 10;
644 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
645 n -= IDX_XMM_REGS;
646 if (n < CPU_NB_REGS32 ||
647 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
648 env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
649 env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
650 return 16;
652 } else {
653 switch (n) {
654 case IDX_IP_REG:
655 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
656 env->eip = ldq_p(mem_buf);
657 return 8;
658 } else {
659 env->eip &= ~0xffffffffUL;
660 env->eip |= (uint32_t)ldl_p(mem_buf);
661 return 4;
663 case IDX_FLAGS_REG:
664 env->eflags = ldl_p(mem_buf);
665 return 4;
667 case IDX_SEG_REGS: return cpu_x86_gdb_load_seg(env, R_CS, mem_buf);
668 case IDX_SEG_REGS + 1: return cpu_x86_gdb_load_seg(env, R_SS, mem_buf);
669 case IDX_SEG_REGS + 2: return cpu_x86_gdb_load_seg(env, R_DS, mem_buf);
670 case IDX_SEG_REGS + 3: return cpu_x86_gdb_load_seg(env, R_ES, mem_buf);
671 case IDX_SEG_REGS + 4: return cpu_x86_gdb_load_seg(env, R_FS, mem_buf);
672 case IDX_SEG_REGS + 5: return cpu_x86_gdb_load_seg(env, R_GS, mem_buf);
674 case IDX_FP_REGS + 8:
675 env->fpuc = ldl_p(mem_buf);
676 return 4;
677 case IDX_FP_REGS + 9:
678 tmp = ldl_p(mem_buf);
679 env->fpstt = (tmp >> 11) & 7;
680 env->fpus = tmp & ~0x3800;
681 return 4;
682 case IDX_FP_REGS + 10: /* ftag */ return 4;
683 case IDX_FP_REGS + 11: /* fiseg */ return 4;
684 case IDX_FP_REGS + 12: /* fioff */ return 4;
685 case IDX_FP_REGS + 13: /* foseg */ return 4;
686 case IDX_FP_REGS + 14: /* fooff */ return 4;
687 case IDX_FP_REGS + 15: /* fop */ return 4;
689 case IDX_MXCSR_REG:
690 env->mxcsr = ldl_p(mem_buf);
691 return 4;
694 /* Unrecognised register. */
695 return 0;
698 #elif defined (TARGET_PPC)
700 /* Old gdb always expects FP registers. Newer (xml-aware) gdb only
701 expects whatever the target description contains. Due to a
702 historical mishap the FP registers appear in between core integer
703 regs and PC, MSR, CR, and so forth. We hack round this by giving the
704 FP regs zero size when talking to a newer gdb. */
705 #define NUM_CORE_REGS 71
706 #if defined (TARGET_PPC64)
707 #define GDB_CORE_XML "power64-core.xml"
708 #else
709 #define GDB_CORE_XML "power-core.xml"
710 #endif
712 static int cpu_gdb_read_register(CPUPPCState *env, uint8_t *mem_buf, int n)
714 if (n < 32) {
715 /* gprs */
716 GET_REGL(env->gpr[n]);
717 } else if (n < 64) {
718 /* fprs */
719 if (gdb_has_xml)
720 return 0;
721 stfq_p(mem_buf, env->fpr[n-32]);
722 return 8;
723 } else {
724 switch (n) {
725 case 64: GET_REGL(env->nip);
726 case 65: GET_REGL(env->msr);
727 case 66:
729 uint32_t cr = 0;
730 int i;
731 for (i = 0; i < 8; i++)
732 cr |= env->crf[i] << (32 - ((i + 1) * 4));
733 GET_REG32(cr);
735 case 67: GET_REGL(env->lr);
736 case 68: GET_REGL(env->ctr);
737 case 69: GET_REGL(env->xer);
738 case 70:
740 if (gdb_has_xml)
741 return 0;
742 GET_REG32(env->fpscr);
746 return 0;
749 static int cpu_gdb_write_register(CPUPPCState *env, uint8_t *mem_buf, int n)
751 if (n < 32) {
752 /* gprs */
753 env->gpr[n] = ldtul_p(mem_buf);
754 return sizeof(target_ulong);
755 } else if (n < 64) {
756 /* fprs */
757 if (gdb_has_xml)
758 return 0;
759 env->fpr[n-32] = ldfq_p(mem_buf);
760 return 8;
761 } else {
762 switch (n) {
763 case 64:
764 env->nip = ldtul_p(mem_buf);
765 return sizeof(target_ulong);
766 case 65:
767 ppc_store_msr(env, ldtul_p(mem_buf));
768 return sizeof(target_ulong);
769 case 66:
771 uint32_t cr = ldl_p(mem_buf);
772 int i;
773 for (i = 0; i < 8; i++)
774 env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
775 return 4;
777 case 67:
778 env->lr = ldtul_p(mem_buf);
779 return sizeof(target_ulong);
780 case 68:
781 env->ctr = ldtul_p(mem_buf);
782 return sizeof(target_ulong);
783 case 69:
784 env->xer = ldtul_p(mem_buf);
785 return sizeof(target_ulong);
786 case 70:
787 /* fpscr */
788 if (gdb_has_xml)
789 return 0;
790 store_fpscr(env, ldtul_p(mem_buf), 0xffffffff);
791 return sizeof(target_ulong);
794 return 0;
797 #elif defined (TARGET_SPARC)
799 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
800 #define NUM_CORE_REGS 86
801 #else
802 #define NUM_CORE_REGS 72
803 #endif
805 #ifdef TARGET_ABI32
806 #define GET_REGA(val) GET_REG32(val)
807 #else
808 #define GET_REGA(val) GET_REGL(val)
809 #endif
811 static int cpu_gdb_read_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
813 if (n < 8) {
814 /* g0..g7 */
815 GET_REGA(env->gregs[n]);
817 if (n < 32) {
818 /* register window */
819 GET_REGA(env->regwptr[n - 8]);
821 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
822 if (n < 64) {
823 /* fprs */
824 if (n & 1) {
825 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
826 } else {
827 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
830 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
831 switch (n) {
832 case 64: GET_REGA(env->y);
833 case 65: GET_REGA(cpu_get_psr(env));
834 case 66: GET_REGA(env->wim);
835 case 67: GET_REGA(env->tbr);
836 case 68: GET_REGA(env->pc);
837 case 69: GET_REGA(env->npc);
838 case 70: GET_REGA(env->fsr);
839 case 71: GET_REGA(0); /* csr */
840 default: GET_REGA(0);
842 #else
843 if (n < 64) {
844 /* f0-f31 */
845 if (n & 1) {
846 GET_REG32(env->fpr[(n - 32) / 2].l.lower);
847 } else {
848 GET_REG32(env->fpr[(n - 32) / 2].l.upper);
851 if (n < 80) {
852 /* f32-f62 (double width, even numbers only) */
853 GET_REG64(env->fpr[(n - 32) / 2].ll);
855 switch (n) {
856 case 80: GET_REGL(env->pc);
857 case 81: GET_REGL(env->npc);
858 case 82: GET_REGL((cpu_get_ccr(env) << 32) |
859 ((env->asi & 0xff) << 24) |
860 ((env->pstate & 0xfff) << 8) |
861 cpu_get_cwp64(env));
862 case 83: GET_REGL(env->fsr);
863 case 84: GET_REGL(env->fprs);
864 case 85: GET_REGL(env->y);
866 #endif
867 return 0;
870 static int cpu_gdb_write_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
872 #if defined(TARGET_ABI32)
873 abi_ulong tmp;
875 tmp = ldl_p(mem_buf);
876 #else
877 target_ulong tmp;
879 tmp = ldtul_p(mem_buf);
880 #endif
882 if (n < 8) {
883 /* g0..g7 */
884 env->gregs[n] = tmp;
885 } else if (n < 32) {
886 /* register window */
887 env->regwptr[n - 8] = tmp;
889 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
890 else if (n < 64) {
891 /* fprs */
892 /* f0-f31 */
893 if (n & 1) {
894 env->fpr[(n - 32) / 2].l.lower = tmp;
895 } else {
896 env->fpr[(n - 32) / 2].l.upper = tmp;
898 } else {
899 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
900 switch (n) {
901 case 64: env->y = tmp; break;
902 case 65: cpu_put_psr(env, tmp); break;
903 case 66: env->wim = tmp; break;
904 case 67: env->tbr = tmp; break;
905 case 68: env->pc = tmp; break;
906 case 69: env->npc = tmp; break;
907 case 70: env->fsr = tmp; break;
908 default: return 0;
911 return 4;
912 #else
913 else if (n < 64) {
914 /* f0-f31 */
915 tmp = ldl_p(mem_buf);
916 if (n & 1) {
917 env->fpr[(n - 32) / 2].l.lower = tmp;
918 } else {
919 env->fpr[(n - 32) / 2].l.upper = tmp;
921 return 4;
922 } else if (n < 80) {
923 /* f32-f62 (double width, even numbers only) */
924 env->fpr[(n - 32) / 2].ll = tmp;
925 } else {
926 switch (n) {
927 case 80: env->pc = tmp; break;
928 case 81: env->npc = tmp; break;
929 case 82:
930 cpu_put_ccr(env, tmp >> 32);
931 env->asi = (tmp >> 24) & 0xff;
932 env->pstate = (tmp >> 8) & 0xfff;
933 cpu_put_cwp64(env, tmp & 0xff);
934 break;
935 case 83: env->fsr = tmp; break;
936 case 84: env->fprs = tmp; break;
937 case 85: env->y = tmp; break;
938 default: return 0;
941 return 8;
942 #endif
944 #elif defined (TARGET_ARM)
946 /* Old gdb always expects FPA registers. Newer (xml-aware) gdb only expects
947 whatever the target description contains. Due to a historical mishap
948 the FPA registers appear in between core integer regs and the CPSR.
949 We hack round this by giving the FPA regs zero size when talking to a
950 newer gdb. */
951 #define NUM_CORE_REGS 26
952 #define GDB_CORE_XML "arm-core.xml"
954 static int cpu_gdb_read_register(CPUARMState *env, uint8_t *mem_buf, int n)
956 if (n < 16) {
957 /* Core integer register. */
958 GET_REG32(env->regs[n]);
960 if (n < 24) {
961 /* FPA registers. */
962 if (gdb_has_xml)
963 return 0;
964 memset(mem_buf, 0, 12);
965 return 12;
967 switch (n) {
968 case 24:
969 /* FPA status register. */
970 if (gdb_has_xml)
971 return 0;
972 GET_REG32(0);
973 case 25:
974 /* CPSR */
975 GET_REG32(cpsr_read(env));
977 /* Unknown register. */
978 return 0;
981 static int cpu_gdb_write_register(CPUARMState *env, uint8_t *mem_buf, int n)
983 uint32_t tmp;
985 tmp = ldl_p(mem_buf);
987 /* Mask out low bit of PC to work around gdb bugs. This will probably
988 cause problems if we ever implement the Jazelle DBX extensions. */
989 if (n == 15)
990 tmp &= ~1;
992 if (n < 16) {
993 /* Core integer register. */
994 env->regs[n] = tmp;
995 return 4;
997 if (n < 24) { /* 16-23 */
998 /* FPA registers (ignored). */
999 if (gdb_has_xml)
1000 return 0;
1001 return 12;
1003 switch (n) {
1004 case 24:
1005 /* FPA status register (ignored). */
1006 if (gdb_has_xml)
1007 return 0;
1008 return 4;
1009 case 25:
1010 /* CPSR */
1011 cpsr_write (env, tmp, 0xffffffff);
1012 return 4;
1014 /* Unknown register. */
1015 return 0;
1018 #elif defined (TARGET_M68K)
1020 #define NUM_CORE_REGS 18
1022 #define GDB_CORE_XML "cf-core.xml"
1024 static int cpu_gdb_read_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1026 if (n < 8) {
1027 /* D0-D7 */
1028 GET_REG32(env->dregs[n]);
1029 } else if (n < 16) {
1030 /* A0-A7 */
1031 GET_REG32(env->aregs[n - 8]);
1032 } else {
1033 switch (n) {
1034 case 16: GET_REG32(env->sr);
1035 case 17: GET_REG32(env->pc);
1038 /* FP registers not included here because they vary between
1039 ColdFire and m68k. Use XML bits for these. */
1040 return 0;
1043 static int cpu_gdb_write_register(CPUM68KState *env, uint8_t *mem_buf, int n)
1045 uint32_t tmp;
1047 tmp = ldl_p(mem_buf);
1049 if (n < 8) {
1050 /* D0-D7 */
1051 env->dregs[n] = tmp;
1052 } else if (n < 16) {
1053 /* A0-A7 */
1054 env->aregs[n - 8] = tmp;
1055 } else {
1056 switch (n) {
1057 case 16: env->sr = tmp; break;
1058 case 17: env->pc = tmp; break;
1059 default: return 0;
1062 return 4;
1064 #elif defined (TARGET_MIPS)
1066 #define NUM_CORE_REGS 73
1068 static int cpu_gdb_read_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1070 if (n < 32) {
1071 GET_REGL(env->active_tc.gpr[n]);
1073 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
1074 if (n >= 38 && n < 70) {
1075 if (env->CP0_Status & (1 << CP0St_FR))
1076 GET_REGL(env->active_fpu.fpr[n - 38].d);
1077 else
1078 GET_REGL(env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
1080 switch (n) {
1081 case 70: GET_REGL((int32_t)env->active_fpu.fcr31);
1082 case 71: GET_REGL((int32_t)env->active_fpu.fcr0);
1085 switch (n) {
1086 case 32: GET_REGL((int32_t)env->CP0_Status);
1087 case 33: GET_REGL(env->active_tc.LO[0]);
1088 case 34: GET_REGL(env->active_tc.HI[0]);
1089 case 35: GET_REGL(env->CP0_BadVAddr);
1090 case 36: GET_REGL((int32_t)env->CP0_Cause);
1091 case 37: GET_REGL(env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16));
1092 case 72: GET_REGL(0); /* fp */
1093 case 89: GET_REGL((int32_t)env->CP0_PRid);
1095 if (n >= 73 && n <= 88) {
1096 /* 16 embedded regs. */
1097 GET_REGL(0);
1100 return 0;
1103 /* convert MIPS rounding mode in FCR31 to IEEE library */
1104 static unsigned int ieee_rm[] =
1106 float_round_nearest_even,
1107 float_round_to_zero,
1108 float_round_up,
1109 float_round_down
1111 #define RESTORE_ROUNDING_MODE \
1112 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
1114 static int cpu_gdb_write_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
1116 target_ulong tmp;
1118 tmp = ldtul_p(mem_buf);
1120 if (n < 32) {
1121 env->active_tc.gpr[n] = tmp;
1122 return sizeof(target_ulong);
1124 if (env->CP0_Config1 & (1 << CP0C1_FP)
1125 && n >= 38 && n < 73) {
1126 if (n < 70) {
1127 if (env->CP0_Status & (1 << CP0St_FR))
1128 env->active_fpu.fpr[n - 38].d = tmp;
1129 else
1130 env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
1132 switch (n) {
1133 case 70:
1134 env->active_fpu.fcr31 = tmp & 0xFF83FFFF;
1135 /* set rounding mode */
1136 RESTORE_ROUNDING_MODE;
1137 break;
1138 case 71: env->active_fpu.fcr0 = tmp; break;
1140 return sizeof(target_ulong);
1142 switch (n) {
1143 case 32: env->CP0_Status = tmp; break;
1144 case 33: env->active_tc.LO[0] = tmp; break;
1145 case 34: env->active_tc.HI[0] = tmp; break;
1146 case 35: env->CP0_BadVAddr = tmp; break;
1147 case 36: env->CP0_Cause = tmp; break;
1148 case 37:
1149 env->active_tc.PC = tmp & ~(target_ulong)1;
1150 if (tmp & 1) {
1151 env->hflags |= MIPS_HFLAG_M16;
1152 } else {
1153 env->hflags &= ~(MIPS_HFLAG_M16);
1155 break;
1156 case 72: /* fp, ignored */ break;
1157 default:
1158 if (n > 89)
1159 return 0;
1160 /* Other registers are readonly. Ignore writes. */
1161 break;
1164 return sizeof(target_ulong);
1166 #elif defined(TARGET_OPENRISC)
1168 #define NUM_CORE_REGS (32 + 3)
1170 static int cpu_gdb_read_register(CPUOpenRISCState *env, uint8_t *mem_buf, int n)
1172 if (n < 32) {
1173 GET_REG32(env->gpr[n]);
1174 } else {
1175 switch (n) {
1176 case 32: /* PPC */
1177 GET_REG32(env->ppc);
1178 break;
1180 case 33: /* NPC */
1181 GET_REG32(env->npc);
1182 break;
1184 case 34: /* SR */
1185 GET_REG32(env->sr);
1186 break;
1188 default:
1189 break;
1192 return 0;
1195 static int cpu_gdb_write_register(CPUOpenRISCState *env,
1196 uint8_t *mem_buf, int n)
1198 uint32_t tmp;
1200 if (n > NUM_CORE_REGS) {
1201 return 0;
1204 tmp = ldl_p(mem_buf);
1206 if (n < 32) {
1207 env->gpr[n] = tmp;
1208 } else {
1209 switch (n) {
1210 case 32: /* PPC */
1211 env->ppc = tmp;
1212 break;
1214 case 33: /* NPC */
1215 env->npc = tmp;
1216 break;
1218 case 34: /* SR */
1219 env->sr = tmp;
1220 break;
1222 default:
1223 break;
1226 return 4;
1228 #elif defined (TARGET_SH4)
1230 /* Hint: Use "set architecture sh4" in GDB to see fpu registers */
1231 /* FIXME: We should use XML for this. */
1233 #define NUM_CORE_REGS 59
1235 static int cpu_gdb_read_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1237 switch (n) {
1238 case 0 ... 7:
1239 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1240 GET_REGL(env->gregs[n + 16]);
1241 } else {
1242 GET_REGL(env->gregs[n]);
1244 case 8 ... 15:
1245 GET_REGL(env->gregs[n]);
1246 case 16:
1247 GET_REGL(env->pc);
1248 case 17:
1249 GET_REGL(env->pr);
1250 case 18:
1251 GET_REGL(env->gbr);
1252 case 19:
1253 GET_REGL(env->vbr);
1254 case 20:
1255 GET_REGL(env->mach);
1256 case 21:
1257 GET_REGL(env->macl);
1258 case 22:
1259 GET_REGL(env->sr);
1260 case 23:
1261 GET_REGL(env->fpul);
1262 case 24:
1263 GET_REGL(env->fpscr);
1264 case 25 ... 40:
1265 if (env->fpscr & FPSCR_FR) {
1266 stfl_p(mem_buf, env->fregs[n - 9]);
1267 } else {
1268 stfl_p(mem_buf, env->fregs[n - 25]);
1270 return 4;
1271 case 41:
1272 GET_REGL(env->ssr);
1273 case 42:
1274 GET_REGL(env->spc);
1275 case 43 ... 50:
1276 GET_REGL(env->gregs[n - 43]);
1277 case 51 ... 58:
1278 GET_REGL(env->gregs[n - (51 - 16)]);
1281 return 0;
1284 static int cpu_gdb_write_register(CPUSH4State *env, uint8_t *mem_buf, int n)
1286 switch (n) {
1287 case 0 ... 7:
1288 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1289 env->gregs[n + 16] = ldl_p(mem_buf);
1290 } else {
1291 env->gregs[n] = ldl_p(mem_buf);
1293 break;
1294 case 8 ... 15:
1295 env->gregs[n] = ldl_p(mem_buf);
1296 break;
1297 case 16:
1298 env->pc = ldl_p(mem_buf);
1299 break;
1300 case 17:
1301 env->pr = ldl_p(mem_buf);
1302 break;
1303 case 18:
1304 env->gbr = ldl_p(mem_buf);
1305 break;
1306 case 19:
1307 env->vbr = ldl_p(mem_buf);
1308 break;
1309 case 20:
1310 env->mach = ldl_p(mem_buf);
1311 break;
1312 case 21:
1313 env->macl = ldl_p(mem_buf);
1314 break;
1315 case 22:
1316 env->sr = ldl_p(mem_buf);
1317 break;
1318 case 23:
1319 env->fpul = ldl_p(mem_buf);
1320 break;
1321 case 24:
1322 env->fpscr = ldl_p(mem_buf);
1323 break;
1324 case 25 ... 40:
1325 if (env->fpscr & FPSCR_FR) {
1326 env->fregs[n - 9] = ldfl_p(mem_buf);
1327 } else {
1328 env->fregs[n - 25] = ldfl_p(mem_buf);
1330 break;
1331 case 41:
1332 env->ssr = ldl_p(mem_buf);
1333 break;
1334 case 42:
1335 env->spc = ldl_p(mem_buf);
1336 break;
1337 case 43 ... 50:
1338 env->gregs[n - 43] = ldl_p(mem_buf);
1339 break;
1340 case 51 ... 58:
1341 env->gregs[n - (51 - 16)] = ldl_p(mem_buf);
1342 break;
1343 default: return 0;
1346 return 4;
1348 #elif defined (TARGET_MICROBLAZE)
1350 #define NUM_CORE_REGS (32 + 5)
1352 static int cpu_gdb_read_register(CPUMBState *env, uint8_t *mem_buf, int n)
1354 if (n < 32) {
1355 GET_REG32(env->regs[n]);
1356 } else {
1357 GET_REG32(env->sregs[n - 32]);
1359 return 0;
1362 static int cpu_gdb_write_register(CPUMBState *env, uint8_t *mem_buf, int n)
1364 uint32_t tmp;
1366 if (n > NUM_CORE_REGS)
1367 return 0;
1369 tmp = ldl_p(mem_buf);
1371 if (n < 32) {
1372 env->regs[n] = tmp;
1373 } else {
1374 env->sregs[n - 32] = tmp;
1376 return 4;
1378 #elif defined (TARGET_CRIS)
1380 #define NUM_CORE_REGS 49
1382 static int
1383 read_register_crisv10(CPUCRISState *env, uint8_t *mem_buf, int n)
1385 if (n < 15) {
1386 GET_REG32(env->regs[n]);
1389 if (n == 15) {
1390 GET_REG32(env->pc);
1393 if (n < 32) {
1394 switch (n) {
1395 case 16:
1396 GET_REG8(env->pregs[n - 16]);
1397 break;
1398 case 17:
1399 GET_REG8(env->pregs[n - 16]);
1400 break;
1401 case 20:
1402 case 21:
1403 GET_REG16(env->pregs[n - 16]);
1404 break;
1405 default:
1406 if (n >= 23) {
1407 GET_REG32(env->pregs[n - 16]);
1409 break;
1412 return 0;
1415 static int cpu_gdb_read_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1417 uint8_t srs;
1419 if (env->pregs[PR_VR] < 32)
1420 return read_register_crisv10(env, mem_buf, n);
1422 srs = env->pregs[PR_SRS];
1423 if (n < 16) {
1424 GET_REG32(env->regs[n]);
1427 if (n >= 21 && n < 32) {
1428 GET_REG32(env->pregs[n - 16]);
1430 if (n >= 33 && n < 49) {
1431 GET_REG32(env->sregs[srs][n - 33]);
1433 switch (n) {
1434 case 16: GET_REG8(env->pregs[0]);
1435 case 17: GET_REG8(env->pregs[1]);
1436 case 18: GET_REG32(env->pregs[2]);
1437 case 19: GET_REG8(srs);
1438 case 20: GET_REG16(env->pregs[4]);
1439 case 32: GET_REG32(env->pc);
1442 return 0;
1445 static int cpu_gdb_write_register(CPUCRISState *env, uint8_t *mem_buf, int n)
1447 uint32_t tmp;
1449 if (n > 49)
1450 return 0;
1452 tmp = ldl_p(mem_buf);
1454 if (n < 16) {
1455 env->regs[n] = tmp;
1458 if (n >= 21 && n < 32) {
1459 env->pregs[n - 16] = tmp;
1462 /* FIXME: Should support function regs be writable? */
1463 switch (n) {
1464 case 16: return 1;
1465 case 17: return 1;
1466 case 18: env->pregs[PR_PID] = tmp; break;
1467 case 19: return 1;
1468 case 20: return 2;
1469 case 32: env->pc = tmp; break;
1472 return 4;
1474 #elif defined (TARGET_ALPHA)
1476 #define NUM_CORE_REGS 67
1478 static int cpu_gdb_read_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1480 uint64_t val;
1481 CPU_DoubleU d;
1483 switch (n) {
1484 case 0 ... 30:
1485 val = env->ir[n];
1486 break;
1487 case 32 ... 62:
1488 d.d = env->fir[n - 32];
1489 val = d.ll;
1490 break;
1491 case 63:
1492 val = cpu_alpha_load_fpcr(env);
1493 break;
1494 case 64:
1495 val = env->pc;
1496 break;
1497 case 66:
1498 val = env->unique;
1499 break;
1500 case 31:
1501 case 65:
1502 /* 31 really is the zero register; 65 is unassigned in the
1503 gdb protocol, but is still required to occupy 8 bytes. */
1504 val = 0;
1505 break;
1506 default:
1507 return 0;
1509 GET_REGL(val);
1512 static int cpu_gdb_write_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
1514 target_ulong tmp = ldtul_p(mem_buf);
1515 CPU_DoubleU d;
1517 switch (n) {
1518 case 0 ... 30:
1519 env->ir[n] = tmp;
1520 break;
1521 case 32 ... 62:
1522 d.ll = tmp;
1523 env->fir[n - 32] = d.d;
1524 break;
1525 case 63:
1526 cpu_alpha_store_fpcr(env, tmp);
1527 break;
1528 case 64:
1529 env->pc = tmp;
1530 break;
1531 case 66:
1532 env->unique = tmp;
1533 break;
1534 case 31:
1535 case 65:
1536 /* 31 really is the zero register; 65 is unassigned in the
1537 gdb protocol, but is still required to occupy 8 bytes. */
1538 break;
1539 default:
1540 return 0;
1542 return 8;
1544 #elif defined (TARGET_S390X)
1546 #define NUM_CORE_REGS S390_NUM_REGS
1548 static int cpu_gdb_read_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1550 uint64_t val;
1551 int cc_op;
1553 switch (n) {
1554 case S390_PSWM_REGNUM:
1555 cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst, env->cc_vr);
1556 val = deposit64(env->psw.mask, 44, 2, cc_op);
1557 GET_REGL(val);
1558 break;
1559 case S390_PSWA_REGNUM:
1560 GET_REGL(env->psw.addr);
1561 break;
1562 case S390_R0_REGNUM ... S390_R15_REGNUM:
1563 GET_REGL(env->regs[n-S390_R0_REGNUM]);
1564 break;
1565 case S390_A0_REGNUM ... S390_A15_REGNUM:
1566 GET_REG32(env->aregs[n-S390_A0_REGNUM]);
1567 break;
1568 case S390_FPC_REGNUM:
1569 GET_REG32(env->fpc);
1570 break;
1571 case S390_F0_REGNUM ... S390_F15_REGNUM:
1572 GET_REG64(env->fregs[n-S390_F0_REGNUM].ll);
1573 break;
1576 return 0;
1579 static int cpu_gdb_write_register(CPUS390XState *env, uint8_t *mem_buf, int n)
1581 target_ulong tmpl;
1582 uint32_t tmp32;
1583 int r = 8;
1584 tmpl = ldtul_p(mem_buf);
1585 tmp32 = ldl_p(mem_buf);
1587 switch (n) {
1588 case S390_PSWM_REGNUM:
1589 env->psw.mask = tmpl;
1590 env->cc_op = extract64(tmpl, 44, 2);
1591 break;
1592 case S390_PSWA_REGNUM:
1593 env->psw.addr = tmpl;
1594 break;
1595 case S390_R0_REGNUM ... S390_R15_REGNUM:
1596 env->regs[n-S390_R0_REGNUM] = tmpl;
1597 break;
1598 case S390_A0_REGNUM ... S390_A15_REGNUM:
1599 env->aregs[n-S390_A0_REGNUM] = tmp32;
1600 r = 4;
1601 break;
1602 case S390_FPC_REGNUM:
1603 env->fpc = tmp32;
1604 r = 4;
1605 break;
1606 case S390_F0_REGNUM ... S390_F15_REGNUM:
1607 env->fregs[n-S390_F0_REGNUM].ll = tmpl;
1608 break;
1609 default:
1610 return 0;
1612 return r;
1614 #elif defined (TARGET_LM32)
1616 #include "hw/lm32/lm32_pic.h"
1617 #define NUM_CORE_REGS (32 + 7)
1619 static int cpu_gdb_read_register(CPULM32State *env, uint8_t *mem_buf, int n)
1621 if (n < 32) {
1622 GET_REG32(env->regs[n]);
1623 } else {
1624 switch (n) {
1625 case 32:
1626 GET_REG32(env->pc);
1627 break;
1628 /* FIXME: put in right exception ID */
1629 case 33:
1630 GET_REG32(0);
1631 break;
1632 case 34:
1633 GET_REG32(env->eba);
1634 break;
1635 case 35:
1636 GET_REG32(env->deba);
1637 break;
1638 case 36:
1639 GET_REG32(env->ie);
1640 break;
1641 case 37:
1642 GET_REG32(lm32_pic_get_im(env->pic_state));
1643 break;
1644 case 38:
1645 GET_REG32(lm32_pic_get_ip(env->pic_state));
1646 break;
1649 return 0;
1652 static int cpu_gdb_write_register(CPULM32State *env, uint8_t *mem_buf, int n)
1654 uint32_t tmp;
1656 if (n > NUM_CORE_REGS) {
1657 return 0;
1660 tmp = ldl_p(mem_buf);
1662 if (n < 32) {
1663 env->regs[n] = tmp;
1664 } else {
1665 switch (n) {
1666 case 32:
1667 env->pc = tmp;
1668 break;
1669 case 34:
1670 env->eba = tmp;
1671 break;
1672 case 35:
1673 env->deba = tmp;
1674 break;
1675 case 36:
1676 env->ie = tmp;
1677 break;
1678 case 37:
1679 lm32_pic_set_im(env->pic_state, tmp);
1680 break;
1681 case 38:
1682 lm32_pic_set_ip(env->pic_state, tmp);
1683 break;
1686 return 4;
1688 #elif defined(TARGET_XTENSA)
1690 /* Use num_core_regs to see only non-privileged registers in an unmodified gdb.
1691 * Use num_regs to see all registers. gdb modification is required for that:
1692 * reset bit 0 in the 'flags' field of the register definitions in the
1693 * gdb/xtensa-config.c inside gdb source tree or inside gdb overlay.
1695 #define NUM_CORE_REGS (env->config->gdb_regmap.num_regs)
1696 #define num_g_regs NUM_CORE_REGS
1698 static int cpu_gdb_read_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1700 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1702 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1703 return 0;
1706 switch (reg->type) {
1707 case 9: /*pc*/
1708 GET_REG32(env->pc);
1709 break;
1711 case 1: /*ar*/
1712 xtensa_sync_phys_from_window(env);
1713 GET_REG32(env->phys_regs[(reg->targno & 0xff) % env->config->nareg]);
1714 break;
1716 case 2: /*SR*/
1717 GET_REG32(env->sregs[reg->targno & 0xff]);
1718 break;
1720 case 3: /*UR*/
1721 GET_REG32(env->uregs[reg->targno & 0xff]);
1722 break;
1724 case 4: /*f*/
1725 GET_REG32(float32_val(env->fregs[reg->targno & 0x0f]));
1726 break;
1728 case 8: /*a*/
1729 GET_REG32(env->regs[reg->targno & 0x0f]);
1730 break;
1732 default:
1733 qemu_log("%s from reg %d of unsupported type %d\n",
1734 __func__, n, reg->type);
1735 return 0;
1739 static int cpu_gdb_write_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
1741 uint32_t tmp;
1742 const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;
1744 if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
1745 return 0;
1748 tmp = ldl_p(mem_buf);
1750 switch (reg->type) {
1751 case 9: /*pc*/
1752 env->pc = tmp;
1753 break;
1755 case 1: /*ar*/
1756 env->phys_regs[(reg->targno & 0xff) % env->config->nareg] = tmp;
1757 xtensa_sync_window_from_phys(env);
1758 break;
1760 case 2: /*SR*/
1761 env->sregs[reg->targno & 0xff] = tmp;
1762 break;
1764 case 3: /*UR*/
1765 env->uregs[reg->targno & 0xff] = tmp;
1766 break;
1768 case 4: /*f*/
1769 env->fregs[reg->targno & 0x0f] = make_float32(tmp);
1770 break;
1772 case 8: /*a*/
1773 env->regs[reg->targno & 0x0f] = tmp;
1774 break;
1776 default:
1777 qemu_log("%s to reg %d of unsupported type %d\n",
1778 __func__, n, reg->type);
1779 return 0;
1782 return 4;
1784 #else
1786 #define NUM_CORE_REGS 0
1788 static int cpu_gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int n)
1790 return 0;
1793 static int cpu_gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int n)
1795 return 0;
1798 #endif
1800 #if !defined(TARGET_XTENSA)
1801 static int num_g_regs = NUM_CORE_REGS;
1802 #endif
1804 #ifdef GDB_CORE_XML
1805 /* Encode data using the encoding for 'x' packets. */
1806 static int memtox(char *buf, const char *mem, int len)
1808 char *p = buf;
1809 char c;
1811 while (len--) {
1812 c = *(mem++);
1813 switch (c) {
1814 case '#': case '$': case '*': case '}':
1815 *(p++) = '}';
1816 *(p++) = c ^ 0x20;
1817 break;
1818 default:
1819 *(p++) = c;
1820 break;
1823 return p - buf;
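/* Illustrative example (not part of the original file): binary 'x'-style
 * data escapes the protocol metacharacters '#', '$', '*' and '}' as a
 * '}' followed by the original byte XORed with 0x20.  For instance the
 * input "a}b" is encoded as "a}]b" ('}' ^ 0x20 == ']'), while all other
 * bytes pass through unchanged.
 */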
1826 static const char *get_feature_xml(const char *p, const char **newp)
1828 size_t len;
1829 int i;
1830 const char *name;
1831 static char target_xml[1024];
1833 len = 0;
1834 while (p[len] && p[len] != ':')
1835 len++;
1836 *newp = p + len;
1838 name = NULL;
1839 if (strncmp(p, "target.xml", len) == 0) {
1840 /* Generate the XML description for this CPU. */
1841 if (!target_xml[0]) {
1842 GDBRegisterState *r;
1843 CPUState *cpu = first_cpu;
1845 snprintf(target_xml, sizeof(target_xml),
1846 "<?xml version=\"1.0\"?>"
1847 "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
1848 "<target>"
1849 "<xi:include href=\"%s\"/>",
1850 GDB_CORE_XML);
1852 for (r = cpu->gdb_regs; r; r = r->next) {
1853 pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
1854 pstrcat(target_xml, sizeof(target_xml), r->xml);
1855 pstrcat(target_xml, sizeof(target_xml), "\"/>");
1857 pstrcat(target_xml, sizeof(target_xml), "</target>");
1859 return target_xml;
1861 for (i = 0; ; i++) {
1862 name = xml_builtin[i][0];
1863 if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len))
1864 break;
1866 return name ? xml_builtin[i][1] : NULL;
1868 #endif
1870 static int gdb_read_register(CPUState *cpu, uint8_t *mem_buf, int reg)
1872 CPUArchState *env = cpu->env_ptr;
1873 GDBRegisterState *r;
1875 if (reg < NUM_CORE_REGS)
1876 return cpu_gdb_read_register(env, mem_buf, reg);
1878 for (r = cpu->gdb_regs; r; r = r->next) {
1879 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1880 return r->get_reg(env, mem_buf, reg - r->base_reg);
1883 return 0;
1886 static int gdb_write_register(CPUState *cpu, uint8_t *mem_buf, int reg)
1888 CPUArchState *env = cpu->env_ptr;
1889 GDBRegisterState *r;
1891 if (reg < NUM_CORE_REGS)
1892 return cpu_gdb_write_register(env, mem_buf, reg);
1894 for (r = cpu->gdb_regs; r; r = r->next) {
1895 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1896 return r->set_reg(env, mem_buf, reg - r->base_reg);
1899 return 0;
1902 #if !defined(TARGET_XTENSA)
1903 /* Register a supplemental set of CPU registers. If g_pos is nonzero it
1904 specifies the first register number and these registers are included in
1905 a standard "g" packet. Direction is relative to gdb, i.e. get_reg is
1906 gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
1909 void gdb_register_coprocessor(CPUState *cpu,
1910 gdb_reg_cb get_reg, gdb_reg_cb set_reg,
1911 int num_regs, const char *xml, int g_pos)
1913 GDBRegisterState *s;
1914 GDBRegisterState **p;
1915 static int last_reg = NUM_CORE_REGS;
1917 p = &cpu->gdb_regs;
1918 while (*p) {
1919 /* Check for duplicates. */
1920 if (strcmp((*p)->xml, xml) == 0)
1921 return;
1922 p = &(*p)->next;
1925 s = g_new0(GDBRegisterState, 1);
1926 s->base_reg = last_reg;
1927 s->num_regs = num_regs;
1928 s->get_reg = get_reg;
1929 s->set_reg = set_reg;
1930 s->xml = xml;
1932 /* Add to end of list. */
1933 last_reg += num_regs;
1934 *p = s;
1935 if (g_pos) {
1936 if (g_pos != s->base_reg) {
1937 fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n"
1938 "Expected %d got %d\n", xml, g_pos, s->base_reg);
1939 } else {
1940 num_g_regs = last_reg;
1944 #endif
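/* Illustrative usage sketch (not part of the original file; the callback
 * and XML names are hypothetical).  A target exposing extra registers
 * through an XML feature file would typically call, at CPU init time:
 *
 *     gdb_register_coprocessor(cpu, my_fpu_gdb_get_reg, my_fpu_gdb_set_reg,
 *                              32, "my-fpu.xml", 0);
 *
 * With g_pos == 0 the extra registers are reachable via 'p'/'P' packets
 * and advertised through qXfer:features:read; a nonzero g_pos also folds
 * them into the standard 'g' packet starting at that register number.
 */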
1946 #ifndef CONFIG_USER_ONLY
1947 static const int xlat_gdb_type[] = {
1948 [GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE,
1949 [GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ,
1950 [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
1952 #endif
1954 static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
1956 CPUState *cpu;
1957 CPUArchState *env;
1958 int err = 0;
1960 if (kvm_enabled()) {
1961 return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1964 switch (type) {
1965 case GDB_BREAKPOINT_SW:
1966 case GDB_BREAKPOINT_HW:
1967 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
1968 env = cpu->env_ptr;
1969 err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
1970 if (err)
1971 break;
1973 return err;
1974 #ifndef CONFIG_USER_ONLY
1975 case GDB_WATCHPOINT_WRITE:
1976 case GDB_WATCHPOINT_READ:
1977 case GDB_WATCHPOINT_ACCESS:
1978 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
1979 env = cpu->env_ptr;
1980 err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
1981 NULL);
1982 if (err)
1983 break;
1985 return err;
1986 #endif
1987 default:
1988 return -ENOSYS;
1992 static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
1994 CPUState *cpu;
1995 CPUArchState *env;
1996 int err = 0;
1998 if (kvm_enabled()) {
1999 return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);
2002 switch (type) {
2003 case GDB_BREAKPOINT_SW:
2004 case GDB_BREAKPOINT_HW:
2005 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2006 env = cpu->env_ptr;
2007 err = cpu_breakpoint_remove(env, addr, BP_GDB);
2008 if (err)
2009 break;
2011 return err;
2012 #ifndef CONFIG_USER_ONLY
2013 case GDB_WATCHPOINT_WRITE:
2014 case GDB_WATCHPOINT_READ:
2015 case GDB_WATCHPOINT_ACCESS:
2016 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2017 env = cpu->env_ptr;
2018 err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
2019 if (err)
2020 break;
2022 return err;
2023 #endif
2024 default:
2025 return -ENOSYS;
2029 static void gdb_breakpoint_remove_all(void)
2031 CPUState *cpu;
2032 CPUArchState *env;
2034 if (kvm_enabled()) {
2035 kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
2036 return;
2039 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2040 env = cpu->env_ptr;
2041 cpu_breakpoint_remove_all(env, BP_GDB);
2042 #ifndef CONFIG_USER_ONLY
2043 cpu_watchpoint_remove_all(env, BP_GDB);
2044 #endif
2048 static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
2050 CPUState *cpu = s->c_cpu;
2051 CPUClass *cc = CPU_GET_CLASS(cpu);
2053 cpu_synchronize_state(cpu);
2054 if (cc->set_pc) {
2055 cc->set_pc(cpu, pc);
2059 static CPUState *find_cpu(uint32_t thread_id)
2061 CPUState *cpu;
2063 for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2064 if (cpu_index(cpu) == thread_id) {
2065 return cpu;
2069 return NULL;
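/* Illustrative examples (not part of the original file) of packets the
 * dispatcher below handles, shown without the '$'...'#nn' framing:
 *
 *     "?"            -> e.g. "T05thread:01;"      stop-reason query
 *     "g"            -> hex dump of the core register set
 *     "m4000,10"     -> 16 bytes at 0x4000 as hex, or "E14" on fault
 *     "Z0,400123,4"  -> "OK"                      insert sw breakpoint
 *     "qSupported"   -> "PacketSize=1000", plus ";qXfer:features:read+"
 *                       when a core XML description is built in
 */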
2072 static int gdb_handle_packet(GDBState *s, const char *line_buf)
2074 #ifdef TARGET_XTENSA
2075 CPUArchState *env;
2076 #endif
2077 CPUState *cpu;
2078 const char *p;
2079 uint32_t thread;
2080 int ch, reg_size, type, res;
2081 char buf[MAX_PACKET_LENGTH];
2082 uint8_t mem_buf[MAX_PACKET_LENGTH];
2083 uint8_t *registers;
2084 target_ulong addr, len;
2086 #ifdef DEBUG_GDB
2087 printf("command='%s'\n", line_buf);
2088 #endif
2089 p = line_buf;
2090 ch = *p++;
2091 switch(ch) {
2092 case '?':
2093 /* TODO: Make this return the correct value for user-mode. */
2094 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", GDB_SIGNAL_TRAP,
2095 cpu_index(s->c_cpu));
2096 put_packet(s, buf);
2097 /* Remove all the breakpoints when this query is issued,
2098 * because gdb is doing an initial connect and the state
2099 * should be cleaned up.
2101 gdb_breakpoint_remove_all();
2102 break;
2103 case 'c':
2104 if (*p != '\0') {
2105 addr = strtoull(p, (char **)&p, 16);
2106 gdb_set_cpu_pc(s, addr);
2108 s->signal = 0;
2109 gdb_continue(s);
2110 return RS_IDLE;
2111 case 'C':
2112 s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16));
2113 if (s->signal == -1)
2114 s->signal = 0;
2115 gdb_continue(s);
2116 return RS_IDLE;
2117 case 'v':
2118 if (strncmp(p, "Cont", 4) == 0) {
2119 int res_signal, res_thread;
2121 p += 4;
2122 if (*p == '?') {
2123 put_packet(s, "vCont;c;C;s;S");
2124 break;
2126 res = 0;
2127 res_signal = 0;
2128 res_thread = 0;
2129 while (*p) {
2130 int action, signal;
2132 if (*p++ != ';') {
2133 res = 0;
2134 break;
2136 action = *p++;
2137 signal = 0;
2138 if (action == 'C' || action == 'S') {
2139 signal = strtoul(p, (char **)&p, 16);
2140 } else if (action != 'c' && action != 's') {
2141 res = 0;
2142 break;
2144 thread = 0;
2145 if (*p == ':') {
2146 thread = strtoull(p+1, (char **)&p, 16);
2148 action = tolower(action);
2149 if (res == 0 || (res == 'c' && action == 's')) {
2150 res = action;
2151 res_signal = signal;
2152 res_thread = thread;
2155 if (res) {
2156 if (res_thread != -1 && res_thread != 0) {
2157 cpu = find_cpu(res_thread);
2158 if (cpu == NULL) {
2159 put_packet(s, "E22");
2160 break;
2162 s->c_cpu = cpu;
2164 if (res == 's') {
2165 cpu_single_step(s->c_cpu, sstep_flags);
2167 s->signal = res_signal;
2168 gdb_continue(s);
2169 return RS_IDLE;
2171 break;
2172 } else {
2173 goto unknown_command;
2175 case 'k':
2176 #ifdef CONFIG_USER_ONLY
2177 /* Kill the target */
2178 fprintf(stderr, "\nQEMU: Terminated via GDBstub\n");
2179 exit(0);
2180 #endif
2181 case 'D':
2182 /* Detach packet */
2183 gdb_breakpoint_remove_all();
2184 gdb_syscall_mode = GDB_SYS_DISABLED;
2185 gdb_continue(s);
2186 put_packet(s, "OK");
2187 break;
2188 case 's':
2189 if (*p != '\0') {
2190 addr = strtoull(p, (char **)&p, 16);
2191 gdb_set_cpu_pc(s, addr);
2193 cpu_single_step(s->c_cpu, sstep_flags);
2194 gdb_continue(s);
2195 return RS_IDLE;
2196 case 'F':
2198 target_ulong ret;
2199 target_ulong err;
2201 ret = strtoull(p, (char **)&p, 16);
2202 if (*p == ',') {
2203 p++;
2204 err = strtoull(p, (char **)&p, 16);
2205 } else {
2206 err = 0;
2208 if (*p == ',')
2209 p++;
2210 type = *p;
2211 if (s->current_syscall_cb) {
2212 s->current_syscall_cb(s->c_cpu, ret, err);
2213 s->current_syscall_cb = NULL;
2215 if (type == 'C') {
2216 put_packet(s, "T02");
2217 } else {
2218 gdb_continue(s);
2221 break;
2222 case 'g':
2223 cpu_synchronize_state(s->g_cpu);
2224 #ifdef TARGET_XTENSA
2225 env = s->g_cpu->env_ptr;
2226 #endif
2227 len = 0;
2228 for (addr = 0; addr < num_g_regs; addr++) {
2229 reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr);
2230 len += reg_size;
2232 memtohex(buf, mem_buf, len);
2233 put_packet(s, buf);
2234 break;
2235 case 'G':
2236 cpu_synchronize_state(s->g_cpu);
2237 #ifdef TARGET_XTENSA
2238 env = s->g_cpu->env_ptr;
2239 #endif
2240 registers = mem_buf;
2241 len = strlen(p) / 2;
2242 hextomem((uint8_t *)registers, p, len);
2243 for (addr = 0; addr < num_g_regs && len > 0; addr++) {
2244 reg_size = gdb_write_register(s->g_cpu, registers, addr);
2245 len -= reg_size;
2246 registers += reg_size;
2248 put_packet(s, "OK");
2249 break;
2250 case 'm':
2251 addr = strtoull(p, (char **)&p, 16);
2252 if (*p == ',')
2253 p++;
2254 len = strtoull(p, NULL, 16);
2255 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, false) != 0) {
2256 put_packet (s, "E14");
2257 } else {
2258 memtohex(buf, mem_buf, len);
2259 put_packet(s, buf);
2261 break;
2262 case 'M':
2263 addr = strtoull(p, (char **)&p, 16);
2264 if (*p == ',')
2265 p++;
2266 len = strtoull(p, (char **)&p, 16);
2267 if (*p == ':')
2268 p++;
2269 hextomem(mem_buf, p, len);
2270 if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len,
2271 true) != 0) {
2272 put_packet(s, "E14");
2273 } else {
2274 put_packet(s, "OK");
2276 break;
2277 case 'p':
2278 /* Older gdb are really dumb, and don't use 'g' if 'p' is available.
2279 This works, but can be very slow. Anything new enough to
2280 understand XML also knows how to use this properly. */
2281 if (!gdb_has_xml)
2282 goto unknown_command;
2283 addr = strtoull(p, (char **)&p, 16);
2284 reg_size = gdb_read_register(s->g_cpu, mem_buf, addr);
2285 if (reg_size) {
2286 memtohex(buf, mem_buf, reg_size);
2287 put_packet(s, buf);
2288 } else {
2289 put_packet(s, "E14");
2291 break;
2292 case 'P':
2293 if (!gdb_has_xml)
2294 goto unknown_command;
2295 addr = strtoull(p, (char **)&p, 16);
2296 if (*p == '=')
2297 p++;
2298 reg_size = strlen(p) / 2;
2299 hextomem(mem_buf, p, reg_size);
2300 gdb_write_register(s->g_cpu, mem_buf, addr);
2301 put_packet(s, "OK");
2302 break;
2303 case 'Z':
2304 case 'z':
2305 type = strtoul(p, (char **)&p, 16);
2306 if (*p == ',')
2307 p++;
2308 addr = strtoull(p, (char **)&p, 16);
2309 if (*p == ',')
2310 p++;
2311 len = strtoull(p, (char **)&p, 16);
2312 if (ch == 'Z')
2313 res = gdb_breakpoint_insert(addr, len, type);
2314 else
2315 res = gdb_breakpoint_remove(addr, len, type);
2316 if (res >= 0)
2317 put_packet(s, "OK");
2318 else if (res == -ENOSYS)
2319 put_packet(s, "");
2320 else
2321 put_packet(s, "E22");
2322 break;
2323 case 'H':
2324 type = *p++;
2325 thread = strtoull(p, (char **)&p, 16);
2326 if (thread == -1 || thread == 0) {
2327 put_packet(s, "OK");
2328 break;
2330 cpu = find_cpu(thread);
2331 if (cpu == NULL) {
2332 put_packet(s, "E22");
2333 break;
2335 switch (type) {
2336 case 'c':
2337 s->c_cpu = cpu;
2338 put_packet(s, "OK");
2339 break;
2340 case 'g':
2341 s->g_cpu = cpu;
2342 put_packet(s, "OK");
2343 break;
2344 default:
2345 put_packet(s, "E22");
2346 break;
2348 break;
2349 case 'T':
2350 thread = strtoull(p, (char **)&p, 16);
2351 cpu = find_cpu(thread);
2353 if (cpu != NULL) {
2354 put_packet(s, "OK");
2355 } else {
2356 put_packet(s, "E22");
2358 break;
2359 case 'q':
2360 case 'Q':
2361 /* parse any 'q' packets here */
2362 if (!strcmp(p,"qemu.sstepbits")) {
2363 /* Query Breakpoint bit definitions */
2364 snprintf(buf, sizeof(buf), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
2365 SSTEP_ENABLE,
2366 SSTEP_NOIRQ,
2367 SSTEP_NOTIMER);
2368 put_packet(s, buf);
2369 break;
2370 } else if (strncmp(p,"qemu.sstep",10) == 0) {
2371 /* Display or change the sstep_flags */
2372 p += 10;
2373 if (*p != '=') {
2374 /* Display current setting */
2375 snprintf(buf, sizeof(buf), "0x%x", sstep_flags);
2376 put_packet(s, buf);
2377 break;
2379 p++;
2380 type = strtoul(p, (char **)&p, 16);
2381 sstep_flags = type;
2382 put_packet(s, "OK");
2383 break;
2384 } else if (strcmp(p,"C") == 0) {
2385 /* "Current thread" remains vague in the spec, so always return
2386 * the first CPU (gdb returns the first thread). */
2387 put_packet(s, "QC1");
2388 break;
2389 } else if (strcmp(p,"fThreadInfo") == 0) {
2390 s->query_cpu = first_cpu;
2391 goto report_cpuinfo;
2392 } else if (strcmp(p,"sThreadInfo") == 0) {
2393 report_cpuinfo:
2394 if (s->query_cpu) {
2395 snprintf(buf, sizeof(buf), "m%x", cpu_index(s->query_cpu));
2396 put_packet(s, buf);
2397 s->query_cpu = s->query_cpu->next_cpu;
2398 } else
2399 put_packet(s, "l");
2400 break;
2401 } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
2402 thread = strtoull(p+16, (char **)&p, 16);
2403 cpu = find_cpu(thread);
2404 if (cpu != NULL) {
2405 cpu_synchronize_state(cpu);
2406 len = snprintf((char *)mem_buf, sizeof(mem_buf),
2407 "CPU#%d [%s]", cpu->cpu_index,
2408 cpu->halted ? "halted " : "running");
2409 memtohex(buf, mem_buf, len);
2410 put_packet(s, buf);
2412 break;
2414 #ifdef CONFIG_USER_ONLY
2415 else if (strncmp(p, "Offsets", 7) == 0) {
2416 CPUArchState *env = s->c_cpu->env_ptr;
2417 TaskState *ts = env->opaque;
2419 snprintf(buf, sizeof(buf),
2420 "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
2421 ";Bss=" TARGET_ABI_FMT_lx,
2422 ts->info->code_offset,
2423 ts->info->data_offset,
2424 ts->info->data_offset);
2425 put_packet(s, buf);
2426 break;
2428 #else /* !CONFIG_USER_ONLY */
2429 else if (strncmp(p, "Rcmd,", 5) == 0) {
2430 int len = strlen(p + 5);
2432 if ((len % 2) != 0) {
2433 put_packet(s, "E01");
2434 break;
2436 hextomem(mem_buf, p + 5, len);
2437 len = len / 2;
2438 mem_buf[len++] = 0;
2439 qemu_chr_be_write(s->mon_chr, mem_buf, len);
2440 put_packet(s, "OK");
2441 break;
2443 #endif /* !CONFIG_USER_ONLY */
2444 if (strncmp(p, "Supported", 9) == 0) {
2445 snprintf(buf, sizeof(buf), "PacketSize=%x", MAX_PACKET_LENGTH);
2446 #ifdef GDB_CORE_XML
2447 pstrcat(buf, sizeof(buf), ";qXfer:features:read+");
2448 #endif
2449 put_packet(s, buf);
2450 break;
2452 #ifdef GDB_CORE_XML
2453 if (strncmp(p, "Xfer:features:read:", 19) == 0) {
2454 const char *xml;
2455 target_ulong total_len;
2457 gdb_has_xml = 1;
2458 p += 19;
2459 xml = get_feature_xml(p, &p);
2460 if (!xml) {
2461 snprintf(buf, sizeof(buf), "E00");
2462 put_packet(s, buf);
2463 break;
2466 if (*p == ':')
2467 p++;
2468 addr = strtoul(p, (char **)&p, 16);
2469 if (*p == ',')
2470 p++;
2471 len = strtoul(p, (char **)&p, 16);
2473 total_len = strlen(xml);
2474 if (addr > total_len) {
2475 snprintf(buf, sizeof(buf), "E00");
2476 put_packet(s, buf);
2477 break;
2479 if (len > (MAX_PACKET_LENGTH - 5) / 2)
2480 len = (MAX_PACKET_LENGTH - 5) / 2;
2481 if (len < total_len - addr) {
2482 buf[0] = 'm';
2483 len = memtox(buf + 1, xml + addr, len);
2484 } else {
2485 buf[0] = 'l';
2486 len = memtox(buf + 1, xml + addr, total_len - addr);
2488 put_packet_binary(s, buf, len + 1);
2489 break;
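/* qXfer paging convention: an 'm' prefix marks a partial reply with more
 * data remaining, 'l' marks the final chunk; gdb re-reads at increasing
 * offsets until it sees 'l'. */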
2491 #endif
2492 /* Unrecognised 'q' command. */
2493 goto unknown_command;
2495 default:
2496 unknown_command:
2497 /* put empty packet */
2498 buf[0] = '\0';
2499 put_packet(s, buf);
2500 break;
2501 }
2502 return RS_IDLE;
2503 }
2505 void gdb_set_stop_cpu(CPUState *cpu)
2506 {
2507 gdbserver_state->c_cpu = cpu;
2508 gdbserver_state->g_cpu = cpu;
2509 }
2511 #ifndef CONFIG_USER_ONLY
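/* Run-state change hook: when the guest stops, map the stop reason onto a
 * GDB signal number and send a 'T' stop-reply packet, including watchpoint
 * details when one was hit. */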
2512 static void gdb_vm_state_change(void *opaque, int running, RunState state)
2513 {
2514 GDBState *s = gdbserver_state;
2515 CPUArchState *env = s->c_cpu->env_ptr;
2516 CPUState *cpu = s->c_cpu;
2517 char buf[256];
2518 const char *type;
2519 int ret;
2521 if (running || s->state == RS_INACTIVE) {
2522 return;
2524 /* Is there a GDB syscall waiting to be sent? */
2525 if (s->current_syscall_cb) {
2526 put_packet(s, s->syscall_buf);
2527 return;
2529 switch (state) {
2530 case RUN_STATE_DEBUG:
2531 if (env->watchpoint_hit) {
2532 switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
2533 case BP_MEM_READ:
2534 type = "r";
2535 break;
2536 case BP_MEM_ACCESS:
2537 type = "a";
2538 break;
2539 default:
2540 type = "";
2541 break;
2543 snprintf(buf, sizeof(buf),
2544 "T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
2545 GDB_SIGNAL_TRAP, cpu_index(cpu), type,
2546 env->watchpoint_hit->vaddr);
2547 env->watchpoint_hit = NULL;
2548 goto send_packet;
2550 tb_flush(env);
2551 ret = GDB_SIGNAL_TRAP;
2552 break;
2553 case RUN_STATE_PAUSED:
2554 ret = GDB_SIGNAL_INT;
2555 break;
2556 case RUN_STATE_SHUTDOWN:
2557 ret = GDB_SIGNAL_QUIT;
2558 break;
2559 case RUN_STATE_IO_ERROR:
2560 ret = GDB_SIGNAL_IO;
2561 break;
2562 case RUN_STATE_WATCHDOG:
2563 ret = GDB_SIGNAL_ALRM;
2564 break;
2565 case RUN_STATE_INTERNAL_ERROR:
2566 ret = GDB_SIGNAL_ABRT;
2567 break;
2568 case RUN_STATE_SAVE_VM:
2569 case RUN_STATE_RESTORE_VM:
2570 return;
2571 case RUN_STATE_FINISH_MIGRATE:
2572 ret = GDB_SIGNAL_XCPU;
2573 break;
2574 default:
2575 ret = GDB_SIGNAL_UNKNOWN;
2576 break;
2578 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", ret, cpu_index(cpu));
2580 send_packet:
2581 put_packet(s, buf);
2583 /* disable single step if it was enabled */
2584 cpu_single_step(cpu, 0);
2586 #endif
2588 /* Send a gdb syscall request.
2589 This accepts limited printf-style format specifiers, specifically:
2590 %x - target_ulong argument printed in hex.
2591 %lx - 64-bit argument printed in hex.
2592 %s - string pointer (target_ulong) and length (int) pair. */
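/* Illustrative only (no such caller appears in this file): a semihosting
 * implementation might issue
 *     gdb_do_syscall(cb, "write,%x,%x,%x", fd, addr, len);
 * which is encoded below as an "Fwrite,<fd>,<addr>,<len>" file-I/O request. */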
2593 void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
2595 va_list va;
2596 char *p;
2597 char *p_end;
2598 target_ulong addr;
2599 uint64_t i64;
2600 GDBState *s;
2602 s = gdbserver_state;
2603 if (!s)
2604 return;
2605 s->current_syscall_cb = cb;
2606 #ifndef CONFIG_USER_ONLY
2607 vm_stop(RUN_STATE_DEBUG);
2608 #endif
2609 va_start(va, fmt);
2610 p = s->syscall_buf;
2611 p_end = &s->syscall_buf[sizeof(s->syscall_buf)];
2612 *(p++) = 'F';
2613 while (*fmt) {
2614 if (*fmt == '%') {
2615 fmt++;
2616 switch (*fmt++) {
2617 case 'x':
2618 addr = va_arg(va, target_ulong);
2619 p += snprintf(p, p_end - p, TARGET_FMT_lx, addr);
2620 break;
2621 case 'l':
2622 if (*(fmt++) != 'x')
2623 goto bad_format;
2624 i64 = va_arg(va, uint64_t);
2625 p += snprintf(p, p_end - p, "%" PRIx64, i64);
2626 break;
2627 case 's':
2628 addr = va_arg(va, target_ulong);
2629 p += snprintf(p, p_end - p, TARGET_FMT_lx "/%x",
2630 addr, va_arg(va, int));
2631 break;
2632 default:
2633 bad_format:
2634 fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n",
2635 fmt - 1);
2636 break;
2638 } else {
2639 *(p++) = *(fmt++);
2642 *p = 0;
2643 va_end(va);
2644 #ifdef CONFIG_USER_ONLY
2645 put_packet(s, s->syscall_buf);
2646 gdb_handlesig(s->c_cpu, 0);
2647 #else
2648 /* In this case wait to send the syscall packet until notification that
2649 the CPU has stopped. This must be done because if the packet is sent
2650 now the reply from the syscall request could be received while the CPU
2651 is still in the running state, which can cause packets to be dropped
2652 and state transition 'T' packets to be sent while the syscall is still
2653 being processed. */
2654 cpu_exit(s->c_cpu);
2655 #endif
2656 }
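/* Feed one byte of input to the packet parser. Remote-protocol packets are
 * framed as "$<payload>#<two hex checksum digits>", where the checksum is the
 * sum of the payload bytes modulo 256; each packet is acknowledged with '+'
 * (good checksum) or '-' (please resend). */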
2658 static void gdb_read_byte(GDBState *s, int ch)
2659 {
2660 int i, csum;
2661 uint8_t reply;
2663 #ifndef CONFIG_USER_ONLY
2664 if (s->last_packet_len) {
2665 /* Waiting for a response to the last packet. If we see the start
2666 of a new command then abandon the previous response. */
2667 if (ch == '-') {
2668 #ifdef DEBUG_GDB
2669 printf("Got NACK, retransmitting\n");
2670 #endif
2671 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
2673 #ifdef DEBUG_GDB
2674 else if (ch == '+')
2675 printf("Got ACK\n");
2676 else
2677 printf("Got '%c' when expecting ACK/NACK\n", ch);
2678 #endif
2679 if (ch == '+' || ch == '$')
2680 s->last_packet_len = 0;
2681 if (ch != '$')
2682 return;
2684 if (runstate_is_running()) {
2685 /* While the CPU is running we cannot do anything except stop
2686 it as soon as a character arrives */
2687 vm_stop(RUN_STATE_PAUSED);
2688 } else
2689 #endif
2691 switch(s->state) {
2692 case RS_IDLE:
2693 if (ch == '$') {
2694 s->line_buf_index = 0;
2695 s->state = RS_GETLINE;
2697 break;
2698 case RS_GETLINE:
2699 if (ch == '#') {
2700 s->state = RS_CHKSUM1;
2701 } else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
2702 s->state = RS_IDLE;
2703 } else {
2704 s->line_buf[s->line_buf_index++] = ch;
2706 break;
2707 case RS_CHKSUM1:
2708 s->line_buf[s->line_buf_index] = '\0';
2709 s->line_csum = fromhex(ch) << 4;
2710 s->state = RS_CHKSUM2;
2711 break;
2712 case RS_CHKSUM2:
2713 s->line_csum |= fromhex(ch);
2714 csum = 0;
2715 for(i = 0; i < s->line_buf_index; i++) {
2716 csum += s->line_buf[i];
2718 if (s->line_csum != (csum & 0xff)) {
2719 reply = '-';
2720 put_buffer(s, &reply, 1);
2721 s->state = RS_IDLE;
2722 } else {
2723 reply = '+';
2724 put_buffer(s, &reply, 1);
2725 s->state = gdb_handle_packet(s, s->line_buf);
2727 break;
2728 default:
2729 abort();
2730 }
2731 }
2732 }
2734 /* Tell the remote gdb that the process has exited. */
2735 void gdb_exit(CPUArchState *env, int code)
2736 {
2737 GDBState *s;
2738 char buf[4];
2740 s = gdbserver_state;
2741 if (!s) {
2742 return;
2744 #ifdef CONFIG_USER_ONLY
2745 if (gdbserver_fd < 0 || s->fd < 0) {
2746 return;
2748 #endif
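/* "W<status>" is the stop reply announcing that the process exited with the
 * given exit code (low 8 bits). */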
2750 snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
2751 put_packet(s, buf);
2753 #ifndef CONFIG_USER_ONLY
2754 if (s->chr) {
2755 qemu_chr_delete(s->chr);
2756 }
2757 #endif
2758 }
2760 #ifdef CONFIG_USER_ONLY
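/* Return non-zero if a gdb client is connected and signals can be queued
 * to it. */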
2761 int
2762 gdb_queuesig (void)
2763 {
2764 GDBState *s;
2766 s = gdbserver_state;
2768 if (gdbserver_fd < 0 || s->fd < 0)
2769 return 0;
2770 else
2771 return 1;
2772 }
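/* Report signal SIG to the attached debugger and process incoming packets
 * until it resumes the target. Returns the signal the caller should actually
 * deliver, which gdb may have replaced or cleared. */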
2774 int
2775 gdb_handlesig(CPUState *cpu, int sig)
2776 {
2777 CPUArchState *env = cpu->env_ptr;
2778 GDBState *s;
2779 char buf[256];
2780 int n;
2782 s = gdbserver_state;
2783 if (gdbserver_fd < 0 || s->fd < 0) {
2784 return sig;
2787 /* disable single step if it was enabled */
2788 cpu_single_step(cpu, 0);
2789 tb_flush(env);
2791 if (sig != 0) {
2792 snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb(sig));
2793 put_packet(s, buf);
2795 /* put_packet() might have detected that the peer terminated the
2796 connection. */
2797 if (s->fd < 0) {
2798 return sig;
2799 }
2800 }
2801 sig = 0;
2802 s->state = RS_IDLE;
2803 s->running_state = 0;
2804 while (s->running_state == 0) {
2805 n = read(s->fd, buf, 256);
2806 if (n > 0) {
2807 int i;
2809 for (i = 0; i < n; i++) {
2810 gdb_read_byte(s, buf[i]);
2812 } else if (n == 0 || errno != EAGAIN) {
2813 /* XXX: Connection closed. Should probably wait for another
2814 connection before continuing. */
2815 return sig;
2816 }
2817 }
2818 sig = s->signal;
2819 s->signal = 0;
2820 return sig;
2821 }
2823 /* Tell the remote gdb that the process has exited due to SIG. */
2824 void gdb_signalled(CPUArchState *env, int sig)
2825 {
2826 GDBState *s;
2827 char buf[4];
2829 s = gdbserver_state;
2830 if (gdbserver_fd < 0 || s->fd < 0) {
2831 return;
2834 snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb(sig));
2835 put_packet(s, buf);
2836 }
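/* Block until a gdb client connects on gdbserver_fd, then create a fresh
 * GDBState bound to the accepted, non-blocking socket. */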
2838 static void gdb_accept(void)
2839 {
2840 GDBState *s;
2841 struct sockaddr_in sockaddr;
2842 socklen_t len;
2843 int fd;
2845 for(;;) {
2846 len = sizeof(sockaddr);
2847 fd = accept(gdbserver_fd, (struct sockaddr *)&sockaddr, &len);
2848 if (fd < 0 && errno != EINTR) {
2849 perror("accept");
2850 return;
2851 } else if (fd >= 0) {
2852 #ifndef _WIN32
2853 fcntl(fd, F_SETFD, FD_CLOEXEC);
2854 #endif
2855 break;
2856 }
2857 }
2859 /* set short latency */
2860 socket_set_nodelay(fd);
2862 s = g_malloc0(sizeof(GDBState));
2863 s->c_cpu = first_cpu;
2864 s->g_cpu = first_cpu;
2865 s->fd = fd;
2866 gdb_has_xml = 0;
2868 gdbserver_state = s;
2870 fcntl(fd, F_SETFL, O_NONBLOCK);
2871 }
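/* Create a TCP socket listening on PORT on all local addresses; returns the
 * listening file descriptor, or -1 on error. */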
2873 static int gdbserver_open(int port)
2874 {
2875 struct sockaddr_in sockaddr;
2876 int fd, val, ret;
2878 fd = socket(PF_INET, SOCK_STREAM, 0);
2879 if (fd < 0) {
2880 perror("socket");
2881 return -1;
2883 #ifndef _WIN32
2884 fcntl(fd, F_SETFD, FD_CLOEXEC);
2885 #endif
2887 /* allow fast reuse */
2888 val = 1;
2889 qemu_setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));
2891 sockaddr.sin_family = AF_INET;
2892 sockaddr.sin_port = htons(port);
2893 sockaddr.sin_addr.s_addr = 0;
2894 ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
2895 if (ret < 0) {
2896 perror("bind");
2897 close(fd);
2898 return -1;
2900 ret = listen(fd, 0);
2901 if (ret < 0) {
2902 perror("listen");
2903 close(fd);
2904 return -1;
2906 return fd;
2907 }
2909 int gdbserver_start(int port)
2910 {
2911 gdbserver_fd = gdbserver_open(port);
2912 if (gdbserver_fd < 0)
2913 return -1;
2914 /* accept connections */
2915 gdb_accept();
2916 return 0;
2917 }
2919 /* Disable gdb stub for child processes. */
2920 void gdbserver_fork(CPUArchState *env)
2921 {
2922 GDBState *s = gdbserver_state;
2923 if (gdbserver_fd < 0 || s->fd < 0)
2924 return;
2925 close(s->fd);
2926 s->fd = -1;
2927 cpu_breakpoint_remove_all(env, BP_GDB);
2928 cpu_watchpoint_remove_all(env, BP_GDB);
2930 #else
2931 static int gdb_chr_can_receive(void *opaque)
2933 /* We can handle an arbitrarily large amount of data.
2934 Pick the maximum packet size, which is as good as anything. */
2935 return MAX_PACKET_LENGTH;
2938 static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
2940 int i;
2942 for (i = 0; i < size; i++) {
2943 gdb_read_byte(gdbserver_state, buf[i]);
2947 static void gdb_chr_event(void *opaque, int event)
2949 switch (event) {
2950 case CHR_EVENT_OPENED:
2951 vm_stop(RUN_STATE_PAUSED);
2952 gdb_has_xml = 0;
2953 break;
2954 default:
2955 break;
2959 static void gdb_monitor_output(GDBState *s, const char *msg, int len)
2961 char buf[MAX_PACKET_LENGTH];
2963 buf[0] = 'O';
2964 if (len > (MAX_PACKET_LENGTH/2) - 1)
2965 len = (MAX_PACKET_LENGTH/2) - 1;
2966 memtohex(buf + 1, (uint8_t *)msg, len);
2967 put_packet(s, buf);
2968 }
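/* Forward monitor output to gdb as 'O' (console output) packets. Every byte
 * expands to two hex digits, so the text is split into chunks small enough to
 * fit in last_packet. */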
2970 static int gdb_monitor_write(CharDriverState *chr, const uint8_t *buf, int len)
2972 const char *p = (const char *)buf;
2973 int max_sz;
2975 max_sz = (sizeof(gdbserver_state->last_packet) - 2) / 2;
2976 for (;;) {
2977 if (len <= max_sz) {
2978 gdb_monitor_output(gdbserver_state, p, len);
2979 break;
2981 gdb_monitor_output(gdbserver_state, p, max_sz);
2982 p += max_sz;
2983 len -= max_sz;
2985 return len;
2988 #ifndef _WIN32
2989 static void gdb_sigterm_handler(int signal)
2991 if (runstate_is_running()) {
2992 vm_stop(RUN_STATE_PAUSED);
2993 }
2994 }
2995 #endif
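/* System-emulation entry point. DEVICE is a character-device specification:
 * "tcp:..." (to which nowait,nodelay,server are appended), "stdio" (which also
 * installs a SIGINT handler), "none" (state is created without a backend), or
 * any other chardev string accepted by qemu_chr_new(). */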
2997 int gdbserver_start(const char *device)
2999 GDBState *s;
3000 char gdbstub_device_name[128];
3001 CharDriverState *chr = NULL;
3002 CharDriverState *mon_chr;
3004 if (!device)
3005 return -1;
3006 if (strcmp(device, "none") != 0) {
3007 if (strstart(device, "tcp:", NULL)) {
3008 /* enforce required TCP attributes */
3009 snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
3010 "%s,nowait,nodelay,server", device);
3011 device = gdbstub_device_name;
3013 #ifndef _WIN32
3014 else if (strcmp(device, "stdio") == 0) {
3015 struct sigaction act;
3017 memset(&act, 0, sizeof(act));
3018 act.sa_handler = gdb_sigterm_handler;
3019 sigaction(SIGINT, &act, NULL);
3021 #endif
3022 chr = qemu_chr_new("gdb", device, NULL);
3023 if (!chr)
3024 return -1;
3026 qemu_chr_fe_claim_no_fail(chr);
3027 qemu_chr_add_handlers(chr, gdb_chr_can_receive, gdb_chr_receive,
3028 gdb_chr_event, NULL);
3031 s = gdbserver_state;
3032 if (!s) {
3033 s = g_malloc0(sizeof(GDBState));
3034 gdbserver_state = s;
3036 qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);
3038 /* Initialize a monitor terminal for gdb */
3039 mon_chr = g_malloc0(sizeof(*mon_chr));
3040 mon_chr->chr_write = gdb_monitor_write;
3041 monitor_init(mon_chr, 0);
3042 } else {
3043 if (s->chr)
3044 qemu_chr_delete(s->chr);
3045 mon_chr = s->mon_chr;
3046 memset(s, 0, sizeof(GDBState));
3048 s->c_cpu = first_cpu;
3049 s->g_cpu = first_cpu;
3050 s->chr = chr;
3051 s->state = chr ? RS_IDLE : RS_INACTIVE;
3052 s->mon_chr = mon_chr;
3053 s->current_syscall_cb = NULL;
3055 return 0;
3056 }
3057 #endif