[qemu.git] / gdbstub.c
1 /*
2 * gdb server stub
4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "config.h"
20 #include "qemu-common.h"
21 #ifdef CONFIG_USER_ONLY
22 #include <stdlib.h>
23 #include <stdio.h>
24 #include <stdarg.h>
25 #include <string.h>
26 #include <errno.h>
27 #include <unistd.h>
28 #include <fcntl.h>
30 #include "qemu.h"
31 #else
32 #include "monitor.h"
33 #include "qemu-char.h"
34 #include "sysemu.h"
35 #include "gdbstub.h"
36 #endif
38 #define MAX_PACKET_LENGTH 4096
40 #include "exec-all.h"
41 #include "qemu_socket.h"
42 #include "kvm.h"
45 enum {
46 GDB_SIGNAL_0 = 0,
47 GDB_SIGNAL_INT = 2,
48 GDB_SIGNAL_TRAP = 5,
49 GDB_SIGNAL_UNKNOWN = 143
52 #ifdef CONFIG_USER_ONLY
54 /* Map target signal numbers to GDB protocol signal numbers and vice
55 * versa. For user emulation's currently supported systems, we can
56 * assume most signals are defined.
59 static int gdb_signal_table[] = {
61 TARGET_SIGHUP,
62 TARGET_SIGINT,
63 TARGET_SIGQUIT,
64 TARGET_SIGILL,
65 TARGET_SIGTRAP,
66 TARGET_SIGABRT,
67 -1, /* SIGEMT */
68 TARGET_SIGFPE,
69 TARGET_SIGKILL,
70 TARGET_SIGBUS,
71 TARGET_SIGSEGV,
72 TARGET_SIGSYS,
73 TARGET_SIGPIPE,
74 TARGET_SIGALRM,
75 TARGET_SIGTERM,
76 TARGET_SIGURG,
77 TARGET_SIGSTOP,
78 TARGET_SIGTSTP,
79 TARGET_SIGCONT,
80 TARGET_SIGCHLD,
81 TARGET_SIGTTIN,
82 TARGET_SIGTTOU,
83 TARGET_SIGIO,
84 TARGET_SIGXCPU,
85 TARGET_SIGXFSZ,
86 TARGET_SIGVTALRM,
87 TARGET_SIGPROF,
88 TARGET_SIGWINCH,
89 -1, /* SIGLOST */
90 TARGET_SIGUSR1,
91 TARGET_SIGUSR2,
92 #ifdef TARGET_SIGPWR
93 TARGET_SIGPWR,
94 #else
95 -1,
96 #endif
97 -1, /* SIGPOLL */
98 -1,
99 -1,
109 #ifdef __SIGRTMIN
110 __SIGRTMIN + 1,
111 __SIGRTMIN + 2,
112 __SIGRTMIN + 3,
113 __SIGRTMIN + 4,
114 __SIGRTMIN + 5,
115 __SIGRTMIN + 6,
116 __SIGRTMIN + 7,
117 __SIGRTMIN + 8,
118 __SIGRTMIN + 9,
119 __SIGRTMIN + 10,
120 __SIGRTMIN + 11,
121 __SIGRTMIN + 12,
122 __SIGRTMIN + 13,
123 __SIGRTMIN + 14,
124 __SIGRTMIN + 15,
125 __SIGRTMIN + 16,
126 __SIGRTMIN + 17,
127 __SIGRTMIN + 18,
128 __SIGRTMIN + 19,
129 __SIGRTMIN + 20,
130 __SIGRTMIN + 21,
131 __SIGRTMIN + 22,
132 __SIGRTMIN + 23,
133 __SIGRTMIN + 24,
134 __SIGRTMIN + 25,
135 __SIGRTMIN + 26,
136 __SIGRTMIN + 27,
137 __SIGRTMIN + 28,
138 __SIGRTMIN + 29,
139 __SIGRTMIN + 30,
140 __SIGRTMIN + 31,
141 -1, /* SIGCANCEL */
142 __SIGRTMIN,
143 __SIGRTMIN + 32,
144 __SIGRTMIN + 33,
145 __SIGRTMIN + 34,
146 __SIGRTMIN + 35,
147 __SIGRTMIN + 36,
148 __SIGRTMIN + 37,
149 __SIGRTMIN + 38,
150 __SIGRTMIN + 39,
151 __SIGRTMIN + 40,
152 __SIGRTMIN + 41,
153 __SIGRTMIN + 42,
154 __SIGRTMIN + 43,
155 __SIGRTMIN + 44,
156 __SIGRTMIN + 45,
157 __SIGRTMIN + 46,
158 __SIGRTMIN + 47,
159 __SIGRTMIN + 48,
160 __SIGRTMIN + 49,
161 __SIGRTMIN + 50,
162 __SIGRTMIN + 51,
163 __SIGRTMIN + 52,
164 __SIGRTMIN + 53,
165 __SIGRTMIN + 54,
166 __SIGRTMIN + 55,
167 __SIGRTMIN + 56,
168 __SIGRTMIN + 57,
169 __SIGRTMIN + 58,
170 __SIGRTMIN + 59,
171 __SIGRTMIN + 60,
172 __SIGRTMIN + 61,
173 __SIGRTMIN + 62,
174 __SIGRTMIN + 63,
175 __SIGRTMIN + 64,
176 __SIGRTMIN + 65,
177 __SIGRTMIN + 66,
178 __SIGRTMIN + 67,
179 __SIGRTMIN + 68,
180 __SIGRTMIN + 69,
181 __SIGRTMIN + 70,
182 __SIGRTMIN + 71,
183 __SIGRTMIN + 72,
184 __SIGRTMIN + 73,
185 __SIGRTMIN + 74,
186 __SIGRTMIN + 75,
187 __SIGRTMIN + 76,
188 __SIGRTMIN + 77,
189 __SIGRTMIN + 78,
190 __SIGRTMIN + 79,
191 __SIGRTMIN + 80,
192 __SIGRTMIN + 81,
193 __SIGRTMIN + 82,
194 __SIGRTMIN + 83,
195 __SIGRTMIN + 84,
196 __SIGRTMIN + 85,
197 __SIGRTMIN + 86,
198 __SIGRTMIN + 87,
199 __SIGRTMIN + 88,
200 __SIGRTMIN + 89,
201 __SIGRTMIN + 90,
202 __SIGRTMIN + 91,
203 __SIGRTMIN + 92,
204 __SIGRTMIN + 93,
205 __SIGRTMIN + 94,
206 __SIGRTMIN + 95,
207 -1, /* SIGINFO */
208 -1, /* UNKNOWN */
209 -1, /* DEFAULT */
216 #endif
218 #else
219 /* In system mode we only need SIGINT and SIGTRAP; other signals
220 are not yet supported. */
222 enum {
223 TARGET_SIGINT = 2,
224 TARGET_SIGTRAP = 5
227 static int gdb_signal_table[] = {
230 TARGET_SIGINT,
233 TARGET_SIGTRAP
235 #endif
237 #ifdef CONFIG_USER_ONLY
238 static int target_signal_to_gdb (int sig)
240 int i;
241 for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++)
242 if (gdb_signal_table[i] == sig)
243 return i;
244 return GDB_SIGNAL_UNKNOWN;
246 #endif
248 static int gdb_signal_to_target (int sig)
250 if (sig < ARRAY_SIZE (gdb_signal_table))
251 return gdb_signal_table[sig];
252 else
253 return -1;
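/*
 * Example (derived from the tables above): GDB protocol signal 5 is
 * GDB_SIGNAL_TRAP, so gdb_signal_to_target(GDB_SIGNAL_TRAP) yields
 * TARGET_SIGTRAP, and in user mode target_signal_to_gdb(TARGET_SIGTRAP)
 * maps back to 5.  Signals without an equivalent come back as -1 or
 * GDB_SIGNAL_UNKNOWN respectively.
 */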
256 //#define DEBUG_GDB
258 typedef struct GDBRegisterState {
259 int base_reg;
260 int num_regs;
261 gdb_reg_cb get_reg;
262 gdb_reg_cb set_reg;
263 const char *xml;
264 struct GDBRegisterState *next;
265 } GDBRegisterState;
267 enum RSState {
268 RS_INACTIVE,
269 RS_IDLE,
270 RS_GETLINE,
271 RS_CHKSUM1,
272 RS_CHKSUM2,
273 RS_SYSCALL,
275 typedef struct GDBState {
276 CPUState *c_cpu; /* current CPU for step/continue ops */
277 CPUState *g_cpu; /* current CPU for other ops */
278 CPUState *query_cpu; /* for q{f|s}ThreadInfo */
279 enum RSState state; /* parsing state */
280 char line_buf[MAX_PACKET_LENGTH];
281 int line_buf_index;
282 int line_csum;
283 uint8_t last_packet[MAX_PACKET_LENGTH + 4];
284 int last_packet_len;
285 int signal;
286 #ifdef CONFIG_USER_ONLY
287 int fd;
288 int running_state;
289 #else
290 CharDriverState *chr;
291 CharDriverState *mon_chr;
292 #endif
293 } GDBState;
295 /* By default use no IRQs and no timers while single stepping so as to
296 * make single stepping like an ICE HW step.
298 static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER;
300 static GDBState *gdbserver_state;
302 /* This is an ugly hack to cope with both new and old gdb.
303 If gdb sends qXfer:features:read then assume we're talking to a newish
304 gdb that understands target descriptions. */
305 static int gdb_has_xml;
307 #ifdef CONFIG_USER_ONLY
308 /* XXX: This is not thread safe. Do we care? */
309 static int gdbserver_fd = -1;
311 static int get_char(GDBState *s)
313 uint8_t ch;
314 int ret;
316 for(;;) {
317 ret = recv(s->fd, &ch, 1, 0);
318 if (ret < 0) {
319 if (errno == ECONNRESET)
320 s->fd = -1;
321 if (errno != EINTR && errno != EAGAIN)
322 return -1;
323 } else if (ret == 0) {
324 close(s->fd);
325 s->fd = -1;
326 return -1;
327 } else {
328 break;
331 return ch;
333 #endif
335 static gdb_syscall_complete_cb gdb_current_syscall_cb;
337 static enum {
338 GDB_SYS_UNKNOWN,
339 GDB_SYS_ENABLED,
340 GDB_SYS_DISABLED,
341 } gdb_syscall_mode;
343 /* If gdb is connected when the first semihosting syscall occurs then use
344 remote gdb syscalls. Otherwise use native file IO. */
345 int use_gdb_syscalls(void)
347 if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
348 gdb_syscall_mode = (gdbserver_state ? GDB_SYS_ENABLED
349 : GDB_SYS_DISABLED);
351 return gdb_syscall_mode == GDB_SYS_ENABLED;
354 /* Resume execution. */
355 static inline void gdb_continue(GDBState *s)
357 #ifdef CONFIG_USER_ONLY
358 s->running_state = 1;
359 #else
360 vm_start();
361 #endif
364 static void put_buffer(GDBState *s, const uint8_t *buf, int len)
366 #ifdef CONFIG_USER_ONLY
367 int ret;
369 while (len > 0) {
370 ret = send(s->fd, buf, len, 0);
371 if (ret < 0) {
372 if (errno != EINTR && errno != EAGAIN)
373 return;
374 } else {
375 buf += ret;
376 len -= ret;
379 #else
380 qemu_chr_write(s->chr, buf, len);
381 #endif
384 static inline int fromhex(int v)
386 if (v >= '0' && v <= '9')
387 return v - '0';
388 else if (v >= 'A' && v <= 'F')
389 return v - 'A' + 10;
390 else if (v >= 'a' && v <= 'f')
391 return v - 'a' + 10;
392 else
393 return 0;
396 static inline int tohex(int v)
398 if (v < 10)
399 return v + '0';
400 else
401 return v - 10 + 'a';
404 static void memtohex(char *buf, const uint8_t *mem, int len)
406 int i, c;
407 char *q;
408 q = buf;
409 for(i = 0; i < len; i++) {
410 c = mem[i];
411 *q++ = tohex(c >> 4);
412 *q++ = tohex(c & 0xf);
414 *q = '\0';
417 static void hextomem(uint8_t *mem, const char *buf, int len)
419 int i;
421 for(i = 0; i < len; i++) {
422 mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
423 buf += 2;
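/*
 * Example: memtohex() turns the two bytes { 0xde, 0xad } into the
 * NUL-terminated string "dead" (two lower-case hex digits per byte), and
 * hextomem() reverses the transformation.  All register and memory
 * payloads cross the wire in this expanded form, so each byte costs two
 * characters of packet space.
 */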
427 /* return -1 if error, 0 if OK */
428 static int put_packet_binary(GDBState *s, const char *buf, int len)
430 int csum, i;
431 uint8_t *p;
433 for(;;) {
434 p = s->last_packet;
435 *(p++) = '$';
436 memcpy(p, buf, len);
437 p += len;
438 csum = 0;
439 for(i = 0; i < len; i++) {
440 csum += buf[i];
442 *(p++) = '#';
443 *(p++) = tohex((csum >> 4) & 0xf);
444 *(p++) = tohex((csum) & 0xf);
446 s->last_packet_len = p - s->last_packet;
447 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
449 #ifdef CONFIG_USER_ONLY
450 i = get_char(s);
451 if (i < 0)
452 return -1;
453 if (i == '+')
454 break;
455 #else
456 break;
457 #endif
459 return 0;
462 /* return -1 if error, 0 if OK */
463 static int put_packet(GDBState *s, const char *buf)
465 #ifdef DEBUG_GDB
466 printf("reply='%s'\n", buf);
467 #endif
469 return put_packet_binary(s, buf, strlen(buf));
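/*
 * Example of the framing built above: put_packet(s, "OK") transmits the
 * six bytes "$OK#9a" -- '$', the payload, '#', then the payload checksum
 * (0x4f + 0x4b = 0x9a) as two lower-case hex digits.  In user mode the
 * loop in put_packet_binary() then waits for gdb's '+' acknowledgement
 * before returning.
 */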
472 /* The GDB remote protocol transfers values in target byte order. This means
473 we can use the raw memory access routines to access the value buffer.
474 Conveniently, these also handle the case where the buffer is mis-aligned.
476 #define GET_REG8(val) do { \
477 stb_p(mem_buf, val); \
478 return 1; \
479 } while(0)
480 #define GET_REG16(val) do { \
481 stw_p(mem_buf, val); \
482 return 2; \
483 } while(0)
484 #define GET_REG32(val) do { \
485 stl_p(mem_buf, val); \
486 return 4; \
487 } while(0)
488 #define GET_REG64(val) do { \
489 stq_p(mem_buf, val); \
490 return 8; \
491 } while(0)
493 #if TARGET_LONG_BITS == 64
494 #define GET_REGL(val) GET_REG64(val)
495 #define ldtul_p(addr) ldq_p(addr)
496 #else
497 #define GET_REGL(val) GET_REG32(val)
498 #define ldtul_p(addr) ldl_p(addr)
499 #endif
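/*
 * For instance, on a target with TARGET_LONG_BITS == 32, GET_REGL(val)
 * expands to GET_REG32(val): it stores val into mem_buf in target byte
 * order via stl_p() and makes the caller return 4; the 'g'/'p' handlers
 * below then hex-encode those bytes with memtohex() before replying.
 */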
501 #if defined(TARGET_I386)
503 #ifdef TARGET_X86_64
504 static const int gpr_map[16] = {
505 R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
506 8, 9, 10, 11, 12, 13, 14, 15
508 #else
509 #define gpr_map gpr_map32
510 #endif
511 static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
513 #define NUM_CORE_REGS (CPU_NB_REGS * 2 + 25)
515 #define IDX_IP_REG CPU_NB_REGS
516 #define IDX_FLAGS_REG (IDX_IP_REG + 1)
517 #define IDX_SEG_REGS (IDX_FLAGS_REG + 1)
518 #define IDX_FP_REGS (IDX_SEG_REGS + 6)
519 #define IDX_XMM_REGS (IDX_FP_REGS + 16)
520 #define IDX_MXCSR_REG (IDX_XMM_REGS + CPU_NB_REGS)
522 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
524 if (n < CPU_NB_REGS) {
525 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
526 GET_REG64(env->regs[gpr_map[n]]);
527 } else if (n < CPU_NB_REGS32) {
528 GET_REG32(env->regs[gpr_map32[n]]);
530 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
531 #ifdef USE_X86LDOUBLE
532 /* FIXME: byteswap float values - after fixing fpregs layout. */
533 memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
534 #else
535 memset(mem_buf, 0, 10);
536 #endif
537 return 10;
538 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
539 n -= IDX_XMM_REGS;
540 if (n < CPU_NB_REGS32 ||
541 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
542 stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
543 stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
544 return 16;
546 } else {
547 switch (n) {
548 case IDX_IP_REG:
549 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
550 GET_REG64(env->eip);
551 } else {
552 GET_REG32(env->eip);
554 case IDX_FLAGS_REG: GET_REG32(env->eflags);
556 case IDX_SEG_REGS: GET_REG32(env->segs[R_CS].selector);
557 case IDX_SEG_REGS + 1: GET_REG32(env->segs[R_SS].selector);
558 case IDX_SEG_REGS + 2: GET_REG32(env->segs[R_DS].selector);
559 case IDX_SEG_REGS + 3: GET_REG32(env->segs[R_ES].selector);
560 case IDX_SEG_REGS + 4: GET_REG32(env->segs[R_FS].selector);
561 case IDX_SEG_REGS + 5: GET_REG32(env->segs[R_GS].selector);
563 case IDX_FP_REGS + 8: GET_REG32(env->fpuc);
564 case IDX_FP_REGS + 9: GET_REG32((env->fpus & ~0x3800) |
565 (env->fpstt & 0x7) << 11);
566 case IDX_FP_REGS + 10: GET_REG32(0); /* ftag */
567 case IDX_FP_REGS + 11: GET_REG32(0); /* fiseg */
568 case IDX_FP_REGS + 12: GET_REG32(0); /* fioff */
569 case IDX_FP_REGS + 13: GET_REG32(0); /* foseg */
570 case IDX_FP_REGS + 14: GET_REG32(0); /* fooff */
571 case IDX_FP_REGS + 15: GET_REG32(0); /* fop */
573 case IDX_MXCSR_REG: GET_REG32(env->mxcsr);
576 return 0;
579 static int cpu_x86_gdb_load_seg(CPUState *env, int sreg, uint8_t *mem_buf)
581 uint16_t selector = ldl_p(mem_buf);
583 if (selector != env->segs[sreg].selector) {
584 #if defined(CONFIG_USER_ONLY)
585 cpu_x86_load_seg(env, sreg, selector);
586 #else
587 unsigned int limit, flags;
588 target_ulong base;
590 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
591 base = selector << 4;
592 limit = 0xffff;
593 flags = 0;
594 } else {
595 if (!cpu_x86_get_descr_debug(env, selector, &base, &limit, &flags))
596 return 4;
598 cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
599 #endif
601 return 4;
604 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
606 uint32_t tmp;
608 if (n < CPU_NB_REGS) {
609 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
610 env->regs[gpr_map[n]] = ldtul_p(mem_buf);
611 return sizeof(target_ulong);
612 } else if (n < CPU_NB_REGS32) {
613 n = gpr_map32[n];
614 env->regs[n] &= ~0xffffffffUL;
615 env->regs[n] |= (uint32_t)ldl_p(mem_buf);
616 return 4;
618 } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
619 #ifdef USE_X86LDOUBLE
620 /* FIXME: byteswap float values - after fixing fpregs layout. */
621 memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
622 #endif
623 return 10;
624 } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
625 n -= IDX_XMM_REGS;
626 if (n < CPU_NB_REGS32 ||
627 (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
628 env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
629 env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
630 return 16;
632 } else {
633 switch (n) {
634 case IDX_IP_REG:
635 if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
636 env->eip = ldq_p(mem_buf);
637 return 8;
638 } else {
639 env->eip &= ~0xffffffffUL;
640 env->eip |= (uint32_t)ldl_p(mem_buf);
641 return 4;
643 case IDX_FLAGS_REG:
644 env->eflags = ldl_p(mem_buf);
645 return 4;
647 case IDX_SEG_REGS: return cpu_x86_gdb_load_seg(env, R_CS, mem_buf);
648 case IDX_SEG_REGS + 1: return cpu_x86_gdb_load_seg(env, R_SS, mem_buf);
649 case IDX_SEG_REGS + 2: return cpu_x86_gdb_load_seg(env, R_DS, mem_buf);
650 case IDX_SEG_REGS + 3: return cpu_x86_gdb_load_seg(env, R_ES, mem_buf);
651 case IDX_SEG_REGS + 4: return cpu_x86_gdb_load_seg(env, R_FS, mem_buf);
652 case IDX_SEG_REGS + 5: return cpu_x86_gdb_load_seg(env, R_GS, mem_buf);
654 case IDX_FP_REGS + 8:
655 env->fpuc = ldl_p(mem_buf);
656 return 4;
657 case IDX_FP_REGS + 9:
658 tmp = ldl_p(mem_buf);
659 env->fpstt = (tmp >> 11) & 7;
660 env->fpus = tmp & ~0x3800;
661 return 4;
662 case IDX_FP_REGS + 10: /* ftag */ return 4;
663 case IDX_FP_REGS + 11: /* fiseg */ return 4;
664 case IDX_FP_REGS + 12: /* fioff */ return 4;
665 case IDX_FP_REGS + 13: /* foseg */ return 4;
666 case IDX_FP_REGS + 14: /* fooff */ return 4;
667 case IDX_FP_REGS + 15: /* fop */ return 4;
669 case IDX_MXCSR_REG:
670 env->mxcsr = ldl_p(mem_buf);
671 return 4;
674 /* Unrecognised register. */
675 return 0;
678 #elif defined (TARGET_PPC)
680 /* Old gdb always expects FP registers. Newer (xml-aware) gdb only
681 expects whatever the target description contains. Due to a
682 historical mishap the FP registers appear in between core integer
683 regs and PC, MSR, CR, and so forth. We hack round this by giving the
684 FP regs zero size when talking to a newer gdb. */
685 #define NUM_CORE_REGS 71
686 #if defined (TARGET_PPC64)
687 #define GDB_CORE_XML "power64-core.xml"
688 #else
689 #define GDB_CORE_XML "power-core.xml"
690 #endif
692 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
694 if (n < 32) {
695 /* gprs */
696 GET_REGL(env->gpr[n]);
697 } else if (n < 64) {
698 /* fprs */
699 if (gdb_has_xml)
700 return 0;
701 stfq_p(mem_buf, env->fpr[n-32]);
702 return 8;
703 } else {
704 switch (n) {
705 case 64: GET_REGL(env->nip);
706 case 65: GET_REGL(env->msr);
707 case 66:
709 uint32_t cr = 0;
710 int i;
711 for (i = 0; i < 8; i++)
712 cr |= env->crf[i] << (32 - ((i + 1) * 4));
713 GET_REG32(cr);
715 case 67: GET_REGL(env->lr);
716 case 68: GET_REGL(env->ctr);
717 case 69: GET_REGL(env->xer);
718 case 70:
720 if (gdb_has_xml)
721 return 0;
722 GET_REG32(0); /* fpscr */
726 return 0;
729 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
731 if (n < 32) {
732 /* gprs */
733 env->gpr[n] = ldtul_p(mem_buf);
734 return sizeof(target_ulong);
735 } else if (n < 64) {
736 /* fprs */
737 if (gdb_has_xml)
738 return 0;
739 env->fpr[n-32] = ldfq_p(mem_buf);
740 return 8;
741 } else {
742 switch (n) {
743 case 64:
744 env->nip = ldtul_p(mem_buf);
745 return sizeof(target_ulong);
746 case 65:
747 ppc_store_msr(env, ldtul_p(mem_buf));
748 return sizeof(target_ulong);
749 case 66:
751 uint32_t cr = ldl_p(mem_buf);
752 int i;
753 for (i = 0; i < 8; i++)
754 env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
755 return 4;
757 case 67:
758 env->lr = ldtul_p(mem_buf);
759 return sizeof(target_ulong);
760 case 68:
761 env->ctr = ldtul_p(mem_buf);
762 return sizeof(target_ulong);
763 case 69:
764 env->xer = ldtul_p(mem_buf);
765 return sizeof(target_ulong);
766 case 70:
767 /* fpscr */
768 if (gdb_has_xml)
769 return 0;
770 return 4;
773 return 0;
776 #elif defined (TARGET_SPARC)
778 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
779 #define NUM_CORE_REGS 86
780 #else
781 #define NUM_CORE_REGS 72
782 #endif
784 #ifdef TARGET_ABI32
785 #define GET_REGA(val) GET_REG32(val)
786 #else
787 #define GET_REGA(val) GET_REGL(val)
788 #endif
790 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
792 if (n < 8) {
793 /* g0..g7 */
794 GET_REGA(env->gregs[n]);
796 if (n < 32) {
797 /* register window */
798 GET_REGA(env->regwptr[n - 8]);
800 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
801 if (n < 64) {
802 /* fprs */
803 GET_REG32(*((uint32_t *)&env->fpr[n - 32]));
805 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
806 switch (n) {
807 case 64: GET_REGA(env->y);
808 case 65: GET_REGA(cpu_get_psr(env));
809 case 66: GET_REGA(env->wim);
810 case 67: GET_REGA(env->tbr);
811 case 68: GET_REGA(env->pc);
812 case 69: GET_REGA(env->npc);
813 case 70: GET_REGA(env->fsr);
814 case 71: GET_REGA(0); /* csr */
815 default: GET_REGA(0);
817 #else
818 if (n < 64) {
819 /* f0-f31 */
820 GET_REG32(*((uint32_t *)&env->fpr[n - 32]));
822 if (n < 80) {
823 /* f32-f62 (double width, even numbers only) */
824 uint64_t val;
826 val = (uint64_t)*((uint32_t *)&env->fpr[(n - 64) * 2 + 32]) << 32;
827 val |= *((uint32_t *)&env->fpr[(n - 64) * 2 + 33]);
828 GET_REG64(val);
830 switch (n) {
831 case 80: GET_REGL(env->pc);
832 case 81: GET_REGL(env->npc);
833 case 82: GET_REGL((cpu_get_ccr(env) << 32) |
834 ((env->asi & 0xff) << 24) |
835 ((env->pstate & 0xfff) << 8) |
836 cpu_get_cwp64(env));
837 case 83: GET_REGL(env->fsr);
838 case 84: GET_REGL(env->fprs);
839 case 85: GET_REGL(env->y);
841 #endif
842 return 0;
845 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
847 #if defined(TARGET_ABI32)
848 abi_ulong tmp;
850 tmp = ldl_p(mem_buf);
851 #else
852 target_ulong tmp;
854 tmp = ldtul_p(mem_buf);
855 #endif
857 if (n < 8) {
858 /* g0..g7 */
859 env->gregs[n] = tmp;
860 } else if (n < 32) {
861 /* register window */
862 env->regwptr[n - 8] = tmp;
864 #if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
865 else if (n < 64) {
866 /* fprs */
867 *((uint32_t *)&env->fpr[n - 32]) = tmp;
868 } else {
869 /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
870 switch (n) {
871 case 64: env->y = tmp; break;
872 case 65: cpu_put_psr(env, tmp); break;
873 case 66: env->wim = tmp; break;
874 case 67: env->tbr = tmp; break;
875 case 68: env->pc = tmp; break;
876 case 69: env->npc = tmp; break;
877 case 70: env->fsr = tmp; break;
878 default: return 0;
881 return 4;
882 #else
883 else if (n < 64) {
884 /* f0-f31 */
885 env->fpr[n] = ldfl_p(mem_buf);
886 return 4;
887 } else if (n < 80) {
888 /* f32-f62 (double width, even numbers only) */
889 *((uint32_t *)&env->fpr[(n - 64) * 2 + 32]) = tmp >> 32;
890 *((uint32_t *)&env->fpr[(n - 64) * 2 + 33]) = tmp;
891 } else {
892 switch (n) {
893 case 80: env->pc = tmp; break;
894 case 81: env->npc = tmp; break;
895 case 82:
896 cpu_put_ccr(env, tmp >> 32);
897 env->asi = (tmp >> 24) & 0xff;
898 env->pstate = (tmp >> 8) & 0xfff;
899 cpu_put_cwp64(env, tmp & 0xff);
900 break;
901 case 83: env->fsr = tmp; break;
902 case 84: env->fprs = tmp; break;
903 case 85: env->y = tmp; break;
904 default: return 0;
907 return 8;
908 #endif
910 #elif defined (TARGET_ARM)
912 /* Old gdb always expects FPA registers. Newer (xml-aware) gdb only expects
913 whatever the target description contains. Due to a historical mishap
914 the FPA registers appear in between core integer regs and the CPSR.
915 We hack round this by giving the FPA regs zero size when talking to a
916 newer gdb. */
917 #define NUM_CORE_REGS 26
918 #define GDB_CORE_XML "arm-core.xml"
920 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
922 if (n < 16) {
923 /* Core integer register. */
924 GET_REG32(env->regs[n]);
926 if (n < 24) {
927 /* FPA registers. */
928 if (gdb_has_xml)
929 return 0;
930 memset(mem_buf, 0, 12);
931 return 12;
933 switch (n) {
934 case 24:
935 /* FPA status register. */
936 if (gdb_has_xml)
937 return 0;
938 GET_REG32(0);
939 case 25:
940 /* CPSR */
941 GET_REG32(cpsr_read(env));
943 /* Unknown register. */
944 return 0;
947 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
949 uint32_t tmp;
951 tmp = ldl_p(mem_buf);
953 /* Mask out low bit of PC to work around gdb bugs. This will probably
954 cause problems if we ever implement the Jazelle DBX extensions. */
955 if (n == 15)
956 tmp &= ~1;
958 if (n < 16) {
959 /* Core integer register. */
960 env->regs[n] = tmp;
961 return 4;
963 if (n < 24) { /* 16-23 */
964 /* FPA registers (ignored). */
965 if (gdb_has_xml)
966 return 0;
967 return 12;
969 switch (n) {
970 case 24:
971 /* FPA status register (ignored). */
972 if (gdb_has_xml)
973 return 0;
974 return 4;
975 case 25:
976 /* CPSR */
977 cpsr_write (env, tmp, 0xffffffff);
978 return 4;
980 /* Unknown register. */
981 return 0;
984 #elif defined (TARGET_M68K)
986 #define NUM_CORE_REGS 18
988 #define GDB_CORE_XML "cf-core.xml"
990 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
992 if (n < 8) {
993 /* D0-D7 */
994 GET_REG32(env->dregs[n]);
995 } else if (n < 16) {
996 /* A0-A7 */
997 GET_REG32(env->aregs[n - 8]);
998 } else {
999 switch (n) {
1000 case 16: GET_REG32(env->sr);
1001 case 17: GET_REG32(env->pc);
1004 /* FP registers not included here because they vary between
1005 ColdFire and m68k. Use XML bits for these. */
1006 return 0;
1009 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
1011 uint32_t tmp;
1013 tmp = ldl_p(mem_buf);
1015 if (n < 8) {
1016 /* D0-D7 */
1017 env->dregs[n] = tmp;
1018 } else if (n < 16) {
1019 /* A0-A7 */
1020 env->aregs[n - 8] = tmp;
1021 } else {
1022 switch (n) {
1023 case 16: env->sr = tmp; break;
1024 case 17: env->pc = tmp; break;
1025 default: return 0;
1028 return 4;
1030 #elif defined (TARGET_MIPS)
1032 #define NUM_CORE_REGS 73
1034 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
1036 if (n < 32) {
1037 GET_REGL(env->active_tc.gpr[n]);
1039 if (env->CP0_Config1 & (1 << CP0C1_FP)) {
1040 if (n >= 38 && n < 70) {
1041 if (env->CP0_Status & (1 << CP0St_FR))
1042 GET_REGL(env->active_fpu.fpr[n - 38].d);
1043 else
1044 GET_REGL(env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
1046 switch (n) {
1047 case 70: GET_REGL((int32_t)env->active_fpu.fcr31);
1048 case 71: GET_REGL((int32_t)env->active_fpu.fcr0);
1051 switch (n) {
1052 case 32: GET_REGL((int32_t)env->CP0_Status);
1053 case 33: GET_REGL(env->active_tc.LO[0]);
1054 case 34: GET_REGL(env->active_tc.HI[0]);
1055 case 35: GET_REGL(env->CP0_BadVAddr);
1056 case 36: GET_REGL((int32_t)env->CP0_Cause);
1057 case 37: GET_REGL(env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16));
1058 case 72: GET_REGL(0); /* fp */
1059 case 89: GET_REGL((int32_t)env->CP0_PRid);
1061 if (n >= 73 && n <= 88) {
1062 /* 16 embedded regs. */
1063 GET_REGL(0);
1066 return 0;
1069 /* convert MIPS rounding mode in FCR31 to IEEE library */
1070 static unsigned int ieee_rm[] =
1072 float_round_nearest_even,
1073 float_round_to_zero,
1074 float_round_up,
1075 float_round_down
1077 #define RESTORE_ROUNDING_MODE \
1078 set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
1080 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
1082 target_ulong tmp;
1084 tmp = ldtul_p(mem_buf);
1086 if (n < 32) {
1087 env->active_tc.gpr[n] = tmp;
1088 return sizeof(target_ulong);
1090 if (env->CP0_Config1 & (1 << CP0C1_FP)
1091 && n >= 38 && n < 73) {
1092 if (n < 70) {
1093 if (env->CP0_Status & (1 << CP0St_FR))
1094 env->active_fpu.fpr[n - 38].d = tmp;
1095 else
1096 env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
1098 switch (n) {
1099 case 70:
1100 env->active_fpu.fcr31 = tmp & 0xFF83FFFF;
1101 /* set rounding mode */
1102 RESTORE_ROUNDING_MODE;
1103 #ifndef CONFIG_SOFTFLOAT
1104 /* no floating point exception for native float */
1105 SET_FP_ENABLE(env->active_fpu.fcr31, 0);
1106 #endif
1107 break;
1108 case 71: env->active_fpu.fcr0 = tmp; break;
1110 return sizeof(target_ulong);
1112 switch (n) {
1113 case 32: env->CP0_Status = tmp; break;
1114 case 33: env->active_tc.LO[0] = tmp; break;
1115 case 34: env->active_tc.HI[0] = tmp; break;
1116 case 35: env->CP0_BadVAddr = tmp; break;
1117 case 36: env->CP0_Cause = tmp; break;
1118 case 37:
1119 env->active_tc.PC = tmp & ~(target_ulong)1;
1120 if (tmp & 1) {
1121 env->hflags |= MIPS_HFLAG_M16;
1122 } else {
1123 env->hflags &= ~(MIPS_HFLAG_M16);
1125 break;
1126 case 72: /* fp, ignored */ break;
1127 default:
1128 if (n > 89)
1129 return 0;
1130 /* Other registers are readonly. Ignore writes. */
1131 break;
1134 return sizeof(target_ulong);
1136 #elif defined (TARGET_SH4)
1138 /* Hint: Use "set architecture sh4" in GDB to see fpu registers */
1139 /* FIXME: We should use XML for this. */
1141 #define NUM_CORE_REGS 59
1143 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
1145 if (n < 8) {
1146 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1147 GET_REGL(env->gregs[n + 16]);
1148 } else {
1149 GET_REGL(env->gregs[n]);
1151 } else if (n < 16) {
1152 GET_REGL(env->gregs[n]);
1153 } else if (n >= 25 && n < 41) {
1154 GET_REGL(env->fregs[(n - 25) + ((env->fpscr & FPSCR_FR) ? 16 : 0)]);
1155 } else if (n >= 43 && n < 51) {
1156 GET_REGL(env->gregs[n - 43]);
1157 } else if (n >= 51 && n < 59) {
1158 GET_REGL(env->gregs[n - (51 - 16)]);
1160 switch (n) {
1161 case 16: GET_REGL(env->pc);
1162 case 17: GET_REGL(env->pr);
1163 case 18: GET_REGL(env->gbr);
1164 case 19: GET_REGL(env->vbr);
1165 case 20: GET_REGL(env->mach);
1166 case 21: GET_REGL(env->macl);
1167 case 22: GET_REGL(env->sr);
1168 case 23: GET_REGL(env->fpul);
1169 case 24: GET_REGL(env->fpscr);
1170 case 41: GET_REGL(env->ssr);
1171 case 42: GET_REGL(env->spc);
1174 return 0;
1177 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
1179 uint32_t tmp;
1181 tmp = ldl_p(mem_buf);
1183 if (n < 8) {
1184 if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
1185 env->gregs[n + 16] = tmp;
1186 } else {
1187 env->gregs[n] = tmp;
1189 return 4;
1190 } else if (n < 16) {
1191 env->gregs[n] = tmp;
1192 return 4;
1193 } else if (n >= 25 && n < 41) {
1194 env->fregs[(n - 25) + ((env->fpscr & FPSCR_FR) ? 16 : 0)] = tmp;
1195 return 4;
1196 } else if (n >= 43 && n < 51) {
1197 env->gregs[n - 43] = tmp;
1198 return 4;
1199 } else if (n >= 51 && n < 59) {
1200 env->gregs[n - (51 - 16)] = tmp;
1201 return 4;
1203 switch (n) {
1204 case 16: env->pc = tmp; break;
1205 case 17: env->pr = tmp; break;
1206 case 18: env->gbr = tmp; break;
1207 case 19: env->vbr = tmp; break;
1208 case 20: env->mach = tmp; break;
1209 case 21: env->macl = tmp; break;
1210 case 22: env->sr = tmp; break;
1211 case 23: env->fpul = tmp; break;
1212 case 24: env->fpscr = tmp; break;
1213 case 41: env->ssr = tmp; break;
1214 case 42: env->spc = tmp; break;
1215 default: return 0;
1218 return 4;
1220 #elif defined (TARGET_MICROBLAZE)
1222 #define NUM_CORE_REGS (32 + 5)
1224 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
1226 if (n < 32) {
1227 GET_REG32(env->regs[n]);
1228 } else {
1229 GET_REG32(env->sregs[n - 32]);
1231 return 0;
1234 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
1236 uint32_t tmp;
1238 if (n > NUM_CORE_REGS)
1239 return 0;
1241 tmp = ldl_p(mem_buf);
1243 if (n < 32) {
1244 env->regs[n] = tmp;
1245 } else {
1246 env->sregs[n - 32] = tmp;
1248 return 4;
1250 #elif defined (TARGET_CRIS)
1252 #define NUM_CORE_REGS 49
1254 static int
1255 read_register_crisv10(CPUState *env, uint8_t *mem_buf, int n)
1257 if (n < 15) {
1258 GET_REG32(env->regs[n]);
1261 if (n == 15) {
1262 GET_REG32(env->pc);
1265 if (n < 32) {
1266 switch (n) {
1267 case 16:
1268 GET_REG8(env->pregs[n - 16]);
1269 break;
1270 case 17:
1271 GET_REG8(env->pregs[n - 16]);
1272 break;
1273 case 20:
1274 case 21:
1275 GET_REG16(env->pregs[n - 16]);
1276 break;
1277 default:
1278 if (n >= 23) {
1279 GET_REG32(env->pregs[n - 16]);
1281 break;
1284 return 0;
1287 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
1289 uint8_t srs;
1291 if (env->pregs[PR_VR] < 32)
1292 return read_register_crisv10(env, mem_buf, n);
1294 srs = env->pregs[PR_SRS];
1295 if (n < 16) {
1296 GET_REG32(env->regs[n]);
1299 if (n >= 21 && n < 32) {
1300 GET_REG32(env->pregs[n - 16]);
1302 if (n >= 33 && n < 49) {
1303 GET_REG32(env->sregs[srs][n - 33]);
1305 switch (n) {
1306 case 16: GET_REG8(env->pregs[0]);
1307 case 17: GET_REG8(env->pregs[1]);
1308 case 18: GET_REG32(env->pregs[2]);
1309 case 19: GET_REG8(srs);
1310 case 20: GET_REG16(env->pregs[4]);
1311 case 32: GET_REG32(env->pc);
1314 return 0;
1317 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
1319 uint32_t tmp;
1321 if (n > 49)
1322 return 0;
1324 tmp = ldl_p(mem_buf);
1326 if (n < 16) {
1327 env->regs[n] = tmp;
1330 if (n >= 21 && n < 32) {
1331 env->pregs[n - 16] = tmp;
1334 /* FIXME: Should support function regs be writable? */
1335 switch (n) {
1336 case 16: return 1;
1337 case 17: return 1;
1338 case 18: env->pregs[PR_PID] = tmp; break;
1339 case 19: return 1;
1340 case 20: return 2;
1341 case 32: env->pc = tmp; break;
1344 return 4;
1346 #elif defined (TARGET_ALPHA)
1348 #define NUM_CORE_REGS 67
1350 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
1352 uint64_t val;
1353 CPU_DoubleU d;
1355 switch (n) {
1356 case 0 ... 30:
1357 val = env->ir[n];
1358 break;
1359 case 32 ... 62:
1360 d.d = env->fir[n - 32];
1361 val = d.ll;
1362 break;
1363 case 63:
1364 val = cpu_alpha_load_fpcr(env);
1365 break;
1366 case 64:
1367 val = env->pc;
1368 break;
1369 case 66:
1370 val = env->unique;
1371 break;
1372 case 31:
1373 case 65:
1374 /* 31 really is the zero register; 65 is unassigned in the
1375 gdb protocol, but is still required to occupy 8 bytes. */
1376 val = 0;
1377 break;
1378 default:
1379 return 0;
1381 GET_REGL(val);
1384 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
1386 target_ulong tmp = ldtul_p(mem_buf);
1387 CPU_DoubleU d;
1389 switch (n) {
1390 case 0 ... 30:
1391 env->ir[n] = tmp;
1392 break;
1393 case 32 ... 62:
1394 d.ll = tmp;
1395 env->fir[n - 32] = d.d;
1396 break;
1397 case 63:
1398 cpu_alpha_store_fpcr(env, tmp);
1399 break;
1400 case 64:
1401 env->pc = tmp;
1402 break;
1403 case 66:
1404 env->unique = tmp;
1405 break;
1406 case 31:
1407 case 65:
1408 /* 31 really is the zero register; 65 is unassigned in the
1409 gdb protocol, but is still required to occupy 8 bytes. */
1410 break;
1411 default:
1412 return 0;
1414 return 8;
1416 #elif defined (TARGET_S390X)
1418 #define NUM_CORE_REGS S390_NUM_TOTAL_REGS
1420 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
1422 switch (n) {
1423 case S390_PSWM_REGNUM: GET_REGL(env->psw.mask); break;
1424 case S390_PSWA_REGNUM: GET_REGL(env->psw.addr); break;
1425 case S390_R0_REGNUM ... S390_R15_REGNUM:
1426 GET_REGL(env->regs[n-S390_R0_REGNUM]); break;
1427 case S390_A0_REGNUM ... S390_A15_REGNUM:
1428 GET_REG32(env->aregs[n-S390_A0_REGNUM]); break;
1429 case S390_FPC_REGNUM: GET_REG32(env->fpc); break;
1430 case S390_F0_REGNUM ... S390_F15_REGNUM:
1431 /* XXX */
1432 break;
1433 case S390_PC_REGNUM: GET_REGL(env->psw.addr); break;
1434 case S390_CC_REGNUM: GET_REG32(env->cc); break;
1437 return 0;
1440 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
1442 target_ulong tmpl;
1443 uint32_t tmp32;
1444 int r = 8;
1445 tmpl = ldtul_p(mem_buf);
1446 tmp32 = ldl_p(mem_buf);
1448 switch (n) {
1449 case S390_PSWM_REGNUM: env->psw.mask = tmpl; break;
1450 case S390_PSWA_REGNUM: env->psw.addr = tmpl; break;
1451 case S390_R0_REGNUM ... S390_R15_REGNUM:
1452 env->regs[n-S390_R0_REGNUM] = tmpl; break;
1453 case S390_A0_REGNUM ... S390_A15_REGNUM:
1454 env->aregs[n-S390_A0_REGNUM] = tmp32; r=4; break;
1455 case S390_FPC_REGNUM: env->fpc = tmp32; r=4; break;
1456 case S390_F0_REGNUM ... S390_F15_REGNUM:
1457 /* XXX */
1458 break;
1459 case S390_PC_REGNUM: env->psw.addr = tmpl; break;
1460 case S390_CC_REGNUM: env->cc = tmp32; r=4; break;
1463 return r;
1465 #else
1467 #define NUM_CORE_REGS 0
1469 static int cpu_gdb_read_register(CPUState *env, uint8_t *mem_buf, int n)
1471 return 0;
1474 static int cpu_gdb_write_register(CPUState *env, uint8_t *mem_buf, int n)
1476 return 0;
1479 #endif
1481 static int num_g_regs = NUM_CORE_REGS;
1483 #ifdef GDB_CORE_XML
1484 /* Encode data using the encoding for 'x' packets. */
1485 static int memtox(char *buf, const char *mem, int len)
1487 char *p = buf;
1488 char c;
1490 while (len--) {
1491 c = *(mem++);
1492 switch (c) {
1493 case '#': case '$': case '*': case '}':
1494 *(p++) = '}';
1495 *(p++) = c ^ 0x20;
1496 break;
1497 default:
1498 *(p++) = c;
1499 break;
1502 return p - buf;
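/*
 * Example: the four reserved characters '#', '$', '*' and '}' are emitted
 * as '}' followed by the character XORed with 0x20, so a literal '#'
 * (0x23) becomes the byte pair 0x7d 0x03; every other byte is copied
 * through unchanged.  This is the escaping used for the binary qXfer
 * replies assembled further below.
 */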
1505 static const char *get_feature_xml(const char *p, const char **newp)
1507 extern const char *const xml_builtin[][2];
1508 size_t len;
1509 int i;
1510 const char *name;
1511 static char target_xml[1024];
1513 len = 0;
1514 while (p[len] && p[len] != ':')
1515 len++;
1516 *newp = p + len;
1518 name = NULL;
1519 if (strncmp(p, "target.xml", len) == 0) {
1520 /* Generate the XML description for this CPU. */
1521 if (!target_xml[0]) {
1522 GDBRegisterState *r;
1524 snprintf(target_xml, sizeof(target_xml),
1525 "<?xml version=\"1.0\"?>"
1526 "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
1527 "<target>"
1528 "<xi:include href=\"%s\"/>",
1529 GDB_CORE_XML);
1531 for (r = first_cpu->gdb_regs; r; r = r->next) {
1532 pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
1533 pstrcat(target_xml, sizeof(target_xml), r->xml);
1534 pstrcat(target_xml, sizeof(target_xml), "\"/>");
1536 pstrcat(target_xml, sizeof(target_xml), "</target>");
1538 return target_xml;
1540 for (i = 0; ; i++) {
1541 name = xml_builtin[i][0];
1542 if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len))
1543 break;
1545 return name ? xml_builtin[i][1] : NULL;
1547 #endif
1549 static int gdb_read_register(CPUState *env, uint8_t *mem_buf, int reg)
1551 GDBRegisterState *r;
1553 if (reg < NUM_CORE_REGS)
1554 return cpu_gdb_read_register(env, mem_buf, reg);
1556 for (r = env->gdb_regs; r; r = r->next) {
1557 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1558 return r->get_reg(env, mem_buf, reg - r->base_reg);
1561 return 0;
1564 static int gdb_write_register(CPUState *env, uint8_t *mem_buf, int reg)
1566 GDBRegisterState *r;
1568 if (reg < NUM_CORE_REGS)
1569 return cpu_gdb_write_register(env, mem_buf, reg);
1571 for (r = env->gdb_regs; r; r = r->next) {
1572 if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
1573 return r->set_reg(env, mem_buf, reg - r->base_reg);
1576 return 0;
1579 /* Register a supplemental set of CPU registers. If g_pos is nonzero it
1580 specifies the first register number and these registers are included in
1581 a standard "g" packet. Direction is relative to gdb, i.e. get_reg is
1582 gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
1585 void gdb_register_coprocessor(CPUState * env,
1586 gdb_reg_cb get_reg, gdb_reg_cb set_reg,
1587 int num_regs, const char *xml, int g_pos)
1589 GDBRegisterState *s;
1590 GDBRegisterState **p;
1591 static int last_reg = NUM_CORE_REGS;
1593 s = (GDBRegisterState *)qemu_mallocz(sizeof(GDBRegisterState));
1594 s->base_reg = last_reg;
1595 s->num_regs = num_regs;
1596 s->get_reg = get_reg;
1597 s->set_reg = set_reg;
1598 s->xml = xml;
1599 p = &env->gdb_regs;
1600 while (*p) {
1601 /* Check for duplicates. */
1602 if (strcmp((*p)->xml, xml) == 0)
1603 return;
1604 p = &(*p)->next;
1606 /* Add to end of list. */
1607 last_reg += num_regs;
1608 *p = s;
1609 if (g_pos) {
1610 if (g_pos != s->base_reg) {
1611 fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n"
1612 "Expected %d got %d\n", xml, g_pos, s->base_reg);
1613 } else {
1614 num_g_regs = last_reg;
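/*
 * Typical use (illustrative only -- the callback names, register count
 * and XML file here are placeholders, not taken from this file):
 *
 *     gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
 *                              35, "arm-vfp.xml", 0);
 *
 * With g_pos == 0 the extra registers are reachable only through the
 * 'p'/'P' packets; a nonzero g_pos also appends them to the 'g' packet,
 * provided it matches the base register number computed above.
 */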
1619 #ifndef CONFIG_USER_ONLY
1620 static const int xlat_gdb_type[] = {
1621 [GDB_WATCHPOINT_WRITE] = BP_GDB | BP_MEM_WRITE,
1622 [GDB_WATCHPOINT_READ] = BP_GDB | BP_MEM_READ,
1623 [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
1625 #endif
1627 static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
1629 CPUState *env;
1630 int err = 0;
1632 if (kvm_enabled())
1633 return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1635 switch (type) {
1636 case GDB_BREAKPOINT_SW:
1637 case GDB_BREAKPOINT_HW:
1638 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1639 err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
1640 if (err)
1641 break;
1643 return err;
1644 #ifndef CONFIG_USER_ONLY
1645 case GDB_WATCHPOINT_WRITE:
1646 case GDB_WATCHPOINT_READ:
1647 case GDB_WATCHPOINT_ACCESS:
1648 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1649 err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
1650 NULL);
1651 if (err)
1652 break;
1654 return err;
1655 #endif
1656 default:
1657 return -ENOSYS;
1661 static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
1663 CPUState *env;
1664 int err = 0;
1666 if (kvm_enabled())
1667 return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);
1669 switch (type) {
1670 case GDB_BREAKPOINT_SW:
1671 case GDB_BREAKPOINT_HW:
1672 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1673 err = cpu_breakpoint_remove(env, addr, BP_GDB);
1674 if (err)
1675 break;
1677 return err;
1678 #ifndef CONFIG_USER_ONLY
1679 case GDB_WATCHPOINT_WRITE:
1680 case GDB_WATCHPOINT_READ:
1681 case GDB_WATCHPOINT_ACCESS:
1682 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1683 err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
1684 if (err)
1685 break;
1687 return err;
1688 #endif
1689 default:
1690 return -ENOSYS;
1694 static void gdb_breakpoint_remove_all(void)
1696 CPUState *env;
1698 if (kvm_enabled()) {
1699 kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
1700 return;
1703 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1704 cpu_breakpoint_remove_all(env, BP_GDB);
1705 #ifndef CONFIG_USER_ONLY
1706 cpu_watchpoint_remove_all(env, BP_GDB);
1707 #endif
1711 static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
1713 #if defined(TARGET_I386)
1714 cpu_synchronize_state(s->c_cpu);
1715 s->c_cpu->eip = pc;
1716 #elif defined (TARGET_PPC)
1717 s->c_cpu->nip = pc;
1718 #elif defined (TARGET_SPARC)
1719 s->c_cpu->pc = pc;
1720 s->c_cpu->npc = pc + 4;
1721 #elif defined (TARGET_ARM)
1722 s->c_cpu->regs[15] = pc;
1723 #elif defined (TARGET_SH4)
1724 s->c_cpu->pc = pc;
1725 #elif defined (TARGET_MIPS)
1726 s->c_cpu->active_tc.PC = pc & ~(target_ulong)1;
1727 if (pc & 1) {
1728 s->c_cpu->hflags |= MIPS_HFLAG_M16;
1729 } else {
1730 s->c_cpu->hflags &= ~(MIPS_HFLAG_M16);
1732 #elif defined (TARGET_MICROBLAZE)
1733 s->c_cpu->sregs[SR_PC] = pc;
1734 #elif defined (TARGET_CRIS)
1735 s->c_cpu->pc = pc;
1736 #elif defined (TARGET_ALPHA)
1737 s->c_cpu->pc = pc;
1738 #elif defined (TARGET_S390X)
1739 cpu_synchronize_state(s->c_cpu);
1740 s->c_cpu->psw.addr = pc;
1741 #endif
1744 static inline int gdb_id(CPUState *env)
1746 #if defined(CONFIG_USER_ONLY) && defined(CONFIG_USE_NPTL)
1747 return env->host_tid;
1748 #else
1749 return env->cpu_index + 1;
1750 #endif
1753 static CPUState *find_cpu(uint32_t thread_id)
1755 CPUState *env;
1757 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1758 if (gdb_id(env) == thread_id) {
1759 return env;
1763 return NULL;
1766 static int gdb_handle_packet(GDBState *s, const char *line_buf)
1768 CPUState *env;
1769 const char *p;
1770 uint32_t thread;
1771 int ch, reg_size, type, res;
1772 char buf[MAX_PACKET_LENGTH];
1773 uint8_t mem_buf[MAX_PACKET_LENGTH];
1774 uint8_t *registers;
1775 target_ulong addr, len;
1777 #ifdef DEBUG_GDB
1778 printf("command='%s'\n", line_buf);
1779 #endif
1780 p = line_buf;
1781 ch = *p++;
1782 switch(ch) {
1783 case '?':
1784 /* TODO: Make this return the correct value for user-mode. */
1785 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", GDB_SIGNAL_TRAP,
1786 gdb_id(s->c_cpu));
1787 put_packet(s, buf);
1788 /* Remove all the breakpoints when this query is issued,
1789 * because gdb is doing an initial connect and the state
1790 * should be cleaned up.
1792 gdb_breakpoint_remove_all();
1793 break;
1794 case 'c':
1795 if (*p != '\0') {
1796 addr = strtoull(p, (char **)&p, 16);
1797 gdb_set_cpu_pc(s, addr);
1799 s->signal = 0;
1800 gdb_continue(s);
1801 return RS_IDLE;
1802 case 'C':
1803 s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16));
1804 if (s->signal == -1)
1805 s->signal = 0;
1806 gdb_continue(s);
1807 return RS_IDLE;
1808 case 'v':
1809 if (strncmp(p, "Cont", 4) == 0) {
1810 int res_signal, res_thread;
1812 p += 4;
1813 if (*p == '?') {
1814 put_packet(s, "vCont;c;C;s;S");
1815 break;
1817 res = 0;
1818 res_signal = 0;
1819 res_thread = 0;
1820 while (*p) {
1821 int action, signal;
1823 if (*p++ != ';') {
1824 res = 0;
1825 break;
1827 action = *p++;
1828 signal = 0;
1829 if (action == 'C' || action == 'S') {
1830 signal = strtoul(p, (char **)&p, 16);
1831 } else if (action != 'c' && action != 's') {
1832 res = 0;
1833 break;
1835 thread = 0;
1836 if (*p == ':') {
1837 thread = strtoull(p+1, (char **)&p, 16);
1839 action = tolower(action);
1840 if (res == 0 || (res == 'c' && action == 's')) {
1841 res = action;
1842 res_signal = signal;
1843 res_thread = thread;
1846 if (res) {
1847 if (res_thread != -1 && res_thread != 0) {
1848 env = find_cpu(res_thread);
1849 if (env == NULL) {
1850 put_packet(s, "E22");
1851 break;
1853 s->c_cpu = env;
1855 if (res == 's') {
1856 cpu_single_step(s->c_cpu, sstep_flags);
1858 s->signal = res_signal;
1859 gdb_continue(s);
1860 return RS_IDLE;
1862 break;
1863 } else {
1864 goto unknown_command;
1866 case 'k':
1867 /* Kill the target */
1868 fprintf(stderr, "\nQEMU: Terminated via GDBstub\n");
1869 exit(0);
1870 case 'D':
1871 /* Detach packet */
1872 gdb_breakpoint_remove_all();
1873 gdb_syscall_mode = GDB_SYS_DISABLED;
1874 gdb_continue(s);
1875 put_packet(s, "OK");
1876 break;
1877 case 's':
1878 if (*p != '\0') {
1879 addr = strtoull(p, (char **)&p, 16);
1880 gdb_set_cpu_pc(s, addr);
1882 cpu_single_step(s->c_cpu, sstep_flags);
1883 gdb_continue(s);
1884 return RS_IDLE;
1885 case 'F':
1887 target_ulong ret;
1888 target_ulong err;
1890 ret = strtoull(p, (char **)&p, 16);
1891 if (*p == ',') {
1892 p++;
1893 err = strtoull(p, (char **)&p, 16);
1894 } else {
1895 err = 0;
1897 if (*p == ',')
1898 p++;
1899 type = *p;
1900 if (gdb_current_syscall_cb)
1901 gdb_current_syscall_cb(s->c_cpu, ret, err);
1902 if (type == 'C') {
1903 put_packet(s, "T02");
1904 } else {
1905 gdb_continue(s);
1908 break;
1909 case 'g':
1910 cpu_synchronize_state(s->g_cpu);
1911 len = 0;
1912 for (addr = 0; addr < num_g_regs; addr++) {
1913 reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr);
1914 len += reg_size;
1916 memtohex(buf, mem_buf, len);
1917 put_packet(s, buf);
1918 break;
1919 case 'G':
1920 cpu_synchronize_state(s->g_cpu);
1921 registers = mem_buf;
1922 len = strlen(p) / 2;
1923 hextomem((uint8_t *)registers, p, len);
1924 for (addr = 0; addr < num_g_regs && len > 0; addr++) {
1925 reg_size = gdb_write_register(s->g_cpu, registers, addr);
1926 len -= reg_size;
1927 registers += reg_size;
1929 put_packet(s, "OK");
1930 break;
1931 case 'm':
1932 addr = strtoull(p, (char **)&p, 16);
1933 if (*p == ',')
1934 p++;
1935 len = strtoull(p, NULL, 16);
1936 if (cpu_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 0) != 0) {
1937 put_packet (s, "E14");
1938 } else {
1939 memtohex(buf, mem_buf, len);
1940 put_packet(s, buf);
1942 break;
1943 case 'M':
1944 addr = strtoull(p, (char **)&p, 16);
1945 if (*p == ',')
1946 p++;
1947 len = strtoull(p, (char **)&p, 16);
1948 if (*p == ':')
1949 p++;
1950 hextomem(mem_buf, p, len);
1951 if (cpu_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 1) != 0)
1952 put_packet(s, "E14");
1953 else
1954 put_packet(s, "OK");
1955 break;
1956 case 'p':
1957 /* Older gdb are really dumb, and don't use 'g' if 'p' is available.
1958 This works, but can be very slow. Anything new enough to
1959 understand XML also knows how to use this properly. */
1960 if (!gdb_has_xml)
1961 goto unknown_command;
1962 addr = strtoull(p, (char **)&p, 16);
1963 reg_size = gdb_read_register(s->g_cpu, mem_buf, addr);
1964 if (reg_size) {
1965 memtohex(buf, mem_buf, reg_size);
1966 put_packet(s, buf);
1967 } else {
1968 put_packet(s, "E14");
1970 break;
1971 case 'P':
1972 if (!gdb_has_xml)
1973 goto unknown_command;
1974 addr = strtoull(p, (char **)&p, 16);
1975 if (*p == '=')
1976 p++;
1977 reg_size = strlen(p) / 2;
1978 hextomem(mem_buf, p, reg_size);
1979 gdb_write_register(s->g_cpu, mem_buf, addr);
1980 put_packet(s, "OK");
1981 break;
1982 case 'Z':
1983 case 'z':
1984 type = strtoul(p, (char **)&p, 16);
1985 if (*p == ',')
1986 p++;
1987 addr = strtoull(p, (char **)&p, 16);
1988 if (*p == ',')
1989 p++;
1990 len = strtoull(p, (char **)&p, 16);
1991 if (ch == 'Z')
1992 res = gdb_breakpoint_insert(addr, len, type);
1993 else
1994 res = gdb_breakpoint_remove(addr, len, type);
1995 if (res >= 0)
1996 put_packet(s, "OK");
1997 else if (res == -ENOSYS)
1998 put_packet(s, "");
1999 else
2000 put_packet(s, "E22");
2001 break;
2002 case 'H':
2003 type = *p++;
2004 thread = strtoull(p, (char **)&p, 16);
2005 if (thread == -1 || thread == 0) {
2006 put_packet(s, "OK");
2007 break;
2009 env = find_cpu(thread);
2010 if (env == NULL) {
2011 put_packet(s, "E22");
2012 break;
2014 switch (type) {
2015 case 'c':
2016 s->c_cpu = env;
2017 put_packet(s, "OK");
2018 break;
2019 case 'g':
2020 s->g_cpu = env;
2021 put_packet(s, "OK");
2022 break;
2023 default:
2024 put_packet(s, "E22");
2025 break;
2027 break;
2028 case 'T':
2029 thread = strtoull(p, (char **)&p, 16);
2030 env = find_cpu(thread);
2032 if (env != NULL) {
2033 put_packet(s, "OK");
2034 } else {
2035 put_packet(s, "E22");
2037 break;
2038 case 'q':
2039 case 'Q':
2040 /* parse any 'q' packets here */
2041 if (!strcmp(p,"qemu.sstepbits")) {
2042 /* Query Breakpoint bit definitions */
2043 snprintf(buf, sizeof(buf), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
2044 SSTEP_ENABLE,
2045 SSTEP_NOIRQ,
2046 SSTEP_NOTIMER);
2047 put_packet(s, buf);
2048 break;
2049 } else if (strncmp(p,"qemu.sstep",10) == 0) {
2050 /* Display or change the sstep_flags */
2051 p += 10;
2052 if (*p != '=') {
2053 /* Display current setting */
2054 snprintf(buf, sizeof(buf), "0x%x", sstep_flags);
2055 put_packet(s, buf);
2056 break;
2058 p++;
2059 type = strtoul(p, (char **)&p, 16);
2060 sstep_flags = type;
2061 put_packet(s, "OK");
2062 break;
2063 } else if (strcmp(p,"C") == 0) {
2064 /* "Current thread" remains vague in the spec, so always return
2065 * the first CPU (gdb returns the first thread). */
2066 put_packet(s, "QC1");
2067 break;
2068 } else if (strcmp(p,"fThreadInfo") == 0) {
2069 s->query_cpu = first_cpu;
2070 goto report_cpuinfo;
2071 } else if (strcmp(p,"sThreadInfo") == 0) {
2072 report_cpuinfo:
2073 if (s->query_cpu) {
2074 snprintf(buf, sizeof(buf), "m%x", gdb_id(s->query_cpu));
2075 put_packet(s, buf);
2076 s->query_cpu = s->query_cpu->next_cpu;
2077 } else
2078 put_packet(s, "l");
2079 break;
2080 } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
2081 thread = strtoull(p+16, (char **)&p, 16);
2082 env = find_cpu(thread);
2083 if (env != NULL) {
2084 cpu_synchronize_state(env);
2085 len = snprintf((char *)mem_buf, sizeof(mem_buf),
2086 "CPU#%d [%s]", env->cpu_index,
2087 env->halted ? "halted " : "running");
2088 memtohex(buf, mem_buf, len);
2089 put_packet(s, buf);
2091 break;
2093 #ifdef CONFIG_USER_ONLY
2094 else if (strncmp(p, "Offsets", 7) == 0) {
2095 TaskState *ts = s->c_cpu->opaque;
2097 snprintf(buf, sizeof(buf),
2098 "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
2099 ";Bss=" TARGET_ABI_FMT_lx,
2100 ts->info->code_offset,
2101 ts->info->data_offset,
2102 ts->info->data_offset);
2103 put_packet(s, buf);
2104 break;
2106 #else /* !CONFIG_USER_ONLY */
2107 else if (strncmp(p, "Rcmd,", 5) == 0) {
2108 int len = strlen(p + 5);
2110 if ((len % 2) != 0) {
2111 put_packet(s, "E01");
2112 break;
2114 hextomem(mem_buf, p + 5, len);
2115 len = len / 2;
2116 mem_buf[len++] = 0;
2117 qemu_chr_read(s->mon_chr, mem_buf, len);
2118 put_packet(s, "OK");
2119 break;
2121 #endif /* !CONFIG_USER_ONLY */
2122 if (strncmp(p, "Supported", 9) == 0) {
2123 snprintf(buf, sizeof(buf), "PacketSize=%x", MAX_PACKET_LENGTH);
2124 #ifdef GDB_CORE_XML
2125 pstrcat(buf, sizeof(buf), ";qXfer:features:read+");
2126 #endif
2127 put_packet(s, buf);
2128 break;
2130 #ifdef GDB_CORE_XML
2131 if (strncmp(p, "Xfer:features:read:", 19) == 0) {
2132 const char *xml;
2133 target_ulong total_len;
2135 gdb_has_xml = 1;
2136 p += 19;
2137 xml = get_feature_xml(p, &p);
2138 if (!xml) {
2139 snprintf(buf, sizeof(buf), "E00");
2140 put_packet(s, buf);
2141 break;
2144 if (*p == ':')
2145 p++;
2146 addr = strtoul(p, (char **)&p, 16);
2147 if (*p == ',')
2148 p++;
2149 len = strtoul(p, (char **)&p, 16);
2151 total_len = strlen(xml);
2152 if (addr > total_len) {
2153 snprintf(buf, sizeof(buf), "E00");
2154 put_packet(s, buf);
2155 break;
2157 if (len > (MAX_PACKET_LENGTH - 5) / 2)
2158 len = (MAX_PACKET_LENGTH - 5) / 2;
2159 if (len < total_len - addr) {
2160 buf[0] = 'm';
2161 len = memtox(buf + 1, xml + addr, len);
2162 } else {
2163 buf[0] = 'l';
2164 len = memtox(buf + 1, xml + addr, total_len - addr);
2166 put_packet_binary(s, buf, len + 1);
2167 break;
2169 #endif
2170 /* Unrecognised 'q' command. */
2171 goto unknown_command;
2173 default:
2174 unknown_command:
2175 /* put empty packet */
2176 buf[0] = '\0';
2177 put_packet(s, buf);
2178 break;
2180 return RS_IDLE;
2183 void gdb_set_stop_cpu(CPUState *env)
2185 gdbserver_state->c_cpu = env;
2186 gdbserver_state->g_cpu = env;
2189 #ifndef CONFIG_USER_ONLY
2190 static void gdb_vm_state_change(void *opaque, int running, int reason)
2192 GDBState *s = gdbserver_state;
2193 CPUState *env = s->c_cpu;
2194 char buf[256];
2195 const char *type;
2196 int ret;
2198 if (running || (reason != EXCP_DEBUG && reason != EXCP_INTERRUPT) ||
2199 s->state == RS_INACTIVE || s->state == RS_SYSCALL)
2200 return;
2202 /* disable single step if it was enabled */
2203 cpu_single_step(env, 0);
2205 if (reason == EXCP_DEBUG) {
2206 if (env->watchpoint_hit) {
2207 switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
2208 case BP_MEM_READ:
2209 type = "r";
2210 break;
2211 case BP_MEM_ACCESS:
2212 type = "a";
2213 break;
2214 default:
2215 type = "";
2216 break;
2218 snprintf(buf, sizeof(buf),
2219 "T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
2220 GDB_SIGNAL_TRAP, gdb_id(env), type,
2221 env->watchpoint_hit->vaddr);
2222 put_packet(s, buf);
2223 env->watchpoint_hit = NULL;
2224 return;
2226 tb_flush(env);
2227 ret = GDB_SIGNAL_TRAP;
2228 } else {
2229 ret = GDB_SIGNAL_INT;
2231 snprintf(buf, sizeof(buf), "T%02xthread:%02x;", ret, gdb_id(env));
2232 put_packet(s, buf);
2234 #endif
2236 /* Send a gdb syscall request.
2237 This accepts limited printf-style format specifiers, specifically:
2238 %x - target_ulong argument printed in hex.
2239 %lx - 64-bit argument printed in hex.
2240 %s - string pointer (target_ulong) and length (int) pair. */
2241 void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
2243 va_list va;
2244 char buf[256];
2245 char *p;
2246 target_ulong addr;
2247 uint64_t i64;
2248 GDBState *s;
2250 s = gdbserver_state;
2251 if (!s)
2252 return;
2253 gdb_current_syscall_cb = cb;
2254 s->state = RS_SYSCALL;
2255 #ifndef CONFIG_USER_ONLY
2256 vm_stop(EXCP_DEBUG);
2257 #endif
2258 s->state = RS_IDLE;
2259 va_start(va, fmt);
2260 p = buf;
2261 *(p++) = 'F';
2262 while (*fmt) {
2263 if (*fmt == '%') {
2264 fmt++;
2265 switch (*fmt++) {
2266 case 'x':
2267 addr = va_arg(va, target_ulong);
2268 p += snprintf(p, &buf[sizeof(buf)] - p, TARGET_FMT_lx, addr);
2269 break;
2270 case 'l':
2271 if (*(fmt++) != 'x')
2272 goto bad_format;
2273 i64 = va_arg(va, uint64_t);
2274 p += snprintf(p, &buf[sizeof(buf)] - p, "%" PRIx64, i64);
2275 break;
2276 case 's':
2277 addr = va_arg(va, target_ulong);
2278 p += snprintf(p, &buf[sizeof(buf)] - p, TARGET_FMT_lx "/%x",
2279 addr, va_arg(va, int));
2280 break;
2281 default:
2282 bad_format:
2283 fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n",
2284 fmt - 1);
2285 break;
2287 } else {
2288 *(p++) = *(fmt++);
2291 *p = 0;
2292 va_end(va);
2293 put_packet(s, buf);
2294 #ifdef CONFIG_USER_ONLY
2295 gdb_handlesig(s->c_cpu, 0);
2296 #else
2297 cpu_exit(s->c_cpu);
2298 #endif
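/*
 * Illustrative call (the syscall name and arguments are hypothetical,
 * not taken from this file):
 *
 *     gdb_do_syscall(cb, "write,%x,%x,%x", fd, addr, len);
 *
 * builds an 'F' packet of the form "Fwrite,<fd>,<addr>,<len>" with each
 * value rendered in hex, sends it to gdb and lets gdb perform the I/O.
 * gdb's 'F' reply is then handled by the 'F' case in gdb_handle_packet(),
 * which passes the result and errno back through gdb_current_syscall_cb.
 */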
2301 static void gdb_read_byte(GDBState *s, int ch)
2303 int i, csum;
2304 uint8_t reply;
2306 #ifndef CONFIG_USER_ONLY
2307 if (s->last_packet_len) {
2308 /* Waiting for a response to the last packet. If we see the start
2309 of a new command then abandon the previous response. */
2310 if (ch == '-') {
2311 #ifdef DEBUG_GDB
2312 printf("Got NACK, retransmitting\n");
2313 #endif
2314 put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
2316 #ifdef DEBUG_GDB
2317 else if (ch == '+')
2318 printf("Got ACK\n");
2319 else
2320 printf("Got '%c' when expecting ACK/NACK\n", ch);
2321 #endif
2322 if (ch == '+' || ch == '$')
2323 s->last_packet_len = 0;
2324 if (ch != '$')
2325 return;
2327 if (vm_running) {
2328 /* when the CPU is running, we cannot do anything except stop
2329 it when receiving a char */
2330 vm_stop(EXCP_INTERRUPT);
2331 } else
2332 #endif
2334 switch(s->state) {
2335 case RS_IDLE:
2336 if (ch == '$') {
2337 s->line_buf_index = 0;
2338 s->state = RS_GETLINE;
2340 break;
2341 case RS_GETLINE:
2342 if (ch == '#') {
2343 s->state = RS_CHKSUM1;
2344 } else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
2345 s->state = RS_IDLE;
2346 } else {
2347 s->line_buf[s->line_buf_index++] = ch;
2349 break;
2350 case RS_CHKSUM1:
2351 s->line_buf[s->line_buf_index] = '\0';
2352 s->line_csum = fromhex(ch) << 4;
2353 s->state = RS_CHKSUM2;
2354 break;
2355 case RS_CHKSUM2:
2356 s->line_csum |= fromhex(ch);
2357 csum = 0;
2358 for(i = 0; i < s->line_buf_index; i++) {
2359 csum += s->line_buf[i];
2361 if (s->line_csum != (csum & 0xff)) {
2362 reply = '-';
2363 put_buffer(s, &reply, 1);
2364 s->state = RS_IDLE;
2365 } else {
2366 reply = '+';
2367 put_buffer(s, &reply, 1);
2368 s->state = gdb_handle_packet(s, s->line_buf);
2370 break;
2371 default:
2372 abort();
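/*
 * Example of the state machine above: feeding the bytes "$g#67" one at a
 * time moves the parser from RS_IDLE to RS_GETLINE on '$', buffers 'g',
 * switches to RS_CHKSUM1 on '#', and in RS_CHKSUM2 compares the received
 * checksum 0x67 against the sum of the payload bytes ('g' == 0x67).  On a
 * match it acks with '+' and hands the line to gdb_handle_packet(); on a
 * mismatch it naks with '-' so the packet gets retransmitted.
 */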
2377 /* Tell the remote gdb that the process has exited. */
2378 void gdb_exit(CPUState *env, int code)
2380 GDBState *s;
2381 char buf[4];
2383 s = gdbserver_state;
2384 if (!s) {
2385 return;
2387 #ifdef CONFIG_USER_ONLY
2388 if (gdbserver_fd < 0 || s->fd < 0) {
2389 return;
2391 #endif
2393 snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
2394 put_packet(s, buf);
2397 #ifdef CONFIG_USER_ONLY
2399 gdb_queuesig (void)
2401 GDBState *s;
2403 s = gdbserver_state;
2405 if (gdbserver_fd < 0 || s->fd < 0)
2406 return 0;
2407 else
2408 return 1;
2412 gdb_handlesig (CPUState *env, int sig)
2414 GDBState *s;
2415 char buf[256];
2416 int n;
2418 s = gdbserver_state;
2419 if (gdbserver_fd < 0 || s->fd < 0)
2420 return sig;
2422 /* disable single step if it was enabled */
2423 cpu_single_step(env, 0);
2424 tb_flush(env);
2426 if (sig != 0)
2428 snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb (sig));
2429 put_packet(s, buf);
2431 /* put_packet() might have detected that the peer terminated the
2432 connection. */
2433 if (s->fd < 0)
2434 return sig;
2436 sig = 0;
2437 s->state = RS_IDLE;
2438 s->running_state = 0;
2439 while (s->running_state == 0) {
2440 n = read (s->fd, buf, 256);
2441 if (n > 0)
2443 int i;
2445 for (i = 0; i < n; i++)
2446 gdb_read_byte (s, buf[i]);
2448 else if (n == 0 || errno != EAGAIN)
2450 /* XXX: Connection closed. Should probably wait for another
2451 connection before continuing. */
2452 return sig;
2455 sig = s->signal;
2456 s->signal = 0;
2457 return sig;
2460 /* Tell the remote gdb that the process has exited due to SIG. */
2461 void gdb_signalled(CPUState *env, int sig)
2463 GDBState *s;
2464 char buf[4];
2466 s = gdbserver_state;
2467 if (gdbserver_fd < 0 || s->fd < 0)
2468 return;
2470 snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb (sig));
2471 put_packet(s, buf);
2474 static void gdb_accept(void)
2476 GDBState *s;
2477 struct sockaddr_in sockaddr;
2478 socklen_t len;
2479 int val, fd;
2481 for(;;) {
2482 len = sizeof(sockaddr);
2483 fd = accept(gdbserver_fd, (struct sockaddr *)&sockaddr, &len);
2484 if (fd < 0 && errno != EINTR) {
2485 perror("accept");
2486 return;
2487 } else if (fd >= 0) {
2488 #ifndef _WIN32
2489 fcntl(fd, F_SETFD, FD_CLOEXEC);
2490 #endif
2491 break;
2495 /* set short latency */
2496 val = 1;
2497 setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *)&val, sizeof(val));
2499 s = qemu_mallocz(sizeof(GDBState));
2500 s->c_cpu = first_cpu;
2501 s->g_cpu = first_cpu;
2502 s->fd = fd;
2503 gdb_has_xml = 0;
2505 gdbserver_state = s;
2507 fcntl(fd, F_SETFL, O_NONBLOCK);
2510 static int gdbserver_open(int port)
2512 struct sockaddr_in sockaddr;
2513 int fd, val, ret;
2515 fd = socket(PF_INET, SOCK_STREAM, 0);
2516 if (fd < 0) {
2517 perror("socket");
2518 return -1;
2520 #ifndef _WIN32
2521 fcntl(fd, F_SETFD, FD_CLOEXEC);
2522 #endif
2524 /* allow fast reuse */
2525 val = 1;
2526 setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *)&val, sizeof(val));
2528 sockaddr.sin_family = AF_INET;
2529 sockaddr.sin_port = htons(port);
2530 sockaddr.sin_addr.s_addr = 0;
2531 ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
2532 if (ret < 0) {
2533 perror("bind");
2534 return -1;
2536 ret = listen(fd, 0);
2537 if (ret < 0) {
2538 perror("listen");
2539 return -1;
2541 return fd;
2544 int gdbserver_start(int port)
2546 gdbserver_fd = gdbserver_open(port);
2547 if (gdbserver_fd < 0)
2548 return -1;
2549 /* accept connections */
2550 gdb_accept();
2551 return 0;
2554 /* Disable gdb stub for child processes. */
2555 void gdbserver_fork(CPUState *env)
2557 GDBState *s = gdbserver_state;
2558 if (gdbserver_fd < 0 || s->fd < 0)
2559 return;
2560 close(s->fd);
2561 s->fd = -1;
2562 cpu_breakpoint_remove_all(env, BP_GDB);
2563 cpu_watchpoint_remove_all(env, BP_GDB);
2565 #else
2566 static int gdb_chr_can_receive(void *opaque)
2568 /* We can handle an arbitrarily large amount of data.
2569 Pick the maximum packet size, which is as good as anything. */
2570 return MAX_PACKET_LENGTH;
2573 static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
2575 int i;
2577 for (i = 0; i < size; i++) {
2578 gdb_read_byte(gdbserver_state, buf[i]);
2582 static void gdb_chr_event(void *opaque, int event)
2584 switch (event) {
2585 case CHR_EVENT_OPENED:
2586 vm_stop(EXCP_INTERRUPT);
2587 gdb_has_xml = 0;
2588 break;
2589 default:
2590 break;
2594 static void gdb_monitor_output(GDBState *s, const char *msg, int len)
2596 char buf[MAX_PACKET_LENGTH];
2598 buf[0] = 'O';
2599 if (len > (MAX_PACKET_LENGTH/2) - 1)
2600 len = (MAX_PACKET_LENGTH/2) - 1;
2601 memtohex(buf + 1, (uint8_t *)msg, len);
2602 put_packet(s, buf);
2605 static int gdb_monitor_write(CharDriverState *chr, const uint8_t *buf, int len)
2607 const char *p = (const char *)buf;
2608 int max_sz;
2610 max_sz = (sizeof(gdbserver_state->last_packet) - 2) / 2;
2611 for (;;) {
2612 if (len <= max_sz) {
2613 gdb_monitor_output(gdbserver_state, p, len);
2614 break;
2616 gdb_monitor_output(gdbserver_state, p, max_sz);
2617 p += max_sz;
2618 len -= max_sz;
2620 return len;
2623 #ifndef _WIN32
2624 static void gdb_sigterm_handler(int signal)
2626 if (vm_running)
2627 vm_stop(EXCP_INTERRUPT);
2629 #endif
2631 int gdbserver_start(const char *device)
2633 GDBState *s;
2634 char gdbstub_device_name[128];
2635 CharDriverState *chr = NULL;
2636 CharDriverState *mon_chr;
2638 if (!device)
2639 return -1;
2640 if (strcmp(device, "none") != 0) {
2641 if (strstart(device, "tcp:", NULL)) {
2642 /* enforce required TCP attributes */
2643 snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
2644 "%s,nowait,nodelay,server", device);
2645 device = gdbstub_device_name;
2647 #ifndef _WIN32
2648 else if (strcmp(device, "stdio") == 0) {
2649 struct sigaction act;
2651 memset(&act, 0, sizeof(act));
2652 act.sa_handler = gdb_sigterm_handler;
2653 sigaction(SIGINT, &act, NULL);
2655 #endif
2656 chr = qemu_chr_open("gdb", device, NULL);
2657 if (!chr)
2658 return -1;
2660 qemu_chr_add_handlers(chr, gdb_chr_can_receive, gdb_chr_receive,
2661 gdb_chr_event, NULL);
2664 s = gdbserver_state;
2665 if (!s) {
2666 s = qemu_mallocz(sizeof(GDBState));
2667 gdbserver_state = s;
2669 qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);
2671 /* Initialize a monitor terminal for gdb */
2672 mon_chr = qemu_mallocz(sizeof(*mon_chr));
2673 mon_chr->chr_write = gdb_monitor_write;
2674 monitor_init(mon_chr, 0);
2675 } else {
2676 if (s->chr)
2677 qemu_chr_close(s->chr);
2678 mon_chr = s->mon_chr;
2679 memset(s, 0, sizeof(GDBState));
2681 s->c_cpu = first_cpu;
2682 s->g_cpu = first_cpu;
2683 s->chr = chr;
2684 s->state = chr ? RS_IDLE : RS_INACTIVE;
2685 s->mon_chr = mon_chr;
2687 return 0;
2689 #endif