/*
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-common.h"
#ifdef CONFIG_USER_ONLY
#include "qemu.h"
#else
#include "monitor.h"
#include "qemu-char.h"
#include "sysemu.h"
#include "gdbstub.h"
#endif

#define MAX_PACKET_LENGTH 4096

#include "cpus.h"
#include "qemu_socket.h"
#include "kvm.h"
#ifndef TARGET_CPU_MEMORY_RW_DEBUG
static inline int target_memory_rw_debug(CPUArchState *env, target_ulong addr,
                                         uint8_t *buf, int len, int is_write)
{
    return cpu_memory_rw_debug(env, addr, buf, len, is_write);
}
#else
/* target_memory_rw_debug() defined in cpu.h */
#endif
enum {
    GDB_SIGNAL_0 = 0,
    GDB_SIGNAL_INT = 2,
    GDB_SIGNAL_QUIT = 3,
    GDB_SIGNAL_TRAP = 5,
    GDB_SIGNAL_ABRT = 6,
    GDB_SIGNAL_ALRM = 14,
    GDB_SIGNAL_IO = 23,
    GDB_SIGNAL_XCPU = 24,
    GDB_SIGNAL_UNKNOWN = 143
};

#ifdef CONFIG_USER_ONLY

/* Map target signal numbers to GDB protocol signal numbers and vice
 * versa.  For user emulation's currently supported systems, we can
 * assume most signals are defined.
 */
static int gdb_signal_table[] = {
    /* one entry per target signal; the full mapping is target specific */
};

#else

/* In system mode we only need SIGINT and SIGTRAP; other signals
   are not yet supported.  */

enum {
    TARGET_SIGINT = 2,
    TARGET_SIGTRAP = 5
};

static int gdb_signal_table[] = {
    -1,
    -1,
    TARGET_SIGINT,
    -1,
    -1,
    TARGET_SIGTRAP
};
#endif
#ifdef CONFIG_USER_ONLY
static int target_signal_to_gdb (int sig)
{
    int i;
    for (i = 0; i < ARRAY_SIZE (gdb_signal_table); i++)
        if (gdb_signal_table[i] == sig)
            return i;
    return GDB_SIGNAL_UNKNOWN;
}
#endif

static int gdb_signal_to_target (int sig)
{
    if (sig < ARRAY_SIZE (gdb_signal_table))
        return gdb_signal_table[sig];
    else
        return -1;
}
typedef struct GDBRegisterState {
    int base_reg;
    int num_regs;
    gdb_reg_cb get_reg;
    gdb_reg_cb set_reg;
    const char *xml;
    struct GDBRegisterState *next;
} GDBRegisterState;

enum RSState {
    RS_INACTIVE,
    RS_IDLE,
    RS_GETLINE,
    RS_CHKSUM1,
    RS_CHKSUM2,
};

typedef struct GDBState {
    CPUArchState *c_cpu; /* current CPU for step/continue ops */
    CPUArchState *g_cpu; /* current CPU for other ops */
    CPUArchState *query_cpu; /* for q{f|s}ThreadInfo */
    enum RSState state; /* parsing state */
    char line_buf[MAX_PACKET_LENGTH];
    int line_buf_index;
    int line_csum;
    uint8_t last_packet[MAX_PACKET_LENGTH + 4];
    int last_packet_len;
    int signal;
#ifdef CONFIG_USER_ONLY
    int fd;
    int running_state;
#else
    CharDriverState *chr;
    CharDriverState *mon_chr;
#endif
    char syscall_buf[256];
    gdb_syscall_complete_cb current_syscall_cb;
} GDBState;
/* By default use no IRQs and no timers while single stepping so as to
 * make single stepping like an ICE HW step.
 */
static int sstep_flags = SSTEP_ENABLE|SSTEP_NOIRQ|SSTEP_NOTIMER;

static GDBState *gdbserver_state;

/* This is an ugly hack to cope with both new and old gdb.
   If gdb sends qXfer:features:read then assume we're talking to a newish
   gdb that understands target descriptions.  */
static int gdb_has_xml;

#ifdef CONFIG_USER_ONLY
/* XXX: This is not thread safe.  Do we care?  */
static int gdbserver_fd = -1;
static int get_char(GDBState *s)
{
    uint8_t ch;
    int ret;

    for (;;) {
        ret = qemu_recv(s->fd, &ch, 1, 0);
        if (ret < 0) {
            if (errno == ECONNRESET)
                s->fd = -1;
            if (errno != EINTR && errno != EAGAIN)
                return -1;
        } else if (ret == 0) {
            close(s->fd);
            s->fd = -1;
            return -1;
        } else {
            break;
        }
    }
    return ch;
}
#endif
/* If gdb is connected when the first semihosting syscall occurs then use
   remote gdb syscalls.  Otherwise use native file IO.  */
int use_gdb_syscalls(void)
{
    if (gdb_syscall_mode == GDB_SYS_UNKNOWN) {
        gdb_syscall_mode = (gdbserver_state ? GDB_SYS_ENABLED
                                            : GDB_SYS_DISABLED);
    }
    return gdb_syscall_mode == GDB_SYS_ENABLED;
}
/* Resume execution.  */
static inline void gdb_continue(GDBState *s)
{
#ifdef CONFIG_USER_ONLY
    s->running_state = 1;
#else
    vm_start();
#endif
}
static void put_buffer(GDBState *s, const uint8_t *buf, int len)
{
#ifdef CONFIG_USER_ONLY
    int ret;

    while (len > 0) {
        ret = send(s->fd, buf, len, 0);
        if (ret < 0) {
            if (errno != EINTR && errno != EAGAIN)
                return;
        } else {
            buf += ret;
            len -= ret;
        }
    }
#else
    qemu_chr_fe_write(s->chr, buf, len);
#endif
}
static inline int fromhex(int v)
{
    if (v >= '0' && v <= '9')
        return v - '0';
    else if (v >= 'A' && v <= 'F')
        return v - 'A' + 10;
    else if (v >= 'a' && v <= 'f')
        return v - 'a' + 10;
    else
        return 0;
}

static inline int tohex(int v)
{
    if (v < 10)
        return v + '0';
    else
        return v - 10 + 'a';
}
static void memtohex(char *buf, const uint8_t *mem, int len)
{
    int i, c;
    char *q;

    q = buf;
    for(i = 0; i < len; i++) {
        c = mem[i];
        *q++ = tohex(c >> 4);
        *q++ = tohex(c & 0xf);
    }
    *q = '\0';
}

static void hextomem(uint8_t *mem, const char *buf, int len)
{
    int i;

    for(i = 0; i < len; i++) {
        mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
        buf += 2;
    }
}
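
/* Illustrative sketch (not part of the original file): how the hex helpers
 * above round-trip a buffer.  Every byte becomes two lowercase hex digits,
 * which is the encoding the GDB remote protocol uses for register and
 * memory packet payloads.  The function below is hypothetical and only a
 * usage example.
 */
#if 0
static void example_hex_roundtrip(void)
{
    static const uint8_t mem[4] = { 0xde, 0xad, 0xbe, 0xef };
    char hex[2 * sizeof(mem) + 1];
    uint8_t back[sizeof(mem)];

    memtohex(hex, mem, sizeof(mem));    /* hex  == "deadbeef" */
    hextomem(back, hex, sizeof(back));  /* back == mem again  */
}
#endif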
/* return -1 if error, 0 if OK */
static int put_packet_binary(GDBState *s, const char *buf, int len)
{
    int csum, i;
    uint8_t *p;

    for(;;) {
        p = s->last_packet;
        *(p++) = '$';
        memcpy(p, buf, len);
        p += len;
        csum = 0;
        for(i = 0; i < len; i++) {
            csum += buf[i];
        }
        *(p++) = '#';
        *(p++) = tohex((csum >> 4) & 0xf);
        *(p++) = tohex((csum) & 0xf);

        s->last_packet_len = p - s->last_packet;
        put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);

#ifdef CONFIG_USER_ONLY
        i = get_char(s);
        if (i < 0)
            return -1;
        if (i == '+')
            break;
#else
        break;
#endif
    }
    return 0;
}
/* return -1 if error, 0 if OK */
static int put_packet(GDBState *s, const char *buf)
{
#ifdef DEBUG_GDB
    printf("reply='%s'\n", buf);
#endif

    return put_packet_binary(s, buf, strlen(buf));
}
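
/* Illustrative sketch (not part of the original): on the wire a reply built
 * by put_packet_binary() is framed as '$'<payload>'#'<two hex checksum
 * digits>, where the checksum is the modulo-256 sum of the payload bytes.
 * The hypothetical helper below shows the arithmetic for the common "OK"
 * reply, which is transmitted as "$OK#9a".
 */
#if 0
static int example_packet_csum(const char *payload, int len)
{
    int i, csum = 0;

    for (i = 0; i < len; i++) {
        csum += payload[i];        /* 'O' + 'K' == 0x4f + 0x4b == 0x9a */
    }
    return csum & 0xff;            /* appended as two hex digits after '#' */
}
#endif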
/* The GDB remote protocol transfers values in target byte order.  This means
   we can use the raw memory access routines to access the value buffer.
   Conveniently, these also handle the case where the buffer is mis-aligned.
 */
#define GET_REG8(val) do { \
    stb_p(mem_buf, val); \
    return 1; \
    } while(0)
#define GET_REG16(val) do { \
    stw_p(mem_buf, val); \
    return 2; \
    } while(0)
#define GET_REG32(val) do { \
    stl_p(mem_buf, val); \
    return 4; \
    } while(0)
#define GET_REG64(val) do { \
    stq_p(mem_buf, val); \
    return 8; \
    } while(0)

#if TARGET_LONG_BITS == 64
#define GET_REGL(val) GET_REG64(val)
#define ldtul_p(addr) ldq_p(addr)
#else
#define GET_REGL(val) GET_REG32(val)
#define ldtul_p(addr) ldl_p(addr)
#endif
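
/* Illustrative sketch (not from the original): a per-target register read
 * helper stores the value in target byte order into mem_buf and returns the
 * number of bytes written, which the GET_REG* macros above do via their
 * embedded "return".  The register layout below is hypothetical.
 */
#if 0
static int example_read_register(uint32_t *example_regs, uint8_t *mem_buf,
                                 int n)
{
    if (n < 16) {
        GET_REG32(example_regs[n]);   /* writes 4 bytes and returns 4 */
    }
    return 0;                         /* unknown register: nothing written */
}
#endif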
#if defined(TARGET_I386)

#ifdef TARGET_X86_64
static const int gpr_map[16] = {
    R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI, R_EBP, R_ESP,
    8, 9, 10, 11, 12, 13, 14, 15
};
#else
#define gpr_map gpr_map32
#endif
static const int gpr_map32[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

#define NUM_CORE_REGS (CPU_NB_REGS * 2 + 25)

#define IDX_IP_REG      CPU_NB_REGS
#define IDX_FLAGS_REG   (IDX_IP_REG + 1)
#define IDX_SEG_REGS    (IDX_FLAGS_REG + 1)
#define IDX_FP_REGS     (IDX_SEG_REGS + 6)
#define IDX_XMM_REGS    (IDX_FP_REGS + 16)
#define IDX_MXCSR_REG   (IDX_XMM_REGS + CPU_NB_REGS)

static int cpu_gdb_read_register(CPUX86State *env, uint8_t *mem_buf, int n)
{
    if (n < CPU_NB_REGS) {
        if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
            GET_REG64(env->regs[gpr_map[n]]);
        } else if (n < CPU_NB_REGS32) {
            GET_REG32(env->regs[gpr_map32[n]]);
        }
    } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
#ifdef USE_X86LDOUBLE
        /* FIXME: byteswap float values - after fixing fpregs layout. */
        memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
#else
        memset(mem_buf, 0, 10);
#endif
        return 10;
    } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
        n -= IDX_XMM_REGS;
        if (n < CPU_NB_REGS32 ||
            (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
            stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
            stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
            return 16;
        }
    } else {
        switch (n) {
        case IDX_IP_REG:
            if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
                GET_REG64(env->eip);
            } else {
                GET_REG32(env->eip);
            }
        case IDX_FLAGS_REG: GET_REG32(env->eflags);

        case IDX_SEG_REGS:     GET_REG32(env->segs[R_CS].selector);
        case IDX_SEG_REGS + 1: GET_REG32(env->segs[R_SS].selector);
        case IDX_SEG_REGS + 2: GET_REG32(env->segs[R_DS].selector);
        case IDX_SEG_REGS + 3: GET_REG32(env->segs[R_ES].selector);
        case IDX_SEG_REGS + 4: GET_REG32(env->segs[R_FS].selector);
        case IDX_SEG_REGS + 5: GET_REG32(env->segs[R_GS].selector);

        case IDX_FP_REGS + 8:  GET_REG32(env->fpuc);
        case IDX_FP_REGS + 9:  GET_REG32((env->fpus & ~0x3800) |
                                         (env->fpstt & 0x7) << 11);
        case IDX_FP_REGS + 10: GET_REG32(0); /* ftag */
        case IDX_FP_REGS + 11: GET_REG32(0); /* fiseg */
        case IDX_FP_REGS + 12: GET_REG32(0); /* fioff */
        case IDX_FP_REGS + 13: GET_REG32(0); /* foseg */
        case IDX_FP_REGS + 14: GET_REG32(0); /* fooff */
        case IDX_FP_REGS + 15: GET_REG32(0); /* fop */

        case IDX_MXCSR_REG: GET_REG32(env->mxcsr);
        }
    }
    return 0;
}
static int cpu_x86_gdb_load_seg(CPUX86State *env, int sreg, uint8_t *mem_buf)
{
    uint16_t selector = ldl_p(mem_buf);

    if (selector != env->segs[sreg].selector) {
#if defined(CONFIG_USER_ONLY)
        cpu_x86_load_seg(env, sreg, selector);
#else
        unsigned int limit, flags;
        target_ulong base;

        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
            base = selector << 4;
            limit = 0xffff;
            flags = 0;
        } else {
            if (!cpu_x86_get_descr_debug(env, selector, &base, &limit, &flags))
                return 4;
        }
        cpu_x86_load_seg_cache(env, sreg, selector, base, limit, flags);
#endif
    }
    return 4;
}
static int cpu_gdb_write_register(CPUX86State *env, uint8_t *mem_buf, int n)
{
    uint32_t tmp;

    if (n < CPU_NB_REGS) {
        if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
            env->regs[gpr_map[n]] = ldtul_p(mem_buf);
            return sizeof(target_ulong);
        } else if (n < CPU_NB_REGS32) {
            n = gpr_map32[n];
            env->regs[n] &= ~0xffffffffUL;
            env->regs[n] |= (uint32_t)ldl_p(mem_buf);
            return 4;
        }
    } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
#ifdef USE_X86LDOUBLE
        /* FIXME: byteswap float values - after fixing fpregs layout. */
        memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
#endif
        return 10;
    } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
        n -= IDX_XMM_REGS;
        if (n < CPU_NB_REGS32 ||
            (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
            env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
            env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
            return 16;
        }
    } else {
        switch (n) {
        case IDX_IP_REG:
            if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
                env->eip = ldq_p(mem_buf);
                return 8;
            } else {
                env->eip &= ~0xffffffffUL;
                env->eip |= (uint32_t)ldl_p(mem_buf);
                return 4;
            }
        case IDX_FLAGS_REG:
            env->eflags = ldl_p(mem_buf);
            return 4;

        case IDX_SEG_REGS:     return cpu_x86_gdb_load_seg(env, R_CS, mem_buf);
        case IDX_SEG_REGS + 1: return cpu_x86_gdb_load_seg(env, R_SS, mem_buf);
        case IDX_SEG_REGS + 2: return cpu_x86_gdb_load_seg(env, R_DS, mem_buf);
        case IDX_SEG_REGS + 3: return cpu_x86_gdb_load_seg(env, R_ES, mem_buf);
        case IDX_SEG_REGS + 4: return cpu_x86_gdb_load_seg(env, R_FS, mem_buf);
        case IDX_SEG_REGS + 5: return cpu_x86_gdb_load_seg(env, R_GS, mem_buf);

        case IDX_FP_REGS + 8:
            env->fpuc = ldl_p(mem_buf);
            return 4;
        case IDX_FP_REGS + 9:
            tmp = ldl_p(mem_buf);
            env->fpstt = (tmp >> 11) & 7;
            env->fpus = tmp & ~0x3800;
            return 4;
        case IDX_FP_REGS + 10: /* ftag */  return 4;
        case IDX_FP_REGS + 11: /* fiseg */ return 4;
        case IDX_FP_REGS + 12: /* fioff */ return 4;
        case IDX_FP_REGS + 13: /* foseg */ return 4;
        case IDX_FP_REGS + 14: /* fooff */ return 4;
        case IDX_FP_REGS + 15: /* fop */   return 4;

        case IDX_MXCSR_REG:
            env->mxcsr = ldl_p(mem_buf);
            return 4;
        }
    }
    /* Unrecognised register.  */
    return 0;
}
#elif defined (TARGET_PPC)

/* Old gdb always expects FP registers.  Newer (xml-aware) gdb only
   expects whatever the target description contains.  Due to a
   historical mishap the FP registers appear in between core integer
   regs and PC, MSR, CR, and so forth.  We hack round this by giving the
   FP regs zero size when talking to a newer gdb.  */
#define NUM_CORE_REGS 71
#if defined (TARGET_PPC64)
#define GDB_CORE_XML "power64-core.xml"
#else
#define GDB_CORE_XML "power-core.xml"
#endif

static int cpu_gdb_read_register(CPUPPCState *env, uint8_t *mem_buf, int n)
{
    if (n < 32) {
        /* gprs */
        GET_REGL(env->gpr[n]);
    } else if (n < 64) {
        /* fprs */
        stfq_p(mem_buf, env->fpr[n-32]);
        return 8;
    } else {
        switch (n) {
        case 64: GET_REGL(env->nip);
        case 65: GET_REGL(env->msr);
        case 66:
            {
                uint32_t cr = 0;
                int i;
                for (i = 0; i < 8; i++)
                    cr |= env->crf[i] << (32 - ((i + 1) * 4));
                GET_REG32(cr);
            }
        case 67: GET_REGL(env->lr);
        case 68: GET_REGL(env->ctr);
        case 69: GET_REGL(env->xer);
        case 70: GET_REG32(env->fpscr);
        }
    }
    return 0;
}
static int cpu_gdb_write_register(CPUPPCState *env, uint8_t *mem_buf, int n)
{
    if (n < 32) {
        /* gprs */
        env->gpr[n] = ldtul_p(mem_buf);
        return sizeof(target_ulong);
    } else if (n < 64) {
        /* fprs */
        env->fpr[n-32] = ldfq_p(mem_buf);
        return 8;
    } else {
        switch (n) {
        case 64:
            env->nip = ldtul_p(mem_buf);
            return sizeof(target_ulong);
        case 65:
            ppc_store_msr(env, ldtul_p(mem_buf));
            return sizeof(target_ulong);
        case 66:
            {
                uint32_t cr = ldl_p(mem_buf);
                int i;
                for (i = 0; i < 8; i++)
                    env->crf[i] = (cr >> (32 - ((i + 1) * 4))) & 0xF;
                return 4;
            }
        case 67:
            env->lr = ldtul_p(mem_buf);
            return sizeof(target_ulong);
        case 68:
            env->ctr = ldtul_p(mem_buf);
            return sizeof(target_ulong);
        case 69:
            env->xer = ldtul_p(mem_buf);
            return sizeof(target_ulong);
        case 70:
            /* fpscr */
            return 4;
        }
    }
    return 0;
}
#elif defined (TARGET_SPARC)

#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
#define NUM_CORE_REGS 86
#else
#define NUM_CORE_REGS 72
#endif

#ifdef TARGET_ABI32
#define GET_REGA(val) GET_REG32(val)
#else
#define GET_REGA(val) GET_REGL(val)
#endif

static int cpu_gdb_read_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
{
    if (n < 8) {
        /* g0..g7 */
        GET_REGA(env->gregs[n]);
    }
    if (n < 32) {
        /* register window */
        GET_REGA(env->regwptr[n - 8]);
    }
#if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
    if (n < 64) {
        /* fprs */
        if (n & 1) {
            GET_REG32(env->fpr[(n - 32) / 2].l.lower);
        } else {
            GET_REG32(env->fpr[(n - 32) / 2].l.upper);
        }
    }
    /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
    switch (n) {
    case 64: GET_REGA(env->y);
    case 65: GET_REGA(cpu_get_psr(env));
    case 66: GET_REGA(env->wim);
    case 67: GET_REGA(env->tbr);
    case 68: GET_REGA(env->pc);
    case 69: GET_REGA(env->npc);
    case 70: GET_REGA(env->fsr);
    case 71: GET_REGA(0); /* csr */
    default: GET_REGA(0);
    }
#else
    if (n < 64) {
        /* f0-f31 */
        if (n & 1) {
            GET_REG32(env->fpr[(n - 32) / 2].l.lower);
        } else {
            GET_REG32(env->fpr[(n - 32) / 2].l.upper);
        }
    }
    if (n < 80) {
        /* f32-f62 (double width, even numbers only) */
        GET_REG64(env->fpr[(n - 32) / 2].ll);
    }
    switch (n) {
    case 80: GET_REGL(env->pc);
    case 81: GET_REGL(env->npc);
    case 82: GET_REGL((cpu_get_ccr(env) << 32) |
                      ((env->asi & 0xff) << 24) |
                      ((env->pstate & 0xfff) << 8) |
                      cpu_get_cwp64(env));
    case 83: GET_REGL(env->fsr);
    case 84: GET_REGL(env->fprs);
    case 85: GET_REGL(env->y);
    }
#endif
    return 0;
}
static int cpu_gdb_write_register(CPUSPARCState *env, uint8_t *mem_buf, int n)
{
#if defined(TARGET_ABI32)
    abi_ulong tmp;

    tmp = ldl_p(mem_buf);
#else
    target_ulong tmp;

    tmp = ldtul_p(mem_buf);
#endif

    if (n < 8) {
        /* g0..g7 */
        env->gregs[n] = tmp;
    } else if (n < 32) {
        /* register window */
        env->regwptr[n - 8] = tmp;
    }
#if defined(TARGET_ABI32) || !defined(TARGET_SPARC64)
    else if (n < 64) {
        /* fprs */
        if (n & 1) {
            env->fpr[(n - 32) / 2].l.lower = tmp;
        } else {
            env->fpr[(n - 32) / 2].l.upper = tmp;
        }
    } else {
        /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
        switch (n) {
        case 64: env->y = tmp; break;
        case 65: cpu_put_psr(env, tmp); break;
        case 66: env->wim = tmp; break;
        case 67: env->tbr = tmp; break;
        case 68: env->pc = tmp; break;
        case 69: env->npc = tmp; break;
        case 70: env->fsr = tmp; break;
        default: return 0;
        }
    }
    return 4;
#else
    else if (n < 64) {
        /* f0-f31 */
        tmp = ldl_p(mem_buf);
        if (n & 1) {
            env->fpr[(n - 32) / 2].l.lower = tmp;
        } else {
            env->fpr[(n - 32) / 2].l.upper = tmp;
        }
        return 4;
    } else if (n < 80) {
        /* f32-f62 (double width, even numbers only) */
        env->fpr[(n - 32) / 2].ll = tmp;
    } else {
        switch (n) {
        case 80: env->pc = tmp; break;
        case 81: env->npc = tmp; break;
        case 82:
            cpu_put_ccr(env, tmp >> 32);
            env->asi = (tmp >> 24) & 0xff;
            env->pstate = (tmp >> 8) & 0xfff;
            cpu_put_cwp64(env, tmp & 0xff);
            break;
        case 83: env->fsr = tmp; break;
        case 84: env->fprs = tmp; break;
        case 85: env->y = tmp; break;
        default: return 0;
        }
    }
    return 8;
#endif
}
#elif defined (TARGET_ARM)

/* Old gdb always expect FPA registers.  Newer (xml-aware) gdb only expect
   whatever the target description contains.  Due to a historical mishap
   the FPA registers appear in between core integer regs and the CPSR.
   We hack round this by giving the FPA regs zero size when talking to a
   newer gdb.  */
#define NUM_CORE_REGS 26
#define GDB_CORE_XML "arm-core.xml"

static int cpu_gdb_read_register(CPUARMState *env, uint8_t *mem_buf, int n)
{
    if (n < 16) {
        /* Core integer register.  */
        GET_REG32(env->regs[n]);
    }
    if (n < 24) {
        /* FPA registers.  */
        if (gdb_has_xml)
            return 0;
        memset(mem_buf, 0, 12);
        return 12;
    }
    switch (n) {
    case 24:
        /* FPA status register.  */
        if (gdb_has_xml)
            return 0;
        GET_REG32(0);
    case 25:
        /* CPSR */
        GET_REG32(cpsr_read(env));
    }
    /* Unknown register.  */
    return 0;
}

static int cpu_gdb_write_register(CPUARMState *env, uint8_t *mem_buf, int n)
{
    uint32_t tmp;

    tmp = ldl_p(mem_buf);

    /* Mask out low bit of PC to workaround gdb bugs.  This will probably
       cause problems if we ever implement the Jazelle DBX extensions.  */
    if (n == 15)
        tmp &= ~1;

    if (n < 16) {
        /* Core integer register.  */
        env->regs[n] = tmp;
        return 4;
    }
    if (n < 24) { /* 16-23 */
        /* FPA registers (ignored).  */
        if (gdb_has_xml)
            return 0;
        return 12;
    }
    switch (n) {
    case 24:
        /* FPA status register (ignored).  */
        if (gdb_has_xml)
            return 0;
        return 4;
    case 25:
        /* CPSR */
        cpsr_write (env, tmp, 0xffffffff);
        return 4;
    }
    /* Unknown register.  */
    return 0;
}
#elif defined (TARGET_M68K)

#define NUM_CORE_REGS 18

#define GDB_CORE_XML "cf-core.xml"

static int cpu_gdb_read_register(CPUM68KState *env, uint8_t *mem_buf, int n)
{
    if (n < 8) {
        /* D0-D7 */
        GET_REG32(env->dregs[n]);
    } else if (n < 16) {
        /* A0-A7 */
        GET_REG32(env->aregs[n - 8]);
    } else {
        switch (n) {
        case 16: GET_REG32(env->sr);
        case 17: GET_REG32(env->pc);
        }
    }
    /* FP registers not included here because they vary between
       ColdFire and m68k.  Use XML bits for these.  */
    return 0;
}

static int cpu_gdb_write_register(CPUM68KState *env, uint8_t *mem_buf, int n)
{
    uint32_t tmp;

    tmp = ldl_p(mem_buf);

    if (n < 8) {
        /* D0-D7 */
        env->dregs[n] = tmp;
    } else if (n < 16) {
        /* A0-A7 */
        env->aregs[n - 8] = tmp;
    } else {
        switch (n) {
        case 16: env->sr = tmp; break;
        case 17: env->pc = tmp; break;
        default: return 0;
        }
    }
    return 4;
}
#elif defined (TARGET_MIPS)

#define NUM_CORE_REGS 73

static int cpu_gdb_read_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
{
    if (n < 32) {
        GET_REGL(env->active_tc.gpr[n]);
    }
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        if (n >= 38 && n < 70) {
            if (env->CP0_Status & (1 << CP0St_FR))
                GET_REGL(env->active_fpu.fpr[n - 38].d);
            else
                GET_REGL(env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX]);
        }
        switch (n) {
        case 70: GET_REGL((int32_t)env->active_fpu.fcr31);
        case 71: GET_REGL((int32_t)env->active_fpu.fcr0);
        }
    }
    switch (n) {
    case 32: GET_REGL((int32_t)env->CP0_Status);
    case 33: GET_REGL(env->active_tc.LO[0]);
    case 34: GET_REGL(env->active_tc.HI[0]);
    case 35: GET_REGL(env->CP0_BadVAddr);
    case 36: GET_REGL((int32_t)env->CP0_Cause);
    case 37: GET_REGL(env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16));
    case 72: GET_REGL(0); /* fp */
    case 89: GET_REGL((int32_t)env->CP0_PRid);
    }
    if (n >= 73 && n <= 88) {
        /* 16 embedded regs.  */
        GET_REGL(0);
    }

    return 0;
}
/* convert MIPS rounding mode in FCR31 to IEEE library */
static unsigned int ieee_rm[] =
{
    float_round_nearest_even,
    float_round_to_zero,
    float_round_up,
    float_round_down
};
#define RESTORE_ROUNDING_MODE \
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
static int cpu_gdb_write_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
{
    target_ulong tmp;

    tmp = ldtul_p(mem_buf);

    if (n < 32) {
        env->active_tc.gpr[n] = tmp;
        return sizeof(target_ulong);
    }
    if (env->CP0_Config1 & (1 << CP0C1_FP)
            && n >= 38 && n < 73) {
        if (n < 70) {
            if (env->CP0_Status & (1 << CP0St_FR))
                env->active_fpu.fpr[n - 38].d = tmp;
            else
                env->active_fpu.fpr[n - 38].w[FP_ENDIAN_IDX] = tmp;
        }
        switch (n) {
        case 70:
            env->active_fpu.fcr31 = tmp & 0xFF83FFFF;
            /* set rounding mode */
            RESTORE_ROUNDING_MODE;
            break;
        case 71: env->active_fpu.fcr0 = tmp; break;
        }
        return sizeof(target_ulong);
    }
    switch (n) {
    case 32: env->CP0_Status = tmp; break;
    case 33: env->active_tc.LO[0] = tmp; break;
    case 34: env->active_tc.HI[0] = tmp; break;
    case 35: env->CP0_BadVAddr = tmp; break;
    case 36: env->CP0_Cause = tmp; break;
    case 37:
        env->active_tc.PC = tmp & ~(target_ulong)1;
        if (tmp & 1) {
            env->hflags |= MIPS_HFLAG_M16;
        } else {
            env->hflags &= ~(MIPS_HFLAG_M16);
        }
        break;
    case 72: /* fp, ignored */ break;
    default:
        /* Other registers are readonly.  Ignore writes.  */
        break;
    }

    return sizeof(target_ulong);
}
#elif defined(TARGET_OPENRISC)

#define NUM_CORE_REGS (32 + 3)

static int cpu_gdb_read_register(CPUOpenRISCState *env, uint8_t *mem_buf, int n)
{
    if (n < 32) {
        GET_REG32(env->gpr[n]);
    } else {
        switch (n) {
        case 32:    /* PPC */
            GET_REG32(env->ppc);
            break;
        case 33:    /* NPC */
            GET_REG32(env->npc);
            break;
        default:
            break;
        }
    }
    return 0;
}

static int cpu_gdb_write_register(CPUOpenRISCState *env,
                                  uint8_t *mem_buf, int n)
{
    uint32_t tmp;

    if (n > NUM_CORE_REGS) {
        return 0;
    }

    tmp = ldl_p(mem_buf);

    if (n < 32) {
        env->gpr[n] = tmp;
    } else {
        switch (n) {
        case 32:    /* PPC */
            env->ppc = tmp;
            break;
        case 33:    /* NPC */
            env->npc = tmp;
            break;
        default:
            break;
        }
    }
    return 4;
}
#elif defined (TARGET_SH4)

/* Hint: Use "set architecture sh4" in GDB to see fpu registers */
/* FIXME: We should use XML for this.  */

#define NUM_CORE_REGS 59

static int cpu_gdb_read_register(CPUSH4State *env, uint8_t *mem_buf, int n)
{
    if (n < 8) {
        if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
            GET_REGL(env->gregs[n + 16]);
        } else {
            GET_REGL(env->gregs[n]);
        }
    } else if (n < 16) {
        GET_REGL(env->gregs[n]);
    } else if (n >= 25 && n < 41) {
        GET_REGL(env->fregs[(n - 25) + ((env->fpscr & FPSCR_FR) ? 16 : 0)]);
    } else if (n >= 43 && n < 51) {
        GET_REGL(env->gregs[n - 43]);
    } else if (n >= 51 && n < 59) {
        GET_REGL(env->gregs[n - (51 - 16)]);
    }
    switch (n) {
    case 16: GET_REGL(env->pc);
    case 17: GET_REGL(env->pr);
    case 18: GET_REGL(env->gbr);
    case 19: GET_REGL(env->vbr);
    case 20: GET_REGL(env->mach);
    case 21: GET_REGL(env->macl);
    case 22: GET_REGL(env->sr);
    case 23: GET_REGL(env->fpul);
    case 24: GET_REGL(env->fpscr);
    case 41: GET_REGL(env->ssr);
    case 42: GET_REGL(env->spc);
    }

    return 0;
}
static int cpu_gdb_write_register(CPUSH4State *env, uint8_t *mem_buf, int n)
{
    uint32_t tmp;

    tmp = ldl_p(mem_buf);

    if (n < 8) {
        if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
            env->gregs[n + 16] = tmp;
        } else {
            env->gregs[n] = tmp;
        }
        return 4;
    } else if (n < 16) {
        env->gregs[n] = tmp;
        return 4;
    } else if (n >= 25 && n < 41) {
        env->fregs[(n - 25) + ((env->fpscr & FPSCR_FR) ? 16 : 0)] = tmp;
        return 4;
    } else if (n >= 43 && n < 51) {
        env->gregs[n - 43] = tmp;
        return 4;
    } else if (n >= 51 && n < 59) {
        env->gregs[n - (51 - 16)] = tmp;
        return 4;
    }
    switch (n) {
    case 16: env->pc = tmp; break;
    case 17: env->pr = tmp; break;
    case 18: env->gbr = tmp; break;
    case 19: env->vbr = tmp; break;
    case 20: env->mach = tmp; break;
    case 21: env->macl = tmp; break;
    case 22: env->sr = tmp; break;
    case 23: env->fpul = tmp; break;
    case 24: env->fpscr = tmp; break;
    case 41: env->ssr = tmp; break;
    case 42: env->spc = tmp; break;
    default: return 0;
    }

    return 4;
}
#elif defined (TARGET_MICROBLAZE)

#define NUM_CORE_REGS (32 + 5)

static int cpu_gdb_read_register(CPUMBState *env, uint8_t *mem_buf, int n)
{
    if (n < 32) {
        GET_REG32(env->regs[n]);
    } else {
        GET_REG32(env->sregs[n - 32]);
    }
    return 0;
}

static int cpu_gdb_write_register(CPUMBState *env, uint8_t *mem_buf, int n)
{
    uint32_t tmp;

    if (n > NUM_CORE_REGS)
        return 0;

    tmp = ldl_p(mem_buf);

    if (n < 32) {
        env->regs[n] = tmp;
    } else {
        env->sregs[n - 32] = tmp;
    }
    return 4;
}
#elif defined (TARGET_CRIS)

#define NUM_CORE_REGS 49

static int
read_register_crisv10(CPUCRISState *env, uint8_t *mem_buf, int n)
{
    if (n < 15) {
        GET_REG32(env->regs[n]);
    }
    if (n == 15) {
        GET_REG32(env->pc);
    }
    if (n < 32) {
        switch (n) {
        case 16:
            GET_REG8(env->pregs[n - 16]);
            break;
        case 17:
            GET_REG8(env->pregs[n - 16]);
            break;
        case 20:
        case 21:
            GET_REG16(env->pregs[n - 16]);
            break;
        default:
            if (n >= 23) {
                GET_REG32(env->pregs[n - 16]);
            }
            break;
        }
    }
    return 0;
}

static int cpu_gdb_read_register(CPUCRISState *env, uint8_t *mem_buf, int n)
{
    uint8_t srs;

    if (env->pregs[PR_VR] < 32)
        return read_register_crisv10(env, mem_buf, n);

    srs = env->pregs[PR_SRS];
    if (n < 16) {
        GET_REG32(env->regs[n]);
    }
    if (n >= 21 && n < 32) {
        GET_REG32(env->pregs[n - 16]);
    }
    if (n >= 33 && n < 49) {
        GET_REG32(env->sregs[srs][n - 33]);
    }
    switch (n) {
    case 16: GET_REG8(env->pregs[0]);
    case 17: GET_REG8(env->pregs[1]);
    case 18: GET_REG32(env->pregs[2]);
    case 19: GET_REG8(srs);
    case 20: GET_REG16(env->pregs[4]);
    case 32: GET_REG32(env->pc);
    }

    return 0;
}

static int cpu_gdb_write_register(CPUCRISState *env, uint8_t *mem_buf, int n)
{
    uint32_t tmp;

    if (n > 49)
        return 0;

    tmp = ldl_p(mem_buf);

    if (n < 16) {
        env->regs[n] = tmp;
    }
    if (n >= 21 && n < 32) {
        env->pregs[n - 16] = tmp;
    }

    /* FIXME: Should support function regs be writable?  */
    switch (n) {
    case 16: return 1;
    case 17: return 1;
    case 18: env->pregs[PR_PID] = tmp; break;
    case 19: return 1;
    case 20: return 2;
    case 32: env->pc = tmp; break;
    }

    return 4;
}
#elif defined (TARGET_ALPHA)

#define NUM_CORE_REGS 67

static int cpu_gdb_read_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
{
    uint64_t val;
    CPU_DoubleU d;

    switch (n) {
    case 0 ... 30:
        val = env->ir[n];
        break;
    case 32 ... 62:
        d.d = env->fir[n - 32];
        val = d.ll;
        break;
    case 63:
        val = cpu_alpha_load_fpcr(env);
        break;
    case 64:
        val = env->pc;
        break;
    case 66:
        val = env->unique;
        break;
    case 31:
    case 65:
        /* 31 really is the zero register; 65 is unassigned in the
           gdb protocol, but is still required to occupy 8 bytes.  */
        val = 0;
        break;
    default:
        return 0;
    }
    GET_REGL(val);
}

static int cpu_gdb_write_register(CPUAlphaState *env, uint8_t *mem_buf, int n)
{
    target_ulong tmp = ldtul_p(mem_buf);
    CPU_DoubleU d;

    switch (n) {
    case 0 ... 30:
        env->ir[n] = tmp;
        break;
    case 32 ... 62:
        d.ll = tmp;
        env->fir[n - 32] = d.d;
        break;
    case 63:
        cpu_alpha_store_fpcr(env, tmp);
        break;
    case 64:
        env->pc = tmp;
        break;
    case 66:
        env->unique = tmp;
        break;
    case 31:
    case 65:
        /* 31 really is the zero register; 65 is unassigned in the
           gdb protocol, but is still required to occupy 8 bytes.  */
        break;
    default:
        return 0;
    }
    return 8;
}
#elif defined (TARGET_S390X)

#define NUM_CORE_REGS S390_NUM_TOTAL_REGS

static int cpu_gdb_read_register(CPUS390XState *env, uint8_t *mem_buf, int n)
{
    switch (n) {
    case S390_PSWM_REGNUM: GET_REGL(env->psw.mask); break;
    case S390_PSWA_REGNUM: GET_REGL(env->psw.addr); break;
    case S390_R0_REGNUM ... S390_R15_REGNUM:
        GET_REGL(env->regs[n-S390_R0_REGNUM]); break;
    case S390_A0_REGNUM ... S390_A15_REGNUM:
        GET_REG32(env->aregs[n-S390_A0_REGNUM]); break;
    case S390_FPC_REGNUM: GET_REG32(env->fpc); break;
    case S390_F0_REGNUM ... S390_F15_REGNUM:
        GET_REG64(env->fregs[n-S390_F0_REGNUM].ll); break;
    case S390_PC_REGNUM: GET_REGL(env->psw.addr); break;
    case S390_CC_REGNUM:
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);
        GET_REG32(env->cc_op);
        break;
    }

    return 0;
}

static int cpu_gdb_write_register(CPUS390XState *env, uint8_t *mem_buf, int n)
{
    target_ulong tmpl;
    uint32_t tmp32;
    int r = 8;

    tmpl = ldtul_p(mem_buf);
    tmp32 = ldl_p(mem_buf);

    switch (n) {
    case S390_PSWM_REGNUM: env->psw.mask = tmpl; break;
    case S390_PSWA_REGNUM: env->psw.addr = tmpl; break;
    case S390_R0_REGNUM ... S390_R15_REGNUM:
        env->regs[n-S390_R0_REGNUM] = tmpl; break;
    case S390_A0_REGNUM ... S390_A15_REGNUM:
        env->aregs[n-S390_A0_REGNUM] = tmp32; r=4; break;
    case S390_FPC_REGNUM: env->fpc = tmp32; r=4; break;
    case S390_F0_REGNUM ... S390_F15_REGNUM:
        env->fregs[n-S390_F0_REGNUM].ll = tmpl; break;
    case S390_PC_REGNUM: env->psw.addr = tmpl; break;
    case S390_CC_REGNUM: env->cc_op = tmp32; r=4; break;
    default:
        return 0;
    }

    return r;
}
#elif defined (TARGET_LM32)

#include "hw/lm32_pic.h"
#define NUM_CORE_REGS (32 + 7)

static int cpu_gdb_read_register(CPULM32State *env, uint8_t *mem_buf, int n)
{
    if (n < 32) {
        GET_REG32(env->regs[n]);
    } else {
        switch (n) {
        case 32:
            GET_REG32(env->pc);
            break;
        /* FIXME: put in right exception ID */
        case 33:
            GET_REG32(0);
            break;
        case 34:
            GET_REG32(env->eba);
            break;
        case 35:
            GET_REG32(env->deba);
            break;
        case 36:
            GET_REG32(env->ie);
            break;
        case 37:
            GET_REG32(lm32_pic_get_im(env->pic_state));
            break;
        case 38:
            GET_REG32(lm32_pic_get_ip(env->pic_state));
            break;
        }
    }
    return 0;
}

static int cpu_gdb_write_register(CPULM32State *env, uint8_t *mem_buf, int n)
{
    uint32_t tmp;

    if (n > NUM_CORE_REGS) {
        return 0;
    }

    tmp = ldl_p(mem_buf);

    if (n < 32) {
        env->regs[n] = tmp;
    } else {
        switch (n) {
        case 32:
            env->pc = tmp;
            break;
        case 34:
            env->eba = tmp;
            break;
        case 35:
            env->deba = tmp;
            break;
        case 36:
            env->ie = tmp;
            break;
        case 37:
            lm32_pic_set_im(env->pic_state, tmp);
            break;
        case 38:
            lm32_pic_set_ip(env->pic_state, tmp);
            break;
        }
    }
    return 4;
}
#elif defined(TARGET_XTENSA)

/* Use num_core_regs to see only non-privileged registers in an unmodified gdb.
 * Use num_regs to see all registers. gdb modification is required for that:
 * reset bit 0 in the 'flags' field of the registers definitions in the
 * gdb/xtensa-config.c inside gdb source tree or inside gdb overlay.
 */
#define NUM_CORE_REGS (env->config->gdb_regmap.num_regs)
#define num_g_regs NUM_CORE_REGS

static int cpu_gdb_read_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
{
    const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;

    if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
        return 0;
    }

    switch (reg->type) {
    case 9: /*pc*/
        GET_REG32(env->pc);
        break;

    case 1: /*ar*/
        xtensa_sync_phys_from_window(env);
        GET_REG32(env->phys_regs[(reg->targno & 0xff) % env->config->nareg]);
        break;

    case 2: /*SR*/
        GET_REG32(env->sregs[reg->targno & 0xff]);
        break;

    case 3: /*UR*/
        GET_REG32(env->uregs[reg->targno & 0xff]);
        break;

    case 8: /*a*/
        GET_REG32(env->regs[reg->targno & 0x0f]);
        break;

    default:
        qemu_log("%s from reg %d of unsupported type %d\n",
                 __func__, n, reg->type);
        return 0;
    }
}

static int cpu_gdb_write_register(CPUXtensaState *env, uint8_t *mem_buf, int n)
{
    uint32_t tmp;
    const XtensaGdbReg *reg = env->config->gdb_regmap.reg + n;

    if (n < 0 || n >= env->config->gdb_regmap.num_regs) {
        return 0;
    }

    tmp = ldl_p(mem_buf);

    switch (reg->type) {
    case 9: /*pc*/
        env->pc = tmp;
        break;

    case 1: /*ar*/
        env->phys_regs[(reg->targno & 0xff) % env->config->nareg] = tmp;
        xtensa_sync_window_from_phys(env);
        break;

    case 2: /*SR*/
        env->sregs[reg->targno & 0xff] = tmp;
        break;

    case 3: /*UR*/
        env->uregs[reg->targno & 0xff] = tmp;
        break;

    case 8: /*a*/
        env->regs[reg->targno & 0x0f] = tmp;
        break;

    default:
        qemu_log("%s to reg %d of unsupported type %d\n",
                 __func__, n, reg->type);
        return 0;
    }

    return 4;
}
#else

#define NUM_CORE_REGS 0

static int cpu_gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int n)
{
    return 0;
}

static int cpu_gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int n)
{
    return 0;
}

#endif

#if !defined(TARGET_XTENSA)
static int num_g_regs = NUM_CORE_REGS;
#endif
/* Encode data using the encoding for 'x' packets.  */
static int memtox(char *buf, const char *mem, int len)
{
    char *p = buf;
    char c;

    while (len--) {
        c = *(mem++);
        switch (c) {
        case '#': case '$': case '*': case '}':
            *(p++) = '}';
            *(p++) = c ^ 0x20;
            break;
        default:
            *(p++) = c;
            break;
        }
    }
    return p - buf;
}
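
/* Illustrative sketch (not part of the original): in GDB's binary ('x')
 * encoding the bytes '#', '$', '*' and '}' cannot appear literally inside a
 * packet, so each is sent as the escape byte '}' followed by the original
 * byte XORed with 0x20.  The hypothetical call below escapes '$' (0x24).
 */
#if 0
static void example_x_escape(void)
{
    char out[2];
    int n = memtox(out, "$", 1);   /* n == 2, out[0] == '}', out[1] == 0x04 */
    (void)n;
}
#endif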
static const char *get_feature_xml(const char *p, const char **newp)
{
    size_t len;
    int i;
    const char *name;
    static char target_xml[1024];

    len = 0;
    while (p[len] && p[len] != ':')
        len++;
    *newp = p + len;

    name = NULL;
    if (strncmp(p, "target.xml", len) == 0) {
        /* Generate the XML description for this CPU.  */
        if (!target_xml[0]) {
            GDBRegisterState *r;

            snprintf(target_xml, sizeof(target_xml),
                     "<?xml version=\"1.0\"?>"
                     "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
                     "<target>"
                     "<xi:include href=\"%s\"/>",
                     GDB_CORE_XML);

            for (r = first_cpu->gdb_regs; r; r = r->next) {
                pstrcat(target_xml, sizeof(target_xml), "<xi:include href=\"");
                pstrcat(target_xml, sizeof(target_xml), r->xml);
                pstrcat(target_xml, sizeof(target_xml), "\"/>");
            }
            pstrcat(target_xml, sizeof(target_xml), "</target>");
        }
        return target_xml;
    }
    for (i = 0; ; i++) {
        name = xml_builtin[i][0];
        if (!name || (strncmp(name, p, len) == 0 && strlen(name) == len))
            break;
    }
    return name ? xml_builtin[i][1] : NULL;
}
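
/* Illustrative sketch (not from the original): for a target whose core
 * description is "arm-core.xml" and which registered one coprocessor
 * feature file ("arm-vfp.xml" here is just an example name), the generated
 * target.xml would look roughly like:
 *
 *   <?xml version="1.0"?>
 *   <!DOCTYPE target SYSTEM "gdb-target.dtd">
 *   <target>
 *     <xi:include href="arm-core.xml"/>
 *     <xi:include href="arm-vfp.xml"/>
 *   </target>
 */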
static int gdb_read_register(CPUArchState *env, uint8_t *mem_buf, int reg)
{
    GDBRegisterState *r;

    if (reg < NUM_CORE_REGS)
        return cpu_gdb_read_register(env, mem_buf, reg);

    for (r = env->gdb_regs; r; r = r->next) {
        if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
            return r->get_reg(env, mem_buf, reg - r->base_reg);
        }
    }
    return 0;
}

static int gdb_write_register(CPUArchState *env, uint8_t *mem_buf, int reg)
{
    GDBRegisterState *r;

    if (reg < NUM_CORE_REGS)
        return cpu_gdb_write_register(env, mem_buf, reg);

    for (r = env->gdb_regs; r; r = r->next) {
        if (r->base_reg <= reg && reg < r->base_reg + r->num_regs) {
            return r->set_reg(env, mem_buf, reg - r->base_reg);
        }
    }
    return 0;
}
#if !defined(TARGET_XTENSA)
/* Register a supplemental set of CPU registers.  If g_pos is nonzero it
   specifies the first register number and these registers are included in
   a standard "g" packet.  Direction is relative to gdb, i.e. get_reg is
   gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
 */

void gdb_register_coprocessor(CPUArchState * env,
                              gdb_reg_cb get_reg, gdb_reg_cb set_reg,
                              int num_regs, const char *xml, int g_pos)
{
    GDBRegisterState *s;
    GDBRegisterState **p;
    static int last_reg = NUM_CORE_REGS;

    p = &env->gdb_regs;
    while (*p) {
        /* Check for duplicates.  */
        if (strcmp((*p)->xml, xml) == 0)
            return;
        p = &(*p)->next;
    }

    s = g_new0(GDBRegisterState, 1);
    s->base_reg = last_reg;
    s->num_regs = num_regs;
    s->get_reg = get_reg;
    s->set_reg = set_reg;
    s->xml = xml;

    /* Add to end of list.  */
    last_reg += num_regs;
    *p = s;
    if (g_pos) {
        if (g_pos != s->base_reg) {
            fprintf(stderr, "Error: Bad gdb register numbering for '%s'\n"
                    "Expected %d got %d\n", xml, g_pos, s->base_reg);
        } else {
            num_g_regs = last_reg;
        }
    }
}
#endif
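
/* Illustrative sketch (not from the original): a target would expose extra
 * registers to gdb by registering read/write callbacks plus the XML feature
 * file describing them.  The callback and file names below are hypothetical.
 */
#if 0
static int example_get_reg(CPUArchState *env, uint8_t *mem_buf, int n)
{
    GET_REG32(0);               /* store the value of extra register n */
}

static int example_set_reg(CPUArchState *env, uint8_t *mem_buf, int n)
{
    (void)ldl_p(mem_buf);       /* consume the new value of extra register n */
    return 4;
}

static void example_register_extra_regs(CPUArchState *env)
{
    /* 4 registers, described by a hypothetical "example-extra.xml",
       not included in the 'g' packet (g_pos == 0).  */
    gdb_register_coprocessor(env, example_get_reg, example_set_reg,
                             4, "example-extra.xml", 0);
}
#endif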
#ifndef CONFIG_USER_ONLY
static const int xlat_gdb_type[] = {
    [GDB_WATCHPOINT_WRITE]  = BP_GDB | BP_MEM_WRITE,
    [GDB_WATCHPOINT_READ]   = BP_GDB | BP_MEM_READ,
    [GDB_WATCHPOINT_ACCESS] = BP_GDB | BP_MEM_ACCESS,
};
#endif

static int gdb_breakpoint_insert(target_ulong addr, target_ulong len, int type)
{
    CPUArchState *env;
    int err = 0;

    if (kvm_enabled())
        return kvm_insert_breakpoint(gdbserver_state->c_cpu, addr, len, type);

    switch (type) {
    case GDB_BREAKPOINT_SW:
    case GDB_BREAKPOINT_HW:
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            err = cpu_breakpoint_insert(env, addr, BP_GDB, NULL);
            if (err)
                break;
        }
        return err;
#ifndef CONFIG_USER_ONLY
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            err = cpu_watchpoint_insert(env, addr, len, xlat_gdb_type[type],
                                        NULL);
            if (err)
                break;
        }
        return err;
#endif
    default:
        return -ENOSYS;
    }
}

static int gdb_breakpoint_remove(target_ulong addr, target_ulong len, int type)
{
    CPUArchState *env;
    int err = 0;

    if (kvm_enabled())
        return kvm_remove_breakpoint(gdbserver_state->c_cpu, addr, len, type);

    switch (type) {
    case GDB_BREAKPOINT_SW:
    case GDB_BREAKPOINT_HW:
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            err = cpu_breakpoint_remove(env, addr, BP_GDB);
            if (err)
                break;
        }
        return err;
#ifndef CONFIG_USER_ONLY
    case GDB_WATCHPOINT_WRITE:
    case GDB_WATCHPOINT_READ:
    case GDB_WATCHPOINT_ACCESS:
        for (env = first_cpu; env != NULL; env = env->next_cpu) {
            err = cpu_watchpoint_remove(env, addr, len, xlat_gdb_type[type]);
            if (err)
                break;
        }
        return err;
#endif
    default:
        return -ENOSYS;
    }
}

static void gdb_breakpoint_remove_all(void)
{
    CPUArchState *env;

    if (kvm_enabled()) {
        kvm_remove_all_breakpoints(gdbserver_state->c_cpu);
        return;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu_breakpoint_remove_all(env, BP_GDB);
#ifndef CONFIG_USER_ONLY
        cpu_watchpoint_remove_all(env, BP_GDB);
#endif
    }
}
static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
{
    cpu_synchronize_state(s->c_cpu);
#if defined(TARGET_I386)
    s->c_cpu->eip = pc;
#elif defined (TARGET_PPC)
    s->c_cpu->nip = pc;
#elif defined (TARGET_SPARC)
    s->c_cpu->pc = pc;
    s->c_cpu->npc = pc + 4;
#elif defined (TARGET_ARM)
    s->c_cpu->regs[15] = pc;
#elif defined (TARGET_SH4)
    s->c_cpu->pc = pc;
#elif defined (TARGET_MIPS)
    s->c_cpu->active_tc.PC = pc & ~(target_ulong)1;
    if (pc & 1) {
        s->c_cpu->hflags |= MIPS_HFLAG_M16;
    } else {
        s->c_cpu->hflags &= ~(MIPS_HFLAG_M16);
    }
#elif defined (TARGET_MICROBLAZE)
    s->c_cpu->sregs[SR_PC] = pc;
#elif defined(TARGET_OPENRISC)
    s->c_cpu->pc = pc;
#elif defined (TARGET_CRIS)
    s->c_cpu->pc = pc;
#elif defined (TARGET_ALPHA)
    s->c_cpu->pc = pc;
#elif defined (TARGET_S390X)
    s->c_cpu->psw.addr = pc;
#elif defined (TARGET_LM32)
    s->c_cpu->pc = pc;
#elif defined(TARGET_XTENSA)
    s->c_cpu->pc = pc;
#endif
}
static CPUArchState *find_cpu(uint32_t thread_id)
{
    CPUArchState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (cpu_index(env) == thread_id) {
            return env;
        }
    }

    return NULL;
}
static int gdb_handle_packet(GDBState *s, const char *line_buf)
{
    CPUArchState *env;
    const char *p;
    uint32_t thread;
    int ch, reg_size, type, res;
    char buf[MAX_PACKET_LENGTH];
    uint8_t mem_buf[MAX_PACKET_LENGTH];
    uint8_t *registers;
    target_ulong addr, len;

#ifdef DEBUG_GDB
    printf("command='%s'\n", line_buf);
#endif
    p = line_buf;
    ch = *p++;
    switch(ch) {
    case '?':
        /* TODO: Make this return the correct value for user-mode.  */
        snprintf(buf, sizeof(buf), "T%02xthread:%02x;", GDB_SIGNAL_TRAP,
                 cpu_index(s->c_cpu));
        put_packet(s, buf);
        /* Remove all the breakpoints when this query is issued,
         * because gdb is doing and initial connect and the state
         * should be cleaned up.
         */
        gdb_breakpoint_remove_all();
        break;
    case 'c':
        if (*p != '\0') {
            addr = strtoull(p, (char **)&p, 16);
            gdb_set_cpu_pc(s, addr);
        }
        s->signal = 0;
        gdb_continue(s);
        return RS_IDLE;
    case 'C':
        s->signal = gdb_signal_to_target (strtoul(p, (char **)&p, 16));
        if (s->signal == -1)
            s->signal = 0;
        gdb_continue(s);
        return RS_IDLE;
    case 'v':
        if (strncmp(p, "Cont", 4) == 0) {
            int res_signal, res_thread;

            p += 4;
            if (*p == '?') {
                put_packet(s, "vCont;c;C;s;S");
                break;
            }
            res = 0;
            res_signal = 0;
            res_thread = 0;
            while (*p) {
                int action, signal;

                if (*p++ != ';') {
                    res = 0;
                    break;
                }
                action = *p++;
                signal = 0;
                if (action == 'C' || action == 'S') {
                    signal = strtoul(p, (char **)&p, 16);
                } else if (action != 'c' && action != 's') {
                    res = 0;
                    break;
                }
                thread = 0;
                if (*p == ':') {
                    thread = strtoull(p+1, (char **)&p, 16);
                }
                action = tolower(action);
                if (res == 0 || (res == 'c' && action == 's')) {
                    res = action;
                    res_signal = signal;
                    res_thread = thread;
                }
            }
            if (res) {
                if (res_thread != -1 && res_thread != 0) {
                    env = find_cpu(res_thread);
                    if (env == NULL) {
                        put_packet(s, "E22");
                        break;
                    }
                    s->c_cpu = env;
                }
                if (res == 's') {
                    cpu_single_step(s->c_cpu, sstep_flags);
                }
                s->signal = res_signal;
                gdb_continue(s);
                return RS_IDLE;
            }
            break;
        } else {
            goto unknown_command;
        }
    case 'k':
#ifdef CONFIG_USER_ONLY
        /* Kill the target */
        fprintf(stderr, "\nQEMU: Terminated via GDBstub\n");
        exit(0);
#endif
    case 'D':
        /* Detach packet */
        gdb_breakpoint_remove_all();
        gdb_syscall_mode = GDB_SYS_DISABLED;
        gdb_continue(s);
        put_packet(s, "OK");
        break;
    case 's':
        if (*p != '\0') {
            addr = strtoull(p, (char **)&p, 16);
            gdb_set_cpu_pc(s, addr);
        }
        cpu_single_step(s->c_cpu, sstep_flags);
        gdb_continue(s);
        return RS_IDLE;
    case 'F':
        {
            target_ulong ret;
            target_ulong err;

            ret = strtoull(p, (char **)&p, 16);
            if (*p == ',') {
                p++;
                err = strtoull(p, (char **)&p, 16);
            } else {
                err = 0;
            }
            if (*p == ',')
                p++;
            type = *p;
            if (s->current_syscall_cb) {
                s->current_syscall_cb(s->c_cpu, ret, err);
                s->current_syscall_cb = NULL;
            }
            if (type == 'C') {
                put_packet(s, "T02");
            } else {
                gdb_continue(s);
            }
        }
        break;
    case 'g':
        cpu_synchronize_state(s->g_cpu);
        len = 0;
        for (addr = 0; addr < num_g_regs; addr++) {
            reg_size = gdb_read_register(s->g_cpu, mem_buf + len, addr);
            len += reg_size;
        }
        memtohex(buf, mem_buf, len);
        put_packet(s, buf);
        break;
    case 'G':
        cpu_synchronize_state(s->g_cpu);
        registers = mem_buf;
        len = strlen(p) / 2;
        hextomem((uint8_t *)registers, p, len);
        for (addr = 0; addr < num_g_regs && len > 0; addr++) {
            reg_size = gdb_write_register(s->g_cpu, registers, addr);
            len -= reg_size;
            registers += reg_size;
        }
        put_packet(s, "OK");
        break;
    case 'm':
        addr = strtoull(p, (char **)&p, 16);
        if (*p == ',')
            p++;
        len = strtoull(p, NULL, 16);
        if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 0) != 0) {
            put_packet (s, "E14");
        } else {
            memtohex(buf, mem_buf, len);
            put_packet(s, buf);
        }
        break;
    case 'M':
        addr = strtoull(p, (char **)&p, 16);
        if (*p == ',')
            p++;
        len = strtoull(p, (char **)&p, 16);
        if (*p == ':')
            p++;
        hextomem(mem_buf, p, len);
        if (target_memory_rw_debug(s->g_cpu, addr, mem_buf, len, 1) != 0) {
            put_packet(s, "E14");
        } else {
            put_packet(s, "OK");
        }
        break;
    case 'p':
        /* Older gdb are really dumb, and don't use 'g' if 'p' is avaialable.
           This works, but can be very slow.  Anything new enough to
           understand XML also knows how to use this properly.  */
        if (!gdb_has_xml)
            goto unknown_command;
        addr = strtoull(p, (char **)&p, 16);
        reg_size = gdb_read_register(s->g_cpu, mem_buf, addr);
        if (reg_size) {
            memtohex(buf, mem_buf, reg_size);
            put_packet(s, buf);
        } else {
            put_packet(s, "E14");
        }
        break;
    case 'P':
        if (!gdb_has_xml)
            goto unknown_command;
        addr = strtoull(p, (char **)&p, 16);
        if (*p == '=')
            p++;
        reg_size = strlen(p) / 2;
        hextomem(mem_buf, p, reg_size);
        gdb_write_register(s->g_cpu, mem_buf, addr);
        put_packet(s, "OK");
        break;
    case 'Z':
    case 'z':
        type = strtoul(p, (char **)&p, 16);
        if (*p == ',')
            p++;
        addr = strtoull(p, (char **)&p, 16);
        if (*p == ',')
            p++;
        len = strtoull(p, (char **)&p, 16);
        if (ch == 'Z')
            res = gdb_breakpoint_insert(addr, len, type);
        else
            res = gdb_breakpoint_remove(addr, len, type);
        if (res >= 0)
            put_packet(s, "OK");
        else if (res == -ENOSYS)
            put_packet(s, "");
        else
            put_packet(s, "E22");
        break;
    case 'H':
        type = *p++;
        thread = strtoull(p, (char **)&p, 16);
        if (thread == -1 || thread == 0) {
            put_packet(s, "OK");
            break;
        }
        env = find_cpu(thread);
        if (env == NULL) {
            put_packet(s, "E22");
            break;
        }
        switch (type) {
        case 'c':
            s->c_cpu = env;
            put_packet(s, "OK");
            break;
        case 'g':
            s->g_cpu = env;
            put_packet(s, "OK");
            break;
        default:
            put_packet(s, "E22");
            break;
        }
        break;
    case 'T':
        thread = strtoull(p, (char **)&p, 16);
        env = find_cpu(thread);

        if (env != NULL) {
            put_packet(s, "OK");
        } else {
            put_packet(s, "E22");
        }
        break;
    case 'q':
    case 'Q':
        /* parse any 'q' packets here */
        if (!strcmp(p,"qemu.sstepbits")) {
            /* Query Breakpoint bit definitions */
            snprintf(buf, sizeof(buf), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
                     SSTEP_ENABLE,
                     SSTEP_NOIRQ,
                     SSTEP_NOTIMER);
            put_packet(s, buf);
            break;
        } else if (strncmp(p,"qemu.sstep",10) == 0) {
            /* Display or change the sstep_flags */
            p += 10;
            if (*p != '=') {
                /* Display current setting */
                snprintf(buf, sizeof(buf), "0x%x", sstep_flags);
                put_packet(s, buf);
                break;
            }
            p++;
            type = strtoul(p, (char **)&p, 16);
            sstep_flags = type;
            put_packet(s, "OK");
            break;
        } else if (strcmp(p,"C") == 0) {
            /* "Current thread" remains vague in the spec, so always return
             * the first CPU (gdb returns the first thread). */
            put_packet(s, "QC1");
            break;
        } else if (strcmp(p,"fThreadInfo") == 0) {
            s->query_cpu = first_cpu;
            goto report_cpuinfo;
        } else if (strcmp(p,"sThreadInfo") == 0) {
        report_cpuinfo:
            if (s->query_cpu) {
                snprintf(buf, sizeof(buf), "m%x", cpu_index(s->query_cpu));
                put_packet(s, buf);
                s->query_cpu = s->query_cpu->next_cpu;
            } else
                put_packet(s, "l");
            break;
        } else if (strncmp(p,"ThreadExtraInfo,", 16) == 0) {
            thread = strtoull(p+16, (char **)&p, 16);
            env = find_cpu(thread);
            if (env != NULL) {
                cpu_synchronize_state(env);
                len = snprintf((char *)mem_buf, sizeof(mem_buf),
                               "CPU#%d [%s]", env->cpu_index,
                               env->halted ? "halted " : "running");
                memtohex(buf, mem_buf, len);
                put_packet(s, buf);
            }
            break;
        }
#ifdef CONFIG_USER_ONLY
        else if (strncmp(p, "Offsets", 7) == 0) {
            TaskState *ts = s->c_cpu->opaque;

            snprintf(buf, sizeof(buf),
                     "Text=" TARGET_ABI_FMT_lx ";Data=" TARGET_ABI_FMT_lx
                     ";Bss=" TARGET_ABI_FMT_lx,
                     ts->info->code_offset,
                     ts->info->data_offset,
                     ts->info->data_offset);
            put_packet(s, buf);
            break;
        }
#else /* !CONFIG_USER_ONLY */
        else if (strncmp(p, "Rcmd,", 5) == 0) {
            int len = strlen(p + 5);

            if ((len % 2) != 0) {
                put_packet(s, "E01");
                break;
            }
            hextomem(mem_buf, p + 5, len);
            len = len / 2;
            mem_buf[len++] = 0;
            qemu_chr_be_write(s->mon_chr, mem_buf, len);
            put_packet(s, "OK");
            break;
        }
#endif /* !CONFIG_USER_ONLY */
        if (strncmp(p, "Supported", 9) == 0) {
            snprintf(buf, sizeof(buf), "PacketSize=%x", MAX_PACKET_LENGTH);
#ifdef GDB_CORE_XML
            pstrcat(buf, sizeof(buf), ";qXfer:features:read+");
#endif
            put_packet(s, buf);
            break;
        }
#ifdef GDB_CORE_XML
        if (strncmp(p, "Xfer:features:read:", 19) == 0) {
            const char *xml;
            target_ulong total_len;

            gdb_has_xml = 1;
            p += 19;
            xml = get_feature_xml(p, &p);
            if (!xml) {
                snprintf(buf, sizeof(buf), "E00");
                put_packet(s, buf);
                break;
            }

            if (*p == ':')
                p++;
            addr = strtoul(p, (char **)&p, 16);
            if (*p == ',')
                p++;
            len = strtoul(p, (char **)&p, 16);

            total_len = strlen(xml);
            if (addr > total_len) {
                snprintf(buf, sizeof(buf), "E00");
                put_packet(s, buf);
                break;
            }
            if (len > (MAX_PACKET_LENGTH - 5) / 2)
                len = (MAX_PACKET_LENGTH - 5) / 2;
            if (len < total_len - addr) {
                buf[0] = 'm';
                len = memtox(buf + 1, xml + addr, len);
            } else {
                buf[0] = 'l';
                len = memtox(buf + 1, xml + addr, total_len - addr);
            }
            put_packet_binary(s, buf, len + 1);
            break;
        }
#endif
        /* Unrecognised 'q' command.  */
        goto unknown_command;

    default:
    unknown_command:
        /* put empty packet */
        buf[0] = '\0';
        put_packet(s, buf);
        break;
    }
    return RS_IDLE;
}
void gdb_set_stop_cpu(CPUArchState *env)
{
    gdbserver_state->c_cpu = env;
    gdbserver_state->g_cpu = env;
}
#ifndef CONFIG_USER_ONLY
static void gdb_vm_state_change(void *opaque, int running, RunState state)
{
    GDBState *s = gdbserver_state;
    CPUArchState *env = s->c_cpu;
    char buf[256];
    const char *type;
    int ret;

    if (running || s->state == RS_INACTIVE) {
        return;
    }
    /* Is there a GDB syscall waiting to be sent?  */
    if (s->current_syscall_cb) {
        put_packet(s, s->syscall_buf);
        return;
    }
    switch (state) {
    case RUN_STATE_DEBUG:
        if (env->watchpoint_hit) {
            switch (env->watchpoint_hit->flags & BP_MEM_ACCESS) {
            case BP_MEM_READ:
                type = "r";
                break;
            case BP_MEM_ACCESS:
                type = "a";
                break;
            default:
                type = "";
                break;
            }
            snprintf(buf, sizeof(buf),
                     "T%02xthread:%02x;%swatch:" TARGET_FMT_lx ";",
                     GDB_SIGNAL_TRAP, cpu_index(env), type,
                     env->watchpoint_hit->vaddr);
            env->watchpoint_hit = NULL;
            goto send_packet;
        }
        tb_flush(env);
        ret = GDB_SIGNAL_TRAP;
        break;
    case RUN_STATE_PAUSED:
        ret = GDB_SIGNAL_INT;
        break;
    case RUN_STATE_SHUTDOWN:
        ret = GDB_SIGNAL_QUIT;
        break;
    case RUN_STATE_IO_ERROR:
        ret = GDB_SIGNAL_IO;
        break;
    case RUN_STATE_WATCHDOG:
        ret = GDB_SIGNAL_ALRM;
        break;
    case RUN_STATE_INTERNAL_ERROR:
        ret = GDB_SIGNAL_ABRT;
        break;
    case RUN_STATE_SAVE_VM:
    case RUN_STATE_RESTORE_VM:
        return;
    case RUN_STATE_FINISH_MIGRATE:
        ret = GDB_SIGNAL_XCPU;
        break;
    default:
        ret = GDB_SIGNAL_UNKNOWN;
        break;
    }
    snprintf(buf, sizeof(buf), "T%02xthread:%02x;", ret, cpu_index(env));

send_packet:
    put_packet(s, buf);

    /* disable single step if it was enabled */
    cpu_single_step(env, 0);
}
#endif
/* Send a gdb syscall request.
   This accepts limited printf-style format specifiers, specifically:
    %x  - target_ulong argument printed in hex.
    %lx - 64-bit argument printed in hex.
    %s  - string pointer (target_ulong) and length (int) pair.  */
void gdb_do_syscall(gdb_syscall_complete_cb cb, const char *fmt, ...)
{
    va_list va;
    char *p;
    char *p_end;
    target_ulong addr;
    uint64_t i64;
    GDBState *s;

    s = gdbserver_state;
    if (!s)
        return;
    s->current_syscall_cb = cb;
#ifndef CONFIG_USER_ONLY
    vm_stop(RUN_STATE_DEBUG);
#endif
    va_start(va, fmt);
    p = s->syscall_buf;
    p_end = &s->syscall_buf[sizeof(s->syscall_buf)];
    *(p++) = 'F';
    while (*fmt) {
        if (*fmt == '%') {
            fmt++;
            switch (*fmt++) {
            case 'x':
                addr = va_arg(va, target_ulong);
                p += snprintf(p, p_end - p, TARGET_FMT_lx, addr);
                break;
            case 'l':
                if (*(fmt++) != 'x')
                    goto bad_format;
                i64 = va_arg(va, uint64_t);
                p += snprintf(p, p_end - p, "%" PRIx64, i64);
                break;
            case 's':
                addr = va_arg(va, target_ulong);
                p += snprintf(p, p_end - p, TARGET_FMT_lx "/%x",
                              addr, va_arg(va, int));
                break;
            default:
            bad_format:
                fprintf(stderr, "gdbstub: Bad syscall format string '%s'\n",
                        fmt - 1);
                break;
            }
        } else {
            *(p++) = *(fmt++);
        }
    }
    *p = 0;
    va_end(va);
#ifdef CONFIG_USER_ONLY
    put_packet(s, s->syscall_buf);
    gdb_handlesig(s->c_cpu, 0);
#else
    /* In this case wait to send the syscall packet until notification that
       the CPU has stopped.  This must be done because if the packet is sent
       now the reply from the syscall request could be received while the CPU
       is still in the running state, which can cause packets to be dropped
       and state transition 'T' packets to be sent while the syscall is still
       being processed.  */
#endif
}
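
/* Illustrative sketch (not from the original): a semihosting implementation
 * would request a remote "write" syscall roughly as below, passing a guest
 * buffer address/length pair with "%s" and plain integers with "%x".  The
 * callback and argument names are hypothetical.
 */
#if 0
static void example_write_done(CPUArchState *env, target_ulong ret,
                               target_ulong err)
{
    /* invoked with the syscall's result once gdb replies with an 'F' packet */
}

static void example_remote_write(target_ulong fd, target_ulong buf_addr,
                                 target_ulong count)
{
    gdb_do_syscall(example_write_done, "write,%x,%s",
                   fd, buf_addr, (int)count);
}
#endif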
static void gdb_read_byte(GDBState *s, int ch)
{
    int i, csum;
    uint8_t reply;

#ifndef CONFIG_USER_ONLY
    if (s->last_packet_len) {
        /* Waiting for a response to the last packet.  If we see the start
           of a new command then abandon the previous response.  */
        if (ch == '-') {
#ifdef DEBUG_GDB
            printf("Got NACK, retransmitting\n");
#endif
            put_buffer(s, (uint8_t *)s->last_packet, s->last_packet_len);
        }
#ifdef DEBUG_GDB
        else if (ch == '+')
            printf("Got ACK\n");
        else
            printf("Got '%c' when expecting ACK/NACK\n", ch);
#endif
        if (ch == '+' || ch == '$')
            s->last_packet_len = 0;
        if (ch != '$')
            return;
    }
    if (runstate_is_running()) {
        /* when the CPU is running, we cannot do anything except stop
           it when receiving a char */
        vm_stop(RUN_STATE_PAUSED);
    } else
#endif
    {
        switch(s->state) {
        case RS_IDLE:
            if (ch == '$') {
                s->line_buf_index = 0;
                s->state = RS_GETLINE;
            }
            break;
        case RS_GETLINE:
            if (ch == '#') {
                /* end of command, start of checksum */
                s->state = RS_CHKSUM1;
            } else if (s->line_buf_index >= sizeof(s->line_buf) - 1) {
                s->state = RS_IDLE;
            } else {
                s->line_buf[s->line_buf_index++] = ch;
            }
            break;
        case RS_CHKSUM1:
            s->line_buf[s->line_buf_index] = '\0';
            s->line_csum = fromhex(ch) << 4;
            s->state = RS_CHKSUM2;
            break;
        case RS_CHKSUM2:
            s->line_csum |= fromhex(ch);
            csum = 0;
            for(i = 0; i < s->line_buf_index; i++) {
                csum += s->line_buf[i];
            }
            if (s->line_csum != (csum & 0xff)) {
                reply = '-';
                put_buffer(s, &reply, 1);
                s->state = RS_IDLE;
            } else {
                reply = '+';
                put_buffer(s, &reply, 1);
                s->state = gdb_handle_packet(s, s->line_buf);
            }
            break;
        default:
            abort();
        }
    }
}
/* Tell the remote gdb that the process has exited.  */
void gdb_exit(CPUArchState *env, int code)
{
    GDBState *s;
    char buf[4];

    s = gdbserver_state;
    if (!s) {
        return;
    }
#ifdef CONFIG_USER_ONLY
    if (gdbserver_fd < 0 || s->fd < 0) {
        return;
    }
#endif

    snprintf(buf, sizeof(buf), "W%02x", (uint8_t)code);
    put_packet(s, buf);

#ifndef CONFIG_USER_ONLY
    if (s->chr) {
        qemu_chr_delete(s->chr);
    }
#endif
}
#ifdef CONFIG_USER_ONLY
int
gdb_queuesig (void)
{
    GDBState *s;

    s = gdbserver_state;

    if (gdbserver_fd < 0 || s->fd < 0)
        return 0;
    else
        return 1;
}

int
gdb_handlesig (CPUArchState *env, int sig)
{
    GDBState *s;
    char buf[256];
    int n;

    s = gdbserver_state;
    if (gdbserver_fd < 0 || s->fd < 0)
        return sig;

    /* disable single step if it was enabled */
    cpu_single_step(env, 0);
    tb_flush(env);

    if (sig != 0) {
        snprintf(buf, sizeof(buf), "S%02x", target_signal_to_gdb (sig));
        put_packet(s, buf);
    }
    /* put_packet() might have detected that the peer terminated the
       connection.  */
    if (s->fd < 0)
        return sig;

    sig = 0;
    s->state = RS_IDLE;
    s->running_state = 0;
    while (s->running_state == 0) {
        n = read (s->fd, buf, 256);
        if (n > 0) {
            int i;

            for (i = 0; i < n; i++)
                gdb_read_byte (s, buf[i]);
        } else if (n == 0 || errno != EAGAIN) {
            /* XXX: Connection closed.  Should probably wait for another
               connection before continuing.  */
            return sig;
        }
    }
    sig = s->signal;
    s->signal = 0;
    return sig;
}
/* Tell the remote gdb that the process has exited due to SIG.  */
void gdb_signalled(CPUArchState *env, int sig)
{
    GDBState *s;
    char buf[4];

    s = gdbserver_state;
    if (gdbserver_fd < 0 || s->fd < 0)
        return;

    snprintf(buf, sizeof(buf), "X%02x", target_signal_to_gdb (sig));
    put_packet(s, buf);
}
static void gdb_accept(void)
{
    GDBState *s;
    struct sockaddr_in sockaddr;
    socklen_t len;
    int val, fd;

    for(;;) {
        len = sizeof(sockaddr);
        fd = accept(gdbserver_fd, (struct sockaddr *)&sockaddr, &len);
        if (fd < 0 && errno != EINTR) {
            perror("accept");
            return;
        } else if (fd >= 0) {
#ifndef _WIN32
            fcntl(fd, F_SETFD, FD_CLOEXEC);
#endif
            break;
        }
    }

    /* set short latency */
    val = 1;
    setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (char *)&val, sizeof(val));

    s = g_malloc0(sizeof(GDBState));
    s->c_cpu = first_cpu;
    s->g_cpu = first_cpu;
    s->fd = fd;
    gdb_has_xml = 0;

    gdbserver_state = s;

    fcntl(fd, F_SETFL, O_NONBLOCK);
}
static int gdbserver_open(int port)
{
    struct sockaddr_in sockaddr;
    int fd, val, ret;

    fd = socket(PF_INET, SOCK_STREAM, 0);
    if (fd < 0) {
        perror("socket");
        return -1;
    }
#ifndef _WIN32
    fcntl(fd, F_SETFD, FD_CLOEXEC);
#endif

    /* allow fast reuse */
    val = 1;
    setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *)&val, sizeof(val));

    sockaddr.sin_family = AF_INET;
    sockaddr.sin_port = htons(port);
    sockaddr.sin_addr.s_addr = 0;
    ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
    if (ret < 0) {
        perror("bind");
        close(fd);
        return -1;
    }
    ret = listen(fd, 0);
    if (ret < 0) {
        perror("listen");
        close(fd);
        return -1;
    }
    return fd;
}
int gdbserver_start(int port)
{
    gdbserver_fd = gdbserver_open(port);
    if (gdbserver_fd < 0)
        return -1;
    /* accept connections */
    gdb_accept();
    return 0;
}
/* Disable gdb stub for child processes.  */
void gdbserver_fork(CPUArchState *env)
{
    GDBState *s = gdbserver_state;
    if (gdbserver_fd < 0 || s->fd < 0)
        return;
    close(s->fd);
    s->fd = -1;
    cpu_breakpoint_remove_all(env, BP_GDB);
    cpu_watchpoint_remove_all(env, BP_GDB);
}
#else /* !CONFIG_USER_ONLY */
static int gdb_chr_can_receive(void *opaque)
{
  /* We can handle an arbitrarily large amount of data.
   Pick the maximum packet size, which is as good as anything.  */
  return MAX_PACKET_LENGTH;
}

static void gdb_chr_receive(void *opaque, const uint8_t *buf, int size)
{
    int i;

    for (i = 0; i < size; i++) {
        gdb_read_byte(gdbserver_state, buf[i]);
    }
}

static void gdb_chr_event(void *opaque, int event)
{
    switch (event) {
    case CHR_EVENT_OPENED:
        vm_stop(RUN_STATE_PAUSED);
        gdb_has_xml = 0;
        break;
    default:
        break;
    }
}
static void gdb_monitor_output(GDBState *s, const char *msg, int len)
{
    char buf[MAX_PACKET_LENGTH];

    buf[0] = 'O';
    if (len > (MAX_PACKET_LENGTH/2) - 1)
        len = (MAX_PACKET_LENGTH/2) - 1;
    memtohex(buf + 1, (uint8_t *)msg, len);
    put_packet(s, buf);
}

static int gdb_monitor_write(CharDriverState *chr, const uint8_t *buf, int len)
{
    const char *p = (const char *)buf;
    int max_sz;

    max_sz = (sizeof(gdbserver_state->last_packet) - 2) / 2;
    for (;;) {
        if (len <= max_sz) {
            gdb_monitor_output(gdbserver_state, p, len);
            break;
        }
        gdb_monitor_output(gdbserver_state, p, max_sz);
        p += max_sz;
        len -= max_sz;
    }
    return len;
}
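
/* Illustrative note (not from the original): monitor output is forwarded to
 * gdb as console-output ('O') packets, i.e. the letter 'O' followed by the
 * hex encoding of the text.  For example the monitor reply "ok\n" would be
 * sent with the packet payload "O6f6b0a".
 */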
#ifndef _WIN32
static void gdb_sigterm_handler(int signal)
{
    if (runstate_is_running()) {
        vm_stop(RUN_STATE_PAUSED);
    }
}
#endif
int gdbserver_start(const char *device)
{
    GDBState *s;
    char gdbstub_device_name[128];
    CharDriverState *chr = NULL;
    CharDriverState *mon_chr;

    if (!device)
        return -1;
    if (strcmp(device, "none") != 0) {
        if (strstart(device, "tcp:", NULL)) {
            /* enforce required TCP attributes */
            snprintf(gdbstub_device_name, sizeof(gdbstub_device_name),
                     "%s,nowait,nodelay,server", device);
            device = gdbstub_device_name;
        }
#ifndef _WIN32
        else if (strcmp(device, "stdio") == 0) {
            struct sigaction act;

            memset(&act, 0, sizeof(act));
            act.sa_handler = gdb_sigterm_handler;
            sigaction(SIGINT, &act, NULL);
        }
#endif
        chr = qemu_chr_new("gdb", device, NULL);
        if (!chr)
            return -1;

        qemu_chr_add_handlers(chr, gdb_chr_can_receive, gdb_chr_receive,
                              gdb_chr_event, NULL);
    }

    s = gdbserver_state;
    if (!s) {
        s = g_malloc0(sizeof(GDBState));
        gdbserver_state = s;

        qemu_add_vm_change_state_handler(gdb_vm_state_change, NULL);

        /* Initialize a monitor terminal for gdb */
        mon_chr = g_malloc0(sizeof(*mon_chr));
        mon_chr->chr_write = gdb_monitor_write;
        monitor_init(mon_chr, 0);
    } else {
        if (s->chr)
            qemu_chr_delete(s->chr);
        mon_chr = s->mon_chr;
        memset(s, 0, sizeof(GDBState));
    }
    s->c_cpu = first_cpu;
    s->g_cpu = first_cpu;
    s->chr = chr;
    s->state = chr ? RS_IDLE : RS_INACTIVE;
    s->mon_chr = mon_chr;
    s->current_syscall_cb = NULL;

    return 0;
}
#endif