4 * Copyright (c) 2003-2005 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qapi/error.h"
21 #include "qemu/error-report.h"
22 #include "qemu/cutils.h"
23 #include "trace-root.h"
24 #ifdef CONFIG_USER_ONLY
27 #include "monitor/monitor.h"
28 #include "chardev/char.h"
29 #include "chardev/char-fe.h"
30 #include "sysemu/sysemu.h"
31 #include "exec/gdbstub.h"
32 #include "hw/cpu/cluster.h"
35 #define MAX_PACKET_LENGTH 4096
37 #include "qemu/sockets.h"
38 #include "sysemu/hw_accel.h"
39 #include "sysemu/kvm.h"
40 #include "exec/semihost.h"
41 #include "exec/exec-all.h"
43 #ifdef CONFIG_USER_ONLY
44 #define GDB_ATTACHED "0"
46 #define GDB_ATTACHED "1"
49 static inline int target_memory_rw_debug(CPUState
*cpu
, target_ulong addr
,
50 uint8_t *buf
, int len
, bool is_write
)
52 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
54 if (cc
->memory_rw_debug
) {
55 return cc
->memory_rw_debug(cpu
, addr
, buf
, len
, is_write
);
57 return cpu_memory_rw_debug(cpu
, addr
, buf
, len
, is_write
);
60 /* Return the GDB index for a given vCPU state.
62 * For user mode this is simply the thread id. In system mode GDB
63 * numbers CPUs from 1 as 0 is reserved as an "any cpu" index.
65 static inline int cpu_gdb_index(CPUState
*cpu
)
67 #if defined(CONFIG_USER_ONLY)
68 TaskState
*ts
= (TaskState
*) cpu
->opaque
;
71 return cpu
->cpu_index
+ 1;
84 GDB_SIGNAL_UNKNOWN
= 143
87 #ifdef CONFIG_USER_ONLY
89 /* Map target signal numbers to GDB protocol signal numbers and vice
90 * versa. For user emulation's currently supported systems, we can
91 * assume most signals are defined.
94 static int gdb_signal_table
[] = {
254 /* In system mode we only need SIGINT and SIGTRAP; other signals
255 are not yet supported. */
262 static int gdb_signal_table
[] = {
272 #ifdef CONFIG_USER_ONLY
273 static int target_signal_to_gdb (int sig
)
276 for (i
= 0; i
< ARRAY_SIZE (gdb_signal_table
); i
++)
277 if (gdb_signal_table
[i
] == sig
)
279 return GDB_SIGNAL_UNKNOWN
;
283 static int gdb_signal_to_target (int sig
)
285 if (sig
< ARRAY_SIZE (gdb_signal_table
))
286 return gdb_signal_table
[sig
];
291 typedef struct GDBRegisterState
{
297 struct GDBRegisterState
*next
;
300 typedef struct GDBProcess
{
304 char target_xml
[1024];
316 typedef struct GDBState
{
317 CPUState
*c_cpu
; /* current CPU for step/continue ops */
318 CPUState
*g_cpu
; /* current CPU for other ops */
319 CPUState
*query_cpu
; /* for q{f|s}ThreadInfo */
320 enum RSState state
; /* parsing state */
321 char line_buf
[MAX_PACKET_LENGTH
];
323 int line_sum
; /* running checksum */
324 int line_csum
; /* checksum at the end of the packet */
325 uint8_t last_packet
[MAX_PACKET_LENGTH
+ 4];
328 #ifdef CONFIG_USER_ONLY
336 GDBProcess
*processes
;
338 char syscall_buf
[256];
339 gdb_syscall_complete_cb current_syscall_cb
;
342 /* By default use no IRQs and no timers while single stepping so as to
343 * make single stepping like an ICE HW step.
345 static int sstep_flags
= SSTEP_ENABLE
|SSTEP_NOIRQ
|SSTEP_NOTIMER
;
347 static GDBState
*gdbserver_state
;
351 #ifdef CONFIG_USER_ONLY
352 /* XXX: This is not thread safe. Do we care? */
353 static int gdbserver_fd
= -1;
355 static int get_char(GDBState
*s
)
361 ret
= qemu_recv(s
->fd
, &ch
, 1, 0);
363 if (errno
== ECONNRESET
)
367 } else if (ret
== 0) {
385 /* Decide if either remote gdb syscalls or native file IO should be used. */
386 int use_gdb_syscalls(void)
388 SemihostingTarget target
= semihosting_get_target();
389 if (target
== SEMIHOSTING_TARGET_NATIVE
) {
390 /* -semihosting-config target=native */
392 } else if (target
== SEMIHOSTING_TARGET_GDB
) {
393 /* -semihosting-config target=gdb */
397 /* -semihosting-config target=auto */
398 /* On the first call check if gdb is connected and remember. */
399 if (gdb_syscall_mode
== GDB_SYS_UNKNOWN
) {
400 gdb_syscall_mode
= (gdbserver_state
? GDB_SYS_ENABLED
403 return gdb_syscall_mode
== GDB_SYS_ENABLED
;
406 /* Resume execution. */
407 static inline void gdb_continue(GDBState
*s
)
410 #ifdef CONFIG_USER_ONLY
411 s
->running_state
= 1;
412 trace_gdbstub_op_continue();
414 if (!runstate_needs_reset()) {
415 trace_gdbstub_op_continue();
422 * Resume execution, per CPU actions. For user-mode emulation it's
423 * equivalent to gdb_continue.
425 static int gdb_continue_partial(GDBState
*s
, char *newstates
)
429 #ifdef CONFIG_USER_ONLY
431 * This is not exactly accurate, but it's an improvement compared to the
432 * previous situation, where only one CPU would be single-stepped.
435 if (newstates
[cpu
->cpu_index
] == 's') {
436 trace_gdbstub_op_stepping(cpu
->cpu_index
);
437 cpu_single_step(cpu
, sstep_flags
);
440 s
->running_state
= 1;
444 if (!runstate_needs_reset()) {
445 if (vm_prepare_start()) {
450 switch (newstates
[cpu
->cpu_index
]) {
453 break; /* nothing to do here */
455 trace_gdbstub_op_stepping(cpu
->cpu_index
);
456 cpu_single_step(cpu
, sstep_flags
);
461 trace_gdbstub_op_continue_cpu(cpu
->cpu_index
);
472 qemu_clock_enable(QEMU_CLOCK_VIRTUAL
, true);
478 static void put_buffer(GDBState
*s
, const uint8_t *buf
, int len
)
480 #ifdef CONFIG_USER_ONLY
484 ret
= send(s
->fd
, buf
, len
, 0);
494 /* XXX this blocks entire thread. Rewrite to use
495 * qemu_chr_fe_write and background I/O callbacks */
496 qemu_chr_fe_write_all(&s
->chr
, buf
, len
);
/* Convert a single hex digit character to its value (0-15).
 * Non-hex input yields 0. */
static inline int fromhex(int v)
{
    if (v >= '0' && v <= '9') {
        return v - '0';
    }
    if (v >= 'A' && v <= 'F') {
        return v - 'A' + 10;
    }
    if (v >= 'a' && v <= 'f') {
        return v - 'a' + 10;
    }
    return 0;
}
512 static inline int tohex(int v
)
/* writes 2*len+1 bytes in buf: each input byte becomes two lowercase
 * hex digits, followed by a terminating NUL. */
static void memtohex(char *buf, const uint8_t *mem, int len)
{
    char *q = buf;
    int i, c;

    for (i = 0; i < len; i++) {
        c = mem[i];
        *q++ = tohex(c >> 4);
        *q++ = tohex(c & 0xf);
    }
    *q = '\0';
}
/* Decode len bytes from the hex string buf into mem; consumes 2*len
 * characters of input (no bounds checking here — callers validate). */
static void hextomem(uint8_t *mem, const char *buf, int len)
{
    int i;

    for (i = 0; i < len; i++) {
        mem[i] = (fromhex(buf[0]) << 4) | fromhex(buf[1]);
        buf += 2;
    }
}
544 static void hexdump(const char *buf
, int len
,
545 void (*trace_fn
)(size_t ofs
, char const *text
))
547 char line_buffer
[3 * 16 + 4 + 16 + 1];
550 for (i
= 0; i
< len
|| (i
& 0xF); ++i
) {
551 size_t byte_ofs
= i
& 15;
554 memset(line_buffer
, ' ', 3 * 16 + 4 + 16);
555 line_buffer
[3 * 16 + 4 + 16] = 0;
558 size_t col_group
= (i
>> 2) & 3;
559 size_t hex_col
= byte_ofs
* 3 + col_group
;
560 size_t txt_col
= 3 * 16 + 4 + byte_ofs
;
565 line_buffer
[hex_col
+ 0] = tohex((value
>> 4) & 0xF);
566 line_buffer
[hex_col
+ 1] = tohex((value
>> 0) & 0xF);
567 line_buffer
[txt_col
+ 0] = (value
>= ' ' && value
< 127)
573 trace_fn(i
& -16, line_buffer
);
577 /* return -1 if error, 0 if OK */
578 static int put_packet_binary(GDBState
*s
, const char *buf
, int len
, bool dump
)
583 if (dump
&& trace_event_get_state_backends(TRACE_GDBSTUB_IO_BINARYREPLY
)) {
584 hexdump(buf
, len
, trace_gdbstub_io_binaryreply
);
593 for(i
= 0; i
< len
; i
++) {
597 *(p
++) = tohex((csum
>> 4) & 0xf);
598 *(p
++) = tohex((csum
) & 0xf);
600 s
->last_packet_len
= p
- s
->last_packet
;
601 put_buffer(s
, (uint8_t *)s
->last_packet
, s
->last_packet_len
);
603 #ifdef CONFIG_USER_ONLY
616 /* return -1 if error, 0 if OK */
617 static int put_packet(GDBState
*s
, const char *buf
)
619 trace_gdbstub_io_reply(buf
);
621 return put_packet_binary(s
, buf
, strlen(buf
), false);
624 /* Encode data using the encoding for 'x' packets. */
625 static int memtox(char *buf
, const char *mem
, int len
)
633 case '#': case '$': case '*': case '}':
645 static uint32_t gdb_get_cpu_pid(const GDBState
*s
, CPUState
*cpu
)
647 #ifndef CONFIG_USER_ONLY
648 gchar
*path
, *name
= NULL
;
650 CPUClusterState
*cluster
;
653 path
= object_get_canonical_path(OBJECT(cpu
));
656 /* Return the default process' PID */
657 ret
= s
->processes
[s
->process_num
- 1].pid
;
661 name
= object_get_canonical_path_component(OBJECT(cpu
));
662 assert(name
!= NULL
);
665 * Retrieve the CPU parent path by removing the last '/' and the CPU name
666 * from the CPU canonical path.
668 path
[strlen(path
) - strlen(name
) - 1] = '\0';
670 obj
= object_resolve_path_type(path
, TYPE_CPU_CLUSTER
, NULL
);
673 /* Return the default process' PID */
674 ret
= s
->processes
[s
->process_num
- 1].pid
;
678 cluster
= CPU_CLUSTER(obj
);
679 ret
= cluster
->cluster_id
+ 1;
688 /* TODO: In user mode, we should use the task state PID */
689 return s
->processes
[s
->process_num
- 1].pid
;
693 static GDBProcess
*gdb_get_process(const GDBState
*s
, uint32_t pid
)
698 /* 0 means any process, we take the first one */
699 return &s
->processes
[0];
702 for (i
= 0; i
< s
->process_num
; i
++) {
703 if (s
->processes
[i
].pid
== pid
) {
704 return &s
->processes
[i
];
711 static GDBProcess
*gdb_get_cpu_process(const GDBState
*s
, CPUState
*cpu
)
713 return gdb_get_process(s
, gdb_get_cpu_pid(s
, cpu
));
716 static CPUState
*find_cpu(uint32_t thread_id
)
721 if (cpu_gdb_index(cpu
) == thread_id
) {
729 static CPUState
*get_first_cpu_in_process(const GDBState
*s
,
735 if (gdb_get_cpu_pid(s
, cpu
) == process
->pid
) {
743 static CPUState
*gdb_next_cpu_in_process(const GDBState
*s
, CPUState
*cpu
)
745 uint32_t pid
= gdb_get_cpu_pid(s
, cpu
);
749 if (gdb_get_cpu_pid(s
, cpu
) == pid
) {
759 static CPUState
*gdb_get_cpu(const GDBState
*s
, uint32_t pid
, uint32_t tid
)
765 /* 0 means any thread, we take the first one */
775 process
= gdb_get_cpu_process(s
, cpu
);
777 if (process
->pid
!= pid
) {
781 if (!process
->attached
) {
788 /* Return the cpu following @cpu, while ignoring unattached processes. */
789 static CPUState
*gdb_next_attached_cpu(const GDBState
*s
, CPUState
*cpu
)
794 if (gdb_get_cpu_process(s
, cpu
)->attached
) {
804 /* Return the first attached cpu */
805 static CPUState
*gdb_first_attached_cpu(const GDBState
*s
)
807 CPUState
*cpu
= first_cpu
;
808 GDBProcess
*process
= gdb_get_cpu_process(s
, cpu
);
810 if (!process
->attached
) {
811 return gdb_next_attached_cpu(s
, cpu
);
817 static const char *get_feature_xml(const GDBState
*s
, const char *p
,
818 const char **newp
, GDBProcess
*process
)
823 CPUState
*cpu
= get_first_cpu_in_process(s
, process
);
824 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
827 while (p
[len
] && p
[len
] != ':')
832 if (strncmp(p
, "target.xml", len
) == 0) {
833 char *buf
= process
->target_xml
;
834 const size_t buf_sz
= sizeof(process
->target_xml
);
836 /* Generate the XML description for this CPU. */
841 "<?xml version=\"1.0\"?>"
842 "<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
844 if (cc
->gdb_arch_name
) {
845 gchar
*arch
= cc
->gdb_arch_name(cpu
);
846 pstrcat(buf
, buf_sz
, "<architecture>");
847 pstrcat(buf
, buf_sz
, arch
);
848 pstrcat(buf
, buf_sz
, "</architecture>");
851 pstrcat(buf
, buf_sz
, "<xi:include href=\"");
852 pstrcat(buf
, buf_sz
, cc
->gdb_core_xml_file
);
853 pstrcat(buf
, buf_sz
, "\"/>");
854 for (r
= cpu
->gdb_regs
; r
; r
= r
->next
) {
855 pstrcat(buf
, buf_sz
, "<xi:include href=\"");
856 pstrcat(buf
, buf_sz
, r
->xml
);
857 pstrcat(buf
, buf_sz
, "\"/>");
859 pstrcat(buf
, buf_sz
, "</target>");
863 if (cc
->gdb_get_dynamic_xml
) {
864 char *xmlname
= g_strndup(p
, len
);
865 const char *xml
= cc
->gdb_get_dynamic_xml(cpu
, xmlname
);
873 name
= xml_builtin
[i
][0];
874 if (!name
|| (strncmp(name
, p
, len
) == 0 && strlen(name
) == len
))
877 return name
? xml_builtin
[i
][1] : NULL
;
880 static int gdb_read_register(CPUState
*cpu
, uint8_t *mem_buf
, int reg
)
882 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
883 CPUArchState
*env
= cpu
->env_ptr
;
886 if (reg
< cc
->gdb_num_core_regs
) {
887 return cc
->gdb_read_register(cpu
, mem_buf
, reg
);
890 for (r
= cpu
->gdb_regs
; r
; r
= r
->next
) {
891 if (r
->base_reg
<= reg
&& reg
< r
->base_reg
+ r
->num_regs
) {
892 return r
->get_reg(env
, mem_buf
, reg
- r
->base_reg
);
898 static int gdb_write_register(CPUState
*cpu
, uint8_t *mem_buf
, int reg
)
900 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
901 CPUArchState
*env
= cpu
->env_ptr
;
904 if (reg
< cc
->gdb_num_core_regs
) {
905 return cc
->gdb_write_register(cpu
, mem_buf
, reg
);
908 for (r
= cpu
->gdb_regs
; r
; r
= r
->next
) {
909 if (r
->base_reg
<= reg
&& reg
< r
->base_reg
+ r
->num_regs
) {
910 return r
->set_reg(env
, mem_buf
, reg
- r
->base_reg
);
916 /* Register a supplemental set of CPU registers. If g_pos is nonzero it
917 specifies the first register number and these registers are included in
918 a standard "g" packet. Direction is relative to gdb, i.e. get_reg is
919 gdb reading a CPU register, and set_reg is gdb modifying a CPU register.
922 void gdb_register_coprocessor(CPUState
*cpu
,
923 gdb_reg_cb get_reg
, gdb_reg_cb set_reg
,
924 int num_regs
, const char *xml
, int g_pos
)
927 GDBRegisterState
**p
;
931 /* Check for duplicates. */
932 if (strcmp((*p
)->xml
, xml
) == 0)
937 s
= g_new0(GDBRegisterState
, 1);
938 s
->base_reg
= cpu
->gdb_num_regs
;
939 s
->num_regs
= num_regs
;
940 s
->get_reg
= get_reg
;
941 s
->set_reg
= set_reg
;
944 /* Add to end of list. */
945 cpu
->gdb_num_regs
+= num_regs
;
948 if (g_pos
!= s
->base_reg
) {
949 error_report("Error: Bad gdb register numbering for '%s', "
950 "expected %d got %d", xml
, g_pos
, s
->base_reg
);
952 cpu
->gdb_num_g_regs
= cpu
->gdb_num_regs
;
957 #ifndef CONFIG_USER_ONLY
958 /* Translate GDB watchpoint type to a flags value for cpu_watchpoint_* */
959 static inline int xlat_gdb_type(CPUState
*cpu
, int gdbtype
)
961 static const int xlat
[] = {
962 [GDB_WATCHPOINT_WRITE
] = BP_GDB
| BP_MEM_WRITE
,
963 [GDB_WATCHPOINT_READ
] = BP_GDB
| BP_MEM_READ
,
964 [GDB_WATCHPOINT_ACCESS
] = BP_GDB
| BP_MEM_ACCESS
,
967 CPUClass
*cc
= CPU_GET_CLASS(cpu
);
968 int cputype
= xlat
[gdbtype
];
970 if (cc
->gdb_stop_before_watchpoint
) {
971 cputype
|= BP_STOP_BEFORE_ACCESS
;
977 static int gdb_breakpoint_insert(target_ulong addr
, target_ulong len
, int type
)
983 return kvm_insert_breakpoint(gdbserver_state
->c_cpu
, addr
, len
, type
);
987 case GDB_BREAKPOINT_SW
:
988 case GDB_BREAKPOINT_HW
:
990 err
= cpu_breakpoint_insert(cpu
, addr
, BP_GDB
, NULL
);
996 #ifndef CONFIG_USER_ONLY
997 case GDB_WATCHPOINT_WRITE
:
998 case GDB_WATCHPOINT_READ
:
999 case GDB_WATCHPOINT_ACCESS
:
1001 err
= cpu_watchpoint_insert(cpu
, addr
, len
,
1002 xlat_gdb_type(cpu
, type
), NULL
);
1014 static int gdb_breakpoint_remove(target_ulong addr
, target_ulong len
, int type
)
1019 if (kvm_enabled()) {
1020 return kvm_remove_breakpoint(gdbserver_state
->c_cpu
, addr
, len
, type
);
1024 case GDB_BREAKPOINT_SW
:
1025 case GDB_BREAKPOINT_HW
:
1027 err
= cpu_breakpoint_remove(cpu
, addr
, BP_GDB
);
1033 #ifndef CONFIG_USER_ONLY
1034 case GDB_WATCHPOINT_WRITE
:
1035 case GDB_WATCHPOINT_READ
:
1036 case GDB_WATCHPOINT_ACCESS
:
1038 err
= cpu_watchpoint_remove(cpu
, addr
, len
,
1039 xlat_gdb_type(cpu
, type
));
1050 static void gdb_breakpoint_remove_all(void)
1054 if (kvm_enabled()) {
1055 kvm_remove_all_breakpoints(gdbserver_state
->c_cpu
);
1060 cpu_breakpoint_remove_all(cpu
, BP_GDB
);
1061 #ifndef CONFIG_USER_ONLY
1062 cpu_watchpoint_remove_all(cpu
, BP_GDB
);
1067 static void gdb_set_cpu_pc(GDBState
*s
, target_ulong pc
)
1069 CPUState
*cpu
= s
->c_cpu
;
1071 cpu_synchronize_state(cpu
);
1072 cpu_set_pc(cpu
, pc
);
1075 static char *gdb_fmt_thread_id(const GDBState
*s
, CPUState
*cpu
,
1076 char *buf
, size_t buf_size
)
1078 if (s
->multiprocess
) {
1079 snprintf(buf
, buf_size
, "p%02x.%02x",
1080 gdb_get_cpu_pid(s
, cpu
), cpu_gdb_index(cpu
));
1082 snprintf(buf
, buf_size
, "%02x", cpu_gdb_index(cpu
));
1088 typedef enum GDBThreadIdKind
{
1090 GDB_ALL_THREADS
, /* One process, all threads */
1095 static GDBThreadIdKind
read_thread_id(const char *buf
, const char **end_buf
,
1096 uint32_t *pid
, uint32_t *tid
)
1103 ret
= qemu_strtoul(buf
, &buf
, 16, &p
);
1106 return GDB_READ_THREAD_ERR
;
1115 ret
= qemu_strtoul(buf
, &buf
, 16, &t
);
1118 return GDB_READ_THREAD_ERR
;
1124 return GDB_ALL_PROCESSES
;
1132 return GDB_ALL_THREADS
;
1139 return GDB_ONE_THREAD
;
/* Return non-zero if @p starts with @query followed immediately by
 * end-of-string or the given @separator character. */
static int is_query_packet(const char *p, const char *query, char separator)
{
    size_t qlen = strlen(query);

    if (strncmp(p, query, qlen) != 0) {
        return 0;
    }
    return p[qlen] == '\0' || p[qlen] == separator;
}
1151 * gdb_handle_vcont - Parses and handles a vCont packet.
1152 * returns -ENOTSUP if a command is unsupported, -EINVAL or -ERANGE if there is
1153 * a format error, 0 on success.
1155 static int gdb_handle_vcont(GDBState
*s
, const char *p
)
1157 int res
, signal
= 0;
1162 GDBProcess
*process
;
1164 #ifdef CONFIG_USER_ONLY
1165 int max_cpus
= 1; /* global variable max_cpus exists only in system mode */
1168 max_cpus
= max_cpus
<= cpu
->cpu_index
? cpu
->cpu_index
+ 1 : max_cpus
;
1171 /* uninitialised CPUs stay 0 */
1172 newstates
= g_new0(char, max_cpus
);
1174 /* mark valid CPUs with 1 */
1176 newstates
[cpu
->cpu_index
] = 1;
1180 * res keeps track of what error we are returning, with -ENOTSUP meaning
1181 * that the command is unknown or unsupported, thus returning an empty
1182 * packet, while -EINVAL and -ERANGE cause an E22 packet, due to invalid,
1183 * or incorrect parameters passed.
1193 if (cur_action
== 'C' || cur_action
== 'S') {
1194 cur_action
= qemu_tolower(cur_action
);
1195 res
= qemu_strtoul(p
+ 1, &p
, 16, &tmp
);
1199 signal
= gdb_signal_to_target(tmp
);
1200 } else if (cur_action
!= 'c' && cur_action
!= 's') {
1201 /* unknown/invalid/unsupported command */
1211 switch (read_thread_id(p
, &p
, &pid
, &tid
)) {
1212 case GDB_READ_THREAD_ERR
:
1216 case GDB_ALL_PROCESSES
:
1217 cpu
= gdb_first_attached_cpu(s
);
1219 if (newstates
[cpu
->cpu_index
] == 1) {
1220 newstates
[cpu
->cpu_index
] = cur_action
;
1223 cpu
= gdb_next_attached_cpu(s
, cpu
);
1227 case GDB_ALL_THREADS
:
1228 process
= gdb_get_process(s
, pid
);
1230 if (!process
->attached
) {
1235 cpu
= get_first_cpu_in_process(s
, process
);
1237 if (newstates
[cpu
->cpu_index
] == 1) {
1238 newstates
[cpu
->cpu_index
] = cur_action
;
1241 cpu
= gdb_next_cpu_in_process(s
, cpu
);
1245 case GDB_ONE_THREAD
:
1246 cpu
= gdb_get_cpu(s
, pid
, tid
);
1248 /* invalid CPU/thread specified */
1254 /* only use if no previous match occourred */
1255 if (newstates
[cpu
->cpu_index
] == 1) {
1256 newstates
[cpu
->cpu_index
] = cur_action
;
1262 gdb_continue_partial(s
, newstates
);
1270 static int gdb_handle_packet(GDBState
*s
, const char *line_buf
)
1273 GDBProcess
*process
;
1277 int ch
, reg_size
, type
, res
;
1278 uint8_t mem_buf
[MAX_PACKET_LENGTH
];
1279 char buf
[sizeof(mem_buf
) + 1 /* trailing NUL */];
1282 target_ulong addr
, len
;
1283 GDBThreadIdKind thread_kind
;
1285 trace_gdbstub_io_command(line_buf
);
1291 /* TODO: Make this return the correct value for user-mode. */
1292 snprintf(buf
, sizeof(buf
), "T%02xthread:%s;", GDB_SIGNAL_TRAP
,
1293 gdb_fmt_thread_id(s
, s
->c_cpu
, thread_id
, sizeof(thread_id
)));
1295 /* Remove all the breakpoints when this query is issued,
1296 * because gdb is doing and initial connect and the state
1297 * should be cleaned up.
1299 gdb_breakpoint_remove_all();
1303 addr
= strtoull(p
, (char **)&p
, 16);
1304 gdb_set_cpu_pc(s
, addr
);
1310 s
->signal
= gdb_signal_to_target (strtoul(p
, (char **)&p
, 16));
1311 if (s
->signal
== -1)
1316 if (strncmp(p
, "Cont", 4) == 0) {
1319 put_packet(s
, "vCont;c;C;s;S");
1323 res
= gdb_handle_vcont(s
, p
);
1326 if ((res
== -EINVAL
) || (res
== -ERANGE
)) {
1327 put_packet(s
, "E22");
1330 goto unknown_command
;
1334 goto unknown_command
;
1337 /* Kill the target */
1338 error_report("QEMU: Terminated via GDBstub");
1342 gdb_breakpoint_remove_all();
1343 gdb_syscall_mode
= GDB_SYS_DISABLED
;
1345 put_packet(s
, "OK");
1349 addr
= strtoull(p
, (char **)&p
, 16);
1350 gdb_set_cpu_pc(s
, addr
);
1352 cpu_single_step(s
->c_cpu
, sstep_flags
);
1360 ret
= strtoull(p
, (char **)&p
, 16);
1363 err
= strtoull(p
, (char **)&p
, 16);
1370 if (s
->current_syscall_cb
) {
1371 s
->current_syscall_cb(s
->c_cpu
, ret
, err
);
1372 s
->current_syscall_cb
= NULL
;
1375 put_packet(s
, "T02");
1382 cpu_synchronize_state(s
->g_cpu
);
1384 for (addr
= 0; addr
< s
->g_cpu
->gdb_num_g_regs
; addr
++) {
1385 reg_size
= gdb_read_register(s
->g_cpu
, mem_buf
+ len
, addr
);
1388 memtohex(buf
, mem_buf
, len
);
1392 cpu_synchronize_state(s
->g_cpu
);
1393 registers
= mem_buf
;
1394 len
= strlen(p
) / 2;
1395 hextomem((uint8_t *)registers
, p
, len
);
1396 for (addr
= 0; addr
< s
->g_cpu
->gdb_num_g_regs
&& len
> 0; addr
++) {
1397 reg_size
= gdb_write_register(s
->g_cpu
, registers
, addr
);
1399 registers
+= reg_size
;
1401 put_packet(s
, "OK");
1404 addr
= strtoull(p
, (char **)&p
, 16);
1407 len
= strtoull(p
, NULL
, 16);
1409 /* memtohex() doubles the required space */
1410 if (len
> MAX_PACKET_LENGTH
/ 2) {
1411 put_packet (s
, "E22");
1415 if (target_memory_rw_debug(s
->g_cpu
, addr
, mem_buf
, len
, false) != 0) {
1416 put_packet (s
, "E14");
1418 memtohex(buf
, mem_buf
, len
);
1423 addr
= strtoull(p
, (char **)&p
, 16);
1426 len
= strtoull(p
, (char **)&p
, 16);
1430 /* hextomem() reads 2*len bytes */
1431 if (len
> strlen(p
) / 2) {
1432 put_packet (s
, "E22");
1435 hextomem(mem_buf
, p
, len
);
1436 if (target_memory_rw_debug(s
->g_cpu
, addr
, mem_buf
, len
,
1438 put_packet(s
, "E14");
1440 put_packet(s
, "OK");
1444 /* Older gdb are really dumb, and don't use 'g' if 'p' is avaialable.
1445 This works, but can be very slow. Anything new enough to
1446 understand XML also knows how to use this properly. */
1448 goto unknown_command
;
1449 addr
= strtoull(p
, (char **)&p
, 16);
1450 reg_size
= gdb_read_register(s
->g_cpu
, mem_buf
, addr
);
1452 memtohex(buf
, mem_buf
, reg_size
);
1455 put_packet(s
, "E14");
1460 goto unknown_command
;
1461 addr
= strtoull(p
, (char **)&p
, 16);
1464 reg_size
= strlen(p
) / 2;
1465 hextomem(mem_buf
, p
, reg_size
);
1466 gdb_write_register(s
->g_cpu
, mem_buf
, addr
);
1467 put_packet(s
, "OK");
1471 type
= strtoul(p
, (char **)&p
, 16);
1474 addr
= strtoull(p
, (char **)&p
, 16);
1477 len
= strtoull(p
, (char **)&p
, 16);
1479 res
= gdb_breakpoint_insert(addr
, len
, type
);
1481 res
= gdb_breakpoint_remove(addr
, len
, type
);
1483 put_packet(s
, "OK");
1484 else if (res
== -ENOSYS
)
1487 put_packet(s
, "E22");
1492 thread_kind
= read_thread_id(p
, &p
, &pid
, &tid
);
1493 if (thread_kind
== GDB_READ_THREAD_ERR
) {
1494 put_packet(s
, "E22");
1498 if (thread_kind
!= GDB_ONE_THREAD
) {
1499 put_packet(s
, "OK");
1502 cpu
= gdb_get_cpu(s
, pid
, tid
);
1504 put_packet(s
, "E22");
1510 put_packet(s
, "OK");
1514 put_packet(s
, "OK");
1517 put_packet(s
, "E22");
1522 thread_kind
= read_thread_id(p
, &p
, &pid
, &tid
);
1523 if (thread_kind
== GDB_READ_THREAD_ERR
) {
1524 put_packet(s
, "E22");
1527 cpu
= gdb_get_cpu(s
, pid
, tid
);
1530 put_packet(s
, "OK");
1532 put_packet(s
, "E22");
1537 /* parse any 'q' packets here */
1538 if (!strcmp(p
,"qemu.sstepbits")) {
1539 /* Query Breakpoint bit definitions */
1540 snprintf(buf
, sizeof(buf
), "ENABLE=%x,NOIRQ=%x,NOTIMER=%x",
1546 } else if (is_query_packet(p
, "qemu.sstep", '=')) {
1547 /* Display or change the sstep_flags */
1550 /* Display current setting */
1551 snprintf(buf
, sizeof(buf
), "0x%x", sstep_flags
);
1556 type
= strtoul(p
, (char **)&p
, 16);
1558 put_packet(s
, "OK");
1560 } else if (strcmp(p
,"C") == 0) {
1562 * "Current thread" remains vague in the spec, so always return
1563 * the first thread of the current process (gdb returns the
1566 cpu
= get_first_cpu_in_process(s
, gdb_get_cpu_process(s
, s
->g_cpu
));
1567 snprintf(buf
, sizeof(buf
), "QC%s",
1568 gdb_fmt_thread_id(s
, cpu
, thread_id
, sizeof(thread_id
)));
1571 } else if (strcmp(p
,"fThreadInfo") == 0) {
1572 s
->query_cpu
= gdb_first_attached_cpu(s
);
1573 goto report_cpuinfo
;
1574 } else if (strcmp(p
,"sThreadInfo") == 0) {
1577 snprintf(buf
, sizeof(buf
), "m%s",
1578 gdb_fmt_thread_id(s
, s
->query_cpu
,
1579 thread_id
, sizeof(thread_id
)));
1581 s
->query_cpu
= gdb_next_attached_cpu(s
, s
->query_cpu
);
1585 } else if (strncmp(p
,"ThreadExtraInfo,", 16) == 0) {
1586 if (read_thread_id(p
+ 16, &p
, &pid
, &tid
) == GDB_READ_THREAD_ERR
) {
1587 put_packet(s
, "E22");
1590 cpu
= gdb_get_cpu(s
, pid
, tid
);
1592 cpu_synchronize_state(cpu
);
1594 if (s
->multiprocess
&& (s
->process_num
> 1)) {
1595 /* Print the CPU model and name in multiprocess mode */
1596 ObjectClass
*oc
= object_get_class(OBJECT(cpu
));
1597 const char *cpu_model
= object_class_get_name(oc
);
1599 object_get_canonical_path_component(OBJECT(cpu
));
1600 len
= snprintf((char *)mem_buf
, sizeof(buf
) / 2,
1601 "%s %s [%s]", cpu_model
, cpu_name
,
1602 cpu
->halted
? "halted " : "running");
1605 /* memtohex() doubles the required space */
1606 len
= snprintf((char *)mem_buf
, sizeof(buf
) / 2,
1607 "CPU#%d [%s]", cpu
->cpu_index
,
1608 cpu
->halted
? "halted " : "running");
1610 trace_gdbstub_op_extra_info((char *)mem_buf
);
1611 memtohex(buf
, mem_buf
, len
);
1616 #ifdef CONFIG_USER_ONLY
1617 else if (strcmp(p
, "Offsets") == 0) {
1618 TaskState
*ts
= s
->c_cpu
->opaque
;
1620 snprintf(buf
, sizeof(buf
),
1621 "Text=" TARGET_ABI_FMT_lx
";Data=" TARGET_ABI_FMT_lx
1622 ";Bss=" TARGET_ABI_FMT_lx
,
1623 ts
->info
->code_offset
,
1624 ts
->info
->data_offset
,
1625 ts
->info
->data_offset
);
1629 #else /* !CONFIG_USER_ONLY */
1630 else if (strncmp(p
, "Rcmd,", 5) == 0) {
1631 int len
= strlen(p
+ 5);
1633 if ((len
% 2) != 0) {
1634 put_packet(s
, "E01");
1638 hextomem(mem_buf
, p
+ 5, len
);
1640 qemu_chr_be_write(s
->mon_chr
, mem_buf
, len
);
1641 put_packet(s
, "OK");
1644 #endif /* !CONFIG_USER_ONLY */
1645 if (is_query_packet(p
, "Supported", ':')) {
1646 snprintf(buf
, sizeof(buf
), "PacketSize=%x", MAX_PACKET_LENGTH
);
1647 cc
= CPU_GET_CLASS(first_cpu
);
1648 if (cc
->gdb_core_xml_file
!= NULL
) {
1649 pstrcat(buf
, sizeof(buf
), ";qXfer:features:read+");
1654 if (strncmp(p
, "Xfer:features:read:", 19) == 0) {
1656 target_ulong total_len
;
1658 process
= gdb_get_cpu_process(s
, s
->g_cpu
);
1659 cc
= CPU_GET_CLASS(s
->g_cpu
);
1660 if (cc
->gdb_core_xml_file
== NULL
) {
1661 goto unknown_command
;
1666 xml
= get_feature_xml(s
, p
, &p
, process
);
1668 snprintf(buf
, sizeof(buf
), "E00");
1675 addr
= strtoul(p
, (char **)&p
, 16);
1678 len
= strtoul(p
, (char **)&p
, 16);
1680 total_len
= strlen(xml
);
1681 if (addr
> total_len
) {
1682 snprintf(buf
, sizeof(buf
), "E00");
1686 if (len
> (MAX_PACKET_LENGTH
- 5) / 2)
1687 len
= (MAX_PACKET_LENGTH
- 5) / 2;
1688 if (len
< total_len
- addr
) {
1690 len
= memtox(buf
+ 1, xml
+ addr
, len
);
1693 len
= memtox(buf
+ 1, xml
+ addr
, total_len
- addr
);
1695 put_packet_binary(s
, buf
, len
+ 1, true);
1698 if (is_query_packet(p
, "Attached", ':')) {
1699 put_packet(s
, GDB_ATTACHED
);
1702 /* Unrecognised 'q' command. */
1703 goto unknown_command
;
1707 /* put empty packet */
1715 void gdb_set_stop_cpu(CPUState
*cpu
)
1717 gdbserver_state
->c_cpu
= cpu
;
1718 gdbserver_state
->g_cpu
= cpu
;
1721 #ifndef CONFIG_USER_ONLY
1722 static void gdb_vm_state_change(void *opaque
, int running
, RunState state
)
1724 GDBState
*s
= gdbserver_state
;
1725 CPUState
*cpu
= s
->c_cpu
;
1731 if (running
|| s
->state
== RS_INACTIVE
) {
1734 /* Is there a GDB syscall waiting to be sent? */
1735 if (s
->current_syscall_cb
) {
1736 put_packet(s
, s
->syscall_buf
);
1741 /* No process attached */
1745 gdb_fmt_thread_id(s
, cpu
, thread_id
, sizeof(thread_id
));
1748 case RUN_STATE_DEBUG
:
1749 if (cpu
->watchpoint_hit
) {
1750 switch (cpu
->watchpoint_hit
->flags
& BP_MEM_ACCESS
) {
1761 trace_gdbstub_hit_watchpoint(type
, cpu_gdb_index(cpu
),
1762 (target_ulong
)cpu
->watchpoint_hit
->vaddr
);
1763 snprintf(buf
, sizeof(buf
),
1764 "T%02xthread:%s;%swatch:" TARGET_FMT_lx
";",
1765 GDB_SIGNAL_TRAP
, thread_id
, type
,
1766 (target_ulong
)cpu
->watchpoint_hit
->vaddr
);
1767 cpu
->watchpoint_hit
= NULL
;
1770 trace_gdbstub_hit_break();
1773 ret
= GDB_SIGNAL_TRAP
;
1775 case RUN_STATE_PAUSED
:
1776 trace_gdbstub_hit_paused();
1777 ret
= GDB_SIGNAL_INT
;
1779 case RUN_STATE_SHUTDOWN
:
1780 trace_gdbstub_hit_shutdown();
1781 ret
= GDB_SIGNAL_QUIT
;
1783 case RUN_STATE_IO_ERROR
:
1784 trace_gdbstub_hit_io_error();
1785 ret
= GDB_SIGNAL_IO
;
1787 case RUN_STATE_WATCHDOG
:
1788 trace_gdbstub_hit_watchdog();
1789 ret
= GDB_SIGNAL_ALRM
;
1791 case RUN_STATE_INTERNAL_ERROR
:
1792 trace_gdbstub_hit_internal_error();
1793 ret
= GDB_SIGNAL_ABRT
;
1795 case RUN_STATE_SAVE_VM
:
1796 case RUN_STATE_RESTORE_VM
:
1798 case RUN_STATE_FINISH_MIGRATE
:
1799 ret
= GDB_SIGNAL_XCPU
;
1802 trace_gdbstub_hit_unknown(state
);
1803 ret
= GDB_SIGNAL_UNKNOWN
;
1806 gdb_set_stop_cpu(cpu
);
1807 snprintf(buf
, sizeof(buf
), "T%02xthread:%s;", ret
, thread_id
);
1812 /* disable single step if it was enabled */
1813 cpu_single_step(cpu
, 0);
1817 /* Send a gdb syscall request.
1818 This accepts limited printf-style format specifiers, specifically:
1819 %x - target_ulong argument printed in hex.
1820 %lx - 64-bit argument printed in hex.
1821 %s - string pointer (target_ulong) and length (int) pair. */
1822 void gdb_do_syscallv(gdb_syscall_complete_cb cb
, const char *fmt
, va_list va
)
1830 s
= gdbserver_state
;
1833 s
->current_syscall_cb
= cb
;
1834 #ifndef CONFIG_USER_ONLY
1835 vm_stop(RUN_STATE_DEBUG
);
1838 p_end
= &s
->syscall_buf
[sizeof(s
->syscall_buf
)];
1845 addr
= va_arg(va
, target_ulong
);
1846 p
+= snprintf(p
, p_end
- p
, TARGET_FMT_lx
, addr
);
1849 if (*(fmt
++) != 'x')
1851 i64
= va_arg(va
, uint64_t);
1852 p
+= snprintf(p
, p_end
- p
, "%" PRIx64
, i64
);
1855 addr
= va_arg(va
, target_ulong
);
1856 p
+= snprintf(p
, p_end
- p
, TARGET_FMT_lx
"/%x",
1857 addr
, va_arg(va
, int));
1861 error_report("gdbstub: Bad syscall format string '%s'",
1870 #ifdef CONFIG_USER_ONLY
1871 put_packet(s
, s
->syscall_buf
);
1872 /* Return control to gdb for it to process the syscall request.
1873 * Since the protocol requires that gdb hands control back to us
1874 * using a "here are the results" F packet, we don't need to check
1875 * gdb_handlesig's return value (which is the signal to deliver if
1876 * execution was resumed via a continue packet).
1878 gdb_handlesig(s
->c_cpu
, 0);
1880 /* In this case wait to send the syscall packet until notification that
1881 the CPU has stopped. This must be done because if the packet is sent
1882 now the reply from the syscall request could be received while the CPU
1883 is still in the running state, which can cause packets to be dropped
1884 and state transition 'T' packets to be sent while the syscall is still
1886 qemu_cpu_kick(s
->c_cpu
);
1890 void gdb_do_syscall(gdb_syscall_complete_cb cb
, const char *fmt
, ...)
1895 gdb_do_syscallv(cb
, fmt
, va
);
1899 static void gdb_read_byte(GDBState
*s
, int ch
)
1903 #ifndef CONFIG_USER_ONLY
1904 if (s
->last_packet_len
) {
1905 /* Waiting for a response to the last packet. If we see the start
1906 of a new command then abandon the previous response. */
1908 trace_gdbstub_err_got_nack();
1909 put_buffer(s
, (uint8_t *)s
->last_packet
, s
->last_packet_len
);
1910 } else if (ch
== '+') {
1911 trace_gdbstub_io_got_ack();
1913 trace_gdbstub_io_got_unexpected((uint8_t)ch
);
1916 if (ch
== '+' || ch
== '$')
1917 s
->last_packet_len
= 0;
1921 if (runstate_is_running()) {
1922 /* when the CPU is running, we cannot do anything except stop
1923 it when receiving a char */
1924 vm_stop(RUN_STATE_PAUSED
);
1931 /* start of command packet */
1932 s
->line_buf_index
= 0;
1934 s
->state
= RS_GETLINE
;
1936 trace_gdbstub_err_garbage((uint8_t)ch
);
1941 /* start escape sequence */
1942 s
->state
= RS_GETLINE_ESC
;
1944 } else if (ch
== '*') {
1945 /* start run length encoding sequence */
1946 s
->state
= RS_GETLINE_RLE
;
1948 } else if (ch
== '#') {
1949 /* end of command, start of checksum*/
1950 s
->state
= RS_CHKSUM1
;
1951 } else if (s
->line_buf_index
>= sizeof(s
->line_buf
) - 1) {
1952 trace_gdbstub_err_overrun();
1955 /* unescaped command character */
1956 s
->line_buf
[s
->line_buf_index
++] = ch
;
1960 case RS_GETLINE_ESC
:
1962 /* unexpected end of command in escape sequence */
1963 s
->state
= RS_CHKSUM1
;
1964 } else if (s
->line_buf_index
>= sizeof(s
->line_buf
) - 1) {
1965 /* command buffer overrun */
1966 trace_gdbstub_err_overrun();
1969 /* parse escaped character and leave escape state */
1970 s
->line_buf
[s
->line_buf_index
++] = ch
^ 0x20;
1972 s
->state
= RS_GETLINE
;
1975 case RS_GETLINE_RLE
:
1977 /* invalid RLE count encoding */
1978 trace_gdbstub_err_invalid_repeat((uint8_t)ch
);
1979 s
->state
= RS_GETLINE
;
1981 /* decode repeat length */
1982 int repeat
= (unsigned char)ch
- ' ' + 3;
1983 if (s
->line_buf_index
+ repeat
>= sizeof(s
->line_buf
) - 1) {
1984 /* that many repeats would overrun the command buffer */
1985 trace_gdbstub_err_overrun();
1987 } else if (s
->line_buf_index
< 1) {
1988 /* got a repeat but we have nothing to repeat */
1989 trace_gdbstub_err_invalid_rle();
1990 s
->state
= RS_GETLINE
;
1992 /* repeat the last character */
1993 memset(s
->line_buf
+ s
->line_buf_index
,
1994 s
->line_buf
[s
->line_buf_index
- 1], repeat
);
1995 s
->line_buf_index
+= repeat
;
1997 s
->state
= RS_GETLINE
;
2002 /* get high hex digit of checksum */
2003 if (!isxdigit(ch
)) {
2004 trace_gdbstub_err_checksum_invalid((uint8_t)ch
);
2005 s
->state
= RS_GETLINE
;
2008 s
->line_buf
[s
->line_buf_index
] = '\0';
2009 s
->line_csum
= fromhex(ch
) << 4;
2010 s
->state
= RS_CHKSUM2
;
2013 /* get low hex digit of checksum */
2014 if (!isxdigit(ch
)) {
2015 trace_gdbstub_err_checksum_invalid((uint8_t)ch
);
2016 s
->state
= RS_GETLINE
;
2019 s
->line_csum
|= fromhex(ch
);
2021 if (s
->line_csum
!= (s
->line_sum
& 0xff)) {
2022 trace_gdbstub_err_checksum_incorrect(s
->line_sum
, s
->line_csum
);
2023 /* send NAK reply */
2025 put_buffer(s
, &reply
, 1);
2028 /* send ACK reply */
2030 put_buffer(s
, &reply
, 1);
2031 s
->state
= gdb_handle_packet(s
, s
->line_buf
);
2040 /* Tell the remote gdb that the process has exited. */
2041 void gdb_exit(CPUArchState
*env
, int code
)
2046 s
= gdbserver_state
;
2050 #ifdef CONFIG_USER_ONLY
2051 if (gdbserver_fd
< 0 || s
->fd
< 0) {
2056 trace_gdbstub_op_exiting((uint8_t)code
);
2058 snprintf(buf
, sizeof(buf
), "W%02x", (uint8_t)code
);
2061 #ifndef CONFIG_USER_ONLY
2062 qemu_chr_fe_deinit(&s
->chr
, true);
2067 * Create the process that will contain all the "orphan" CPUs (that are not
2068 * part of a CPU cluster). Note that if this process contains no CPUs, it won't
2069 * be attachable and thus will be invisible to the user.
2071 static void create_default_process(GDBState
*s
)
2073 GDBProcess
*process
;
2076 if (s
->process_num
) {
2077 max_pid
= s
->processes
[s
->process_num
- 1].pid
;
2080 s
->processes
= g_renew(GDBProcess
, s
->processes
, ++s
->process_num
);
2081 process
= &s
->processes
[s
->process_num
- 1];
2083 /* We need an available PID slot for this process */
2084 assert(max_pid
< UINT32_MAX
);
2086 process
->pid
= max_pid
+ 1;
2087 process
->attached
= false;
2088 process
->target_xml
[0] = '\0';
2091 #ifdef CONFIG_USER_ONLY
2093 gdb_handlesig(CPUState
*cpu
, int sig
)
2099 s
= gdbserver_state
;
2100 if (gdbserver_fd
< 0 || s
->fd
< 0) {
2104 /* disable single step if it was enabled */
2105 cpu_single_step(cpu
, 0);
2109 snprintf(buf
, sizeof(buf
), "S%02x", target_signal_to_gdb(sig
));
2112 /* put_packet() might have detected that the peer terminated the
2120 s
->running_state
= 0;
2121 while (s
->running_state
== 0) {
2122 n
= read(s
->fd
, buf
, 256);
2126 for (i
= 0; i
< n
; i
++) {
2127 gdb_read_byte(s
, buf
[i
]);
2130 /* XXX: Connection closed. Should probably wait for another
2131 connection before continuing. */
2144 /* Tell the remote gdb that the process has exited due to SIG. */
2145 void gdb_signalled(CPUArchState
*env
, int sig
)
2150 s
= gdbserver_state
;
2151 if (gdbserver_fd
< 0 || s
->fd
< 0) {
2155 snprintf(buf
, sizeof(buf
), "X%02x", target_signal_to_gdb(sig
));
2159 static bool gdb_accept(void)
2162 struct sockaddr_in sockaddr
;
2167 len
= sizeof(sockaddr
);
2168 fd
= accept(gdbserver_fd
, (struct sockaddr
*)&sockaddr
, &len
);
2169 if (fd
< 0 && errno
!= EINTR
) {
2172 } else if (fd
>= 0) {
2173 qemu_set_cloexec(fd
);
2178 /* set short latency */
2179 if (socket_set_nodelay(fd
)) {
2180 perror("setsockopt");
2185 s
= g_malloc0(sizeof(GDBState
));
2186 s
->c_cpu
= first_cpu
;
2187 s
->g_cpu
= first_cpu
;
2188 create_default_process(s
);
2190 gdb_has_xml
= false;
2192 gdbserver_state
= s
;
/* Create a TCP listening socket bound to PORT on all interfaces.
 * Returns the listening fd, or -1 on failure (error printed via perror).
 */
static int gdbserver_open(int port)
{
    struct sockaddr_in sockaddr;
    int fd, ret;

    fd = socket(PF_INET, SOCK_STREAM, 0);
    if (fd < 0) {
        perror("socket");
        return -1;
    }
    qemu_set_cloexec(fd);

    /* allow fast reuse so a restarted stub can rebind immediately */
    socket_set_fast_reuse(fd);

    sockaddr.sin_family = AF_INET;
    sockaddr.sin_port = htons(port);
    sockaddr.sin_addr.s_addr = 0;
    ret = bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr));
    if (ret < 0) {
        perror("bind");
        close(fd);
        return -1;
    }
    ret = listen(fd, 1);
    if (ret < 0) {
        perror("listen");
        close(fd);
        return -1;
    }
    return fd;
}
2228 int gdbserver_start(int port
)
2230 gdbserver_fd
= gdbserver_open(port
);
2231 if (gdbserver_fd
< 0)
2233 /* accept connections */
2234 if (!gdb_accept()) {
2235 close(gdbserver_fd
);
2242 /* Disable gdb stub for child processes. */
2243 void gdbserver_fork(CPUState
*cpu
)
2245 GDBState
*s
= gdbserver_state
;
2247 if (gdbserver_fd
< 0 || s
->fd
< 0) {
2252 cpu_breakpoint_remove_all(cpu
, BP_GDB
);
2253 cpu_watchpoint_remove_all(cpu
, BP_GDB
);
2256 static int gdb_chr_can_receive(void *opaque
)
2258 /* We can handle an arbitrarily large amount of data.
2259 Pick the maximum packet size, which is as good as anything. */
2260 return MAX_PACKET_LENGTH
;
2263 static void gdb_chr_receive(void *opaque
, const uint8_t *buf
, int size
)
2267 for (i
= 0; i
< size
; i
++) {
2268 gdb_read_byte(gdbserver_state
, buf
[i
]);
2272 static void gdb_chr_event(void *opaque
, int event
)
2275 case CHR_EVENT_OPENED
:
2276 vm_stop(RUN_STATE_PAUSED
);
2277 gdb_has_xml
= false;
2284 static void gdb_monitor_output(GDBState
*s
, const char *msg
, int len
)
2286 char buf
[MAX_PACKET_LENGTH
];
2289 if (len
> (MAX_PACKET_LENGTH
/2) - 1)
2290 len
= (MAX_PACKET_LENGTH
/2) - 1;
2291 memtohex(buf
+ 1, (uint8_t *)msg
, len
);
2295 static int gdb_monitor_write(Chardev
*chr
, const uint8_t *buf
, int len
)
2297 const char *p
= (const char *)buf
;
2300 max_sz
= (sizeof(gdbserver_state
->last_packet
) - 2) / 2;
2302 if (len
<= max_sz
) {
2303 gdb_monitor_output(gdbserver_state
, p
, len
);
2306 gdb_monitor_output(gdbserver_state
, p
, max_sz
);
2314 static void gdb_sigterm_handler(int signal
)
2316 if (runstate_is_running()) {
2317 vm_stop(RUN_STATE_PAUSED
);
2322 static void gdb_monitor_open(Chardev
*chr
, ChardevBackend
*backend
,
2323 bool *be_opened
, Error
**errp
)
2328 static void char_gdb_class_init(ObjectClass
*oc
, void *data
)
2330 ChardevClass
*cc
= CHARDEV_CLASS(oc
);
2332 cc
->internal
= true;
2333 cc
->open
= gdb_monitor_open
;
2334 cc
->chr_write
= gdb_monitor_write
;
2337 #define TYPE_CHARDEV_GDB "chardev-gdb"
2339 static const TypeInfo char_gdb_type_info
= {
2340 .name
= TYPE_CHARDEV_GDB
,
2341 .parent
= TYPE_CHARDEV
,
2342 .class_init
= char_gdb_class_init
,
2345 static int find_cpu_clusters(Object
*child
, void *opaque
)
2347 if (object_dynamic_cast(child
, TYPE_CPU_CLUSTER
)) {
2348 GDBState
*s
= (GDBState
*) opaque
;
2349 CPUClusterState
*cluster
= CPU_CLUSTER(child
);
2350 GDBProcess
*process
;
2352 s
->processes
= g_renew(GDBProcess
, s
->processes
, ++s
->process_num
);
2354 process
= &s
->processes
[s
->process_num
- 1];
2357 * GDB process IDs -1 and 0 are reserved. To avoid subtle errors at
2358 * runtime, we enforce here that the machine does not use a cluster ID
2359 * that would lead to PID 0.
2361 assert(cluster
->cluster_id
!= UINT32_MAX
);
2362 process
->pid
= cluster
->cluster_id
+ 1;
2363 process
->attached
= false;
2364 process
->target_xml
[0] = '\0';
2369 return object_child_foreach(child
, find_cpu_clusters
, opaque
);
2372 static int pid_order(const void *a
, const void *b
)
2374 GDBProcess
*pa
= (GDBProcess
*) a
;
2375 GDBProcess
*pb
= (GDBProcess
*) b
;
2377 if (pa
->pid
< pb
->pid
) {
2379 } else if (pa
->pid
> pb
->pid
) {
2386 static void create_processes(GDBState
*s
)
2388 object_child_foreach(object_get_root(), find_cpu_clusters
, s
);
2392 qsort(s
->processes
, s
->process_num
, sizeof(s
->processes
[0]), pid_order
);
2395 create_default_process(s
);
2398 static void cleanup_processes(GDBState
*s
)
2400 g_free(s
->processes
);
2402 s
->processes
= NULL
;
2405 int gdbserver_start(const char *device
)
2407 trace_gdbstub_op_start(device
);
2410 char gdbstub_device_name
[128];
2411 Chardev
*chr
= NULL
;
2415 error_report("gdbstub: meaningless to attach gdb to a "
2416 "machine without any CPU.");
2422 if (strcmp(device
, "none") != 0) {
2423 if (strstart(device
, "tcp:", NULL
)) {
2424 /* enforce required TCP attributes */
2425 snprintf(gdbstub_device_name
, sizeof(gdbstub_device_name
),
2426 "%s,nowait,nodelay,server", device
);
2427 device
= gdbstub_device_name
;
2430 else if (strcmp(device
, "stdio") == 0) {
2431 struct sigaction act
;
2433 memset(&act
, 0, sizeof(act
));
2434 act
.sa_handler
= gdb_sigterm_handler
;
2435 sigaction(SIGINT
, &act
, NULL
);
2439 * FIXME: it's a bit weird to allow using a mux chardev here
2440 * and implicitly setup a monitor. We may want to break this.
2442 chr
= qemu_chr_new_noreplay("gdb", device
, true);
2447 s
= gdbserver_state
;
2449 s
= g_malloc0(sizeof(GDBState
));
2450 gdbserver_state
= s
;
2452 qemu_add_vm_change_state_handler(gdb_vm_state_change
, NULL
);
2454 /* Initialize a monitor terminal for gdb */
2455 mon_chr
= qemu_chardev_new(NULL
, TYPE_CHARDEV_GDB
,
2456 NULL
, &error_abort
);
2457 monitor_init(mon_chr
, 0);
2459 qemu_chr_fe_deinit(&s
->chr
, true);
2460 mon_chr
= s
->mon_chr
;
2461 cleanup_processes(s
);
2462 memset(s
, 0, sizeof(GDBState
));
2463 s
->mon_chr
= mon_chr
;
2465 s
->c_cpu
= first_cpu
;
2466 s
->g_cpu
= first_cpu
;
2468 create_processes(s
);
2471 qemu_chr_fe_init(&s
->chr
, chr
, &error_abort
);
2472 qemu_chr_fe_set_handlers(&s
->chr
, gdb_chr_can_receive
, gdb_chr_receive
,
2473 gdb_chr_event
, NULL
, NULL
, NULL
, true);
2475 s
->state
= chr
? RS_IDLE
: RS_INACTIVE
;
2476 s
->mon_chr
= mon_chr
;
2477 s
->current_syscall_cb
= NULL
;
2482 void gdbserver_cleanup(void)
2484 if (gdbserver_state
) {
2485 put_packet(gdbserver_state
, "W00");
2489 static void register_types(void)
2491 type_register_static(&char_gdb_type_info
);
2494 type_init(register_types
);