2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2017-2018 John H. Baldwin <jhb@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #ifndef WITHOUT_CAPSICUM
33 #include <sys/capsicum.h>
36 #include <sys/endian.h>
40 #include <sys/ioctl.h>
42 #include <sys/queue.h>
43 #include <sys/socket.h>
44 #include <machine/atomic.h>
45 #include <machine/specialreg.h>
46 #include <machine/vmm.h>
47 #include <netinet/in.h>
49 #ifndef WITHOUT_CAPSICUM
50 #include <capsicum_helpers.h>
57 #include <pthread_np.h>
73 * GDB_SIGNAL_* numbers are part of the GDB remote protocol. Most stops
76 #define GDB_SIGNAL_TRAP 5
78 static void gdb_resume_vcpus(void);
79 static void check_command(int fd
);
81 static struct mevent
*read_event
, *write_event
;
83 static cpuset_t vcpus_active
, vcpus_suspended
, vcpus_waiting
;
84 static pthread_mutex_t gdb_lock
;
85 static pthread_cond_t idle_vcpus
;
86 static bool first_stop
, report_next_stop
, swbreak_enabled
;
89 * An I/O buffer contains 'capacity' bytes of room at 'data'. For a
90 * read buffer, 'start' is unused and 'len' contains the number of
91 * valid bytes in the buffer. For a write buffer, 'start' is set to
92 * the index of the next byte in 'data' to send, and 'len' contains
93 * the remaining number of valid bytes to send.
105 TAILQ_ENTRY(breakpoint
) link
;
109 * When a vCPU stops due to an event that should be reported to the
110 * debugger, information about the event is stored in this structure.
111 * The vCPU thread then sets 'stopped_vcpu' if it is not already set
112 * and stops other vCPUs so the event can be reported. The
113 * report_stop() function reports the event for the 'stopped_vcpu'
114 * vCPU. When the debugger resumes execution via continue or step,
115 * the event for 'stopped_vcpu' is cleared. vCPUs will loop in their
116 * event handlers until the associated event is reported or disabled.
118 * An idle vCPU will have all of the boolean fields set to false.
120 * When a vCPU is stepped, 'stepping' is set to true when the vCPU is
121 * released to execute the stepped instruction. When the vCPU reports
122 * the stepping trap, 'stepped' is set.
124 * When a vCPU hits a breakpoint set by the debug server,
125 * 'hit_swbreak' is set to true.
133 static struct io_buffer cur_comm
, cur_resp
;
134 static uint8_t cur_csum
;
135 static struct vmctx
*ctx
;
136 static int cur_fd
= -1;
137 static TAILQ_HEAD(, breakpoint
) breakpoints
;
138 static struct vcpu_state
*vcpu_state
;
139 static int cur_vcpu
, stopped_vcpu
;
140 static bool gdb_active
= false;
142 static const int gdb_regset
[] = {
169 static const int gdb_regsize
[] = {
200 static void __printflike(1, 2)
201 debug(const char *fmt
, ...)
203 static FILE *logfile
;
206 if (logfile
== NULL
) {
207 logfile
= fopen("/tmp/bhyve_gdb.log", "w");
210 #ifndef WITHOUT_CAPSICUM
211 if (caph_limit_stream(fileno(logfile
), CAPH_WRITE
) == -1) {
220 vfprintf(logfile
, fmt
, ap
);
226 * A totally empty debug() makes the compiler grumpy due to how it's used with
227 * some control flow here.
229 #define debug(...) do { } while (0)
235 static void remove_all_sw_breakpoints(void);
238 guest_paging_info(int vcpu
, struct vm_guest_paging
*paging
)
241 const int regset
[4] = {
248 if (vm_get_register_set(ctx
, vcpu
, nitems(regset
), regset
, regs
) == -1)
252 * For the debugger, always pretend to be the kernel (CPL 0),
253 * and if long-mode is enabled, always parse addresses as if
256 paging
->cr3
= regs
[1];
258 if (regs
[3] & EFER_LMA
)
259 paging
->cpu_mode
= CPU_MODE_64BIT
;
260 else if (regs
[0] & CR0_PE
)
261 paging
->cpu_mode
= CPU_MODE_PROTECTED
;
263 paging
->cpu_mode
= CPU_MODE_REAL
;
264 if (!(regs
[0] & CR0_PG
))
265 paging
->paging_mode
= PAGING_MODE_FLAT
;
266 else if (!(regs
[2] & CR4_PAE
))
267 paging
->paging_mode
= PAGING_MODE_32
;
268 else if (regs
[3] & EFER_LME
)
269 paging
->paging_mode
= PAGING_MODE_64
;
271 paging
->paging_mode
= PAGING_MODE_PAE
;
276 * Map a guest virtual address to a physical address (for a given vcpu).
277 * If a guest virtual address is valid, return 1. If the address is
278 * not valid, return 0. If an error occurs obtaining the mapping,
/*
 * NOTE(review): the tail of the comment above and parts of the body
 * (the error return after guest_paging_info() and the trailing
 * arguments of vm_gla2gpa_nofault()) were lost in extraction —
 * confirm against the upstream source before relying on details.
 */
282 guest_vaddr2paddr(int vcpu
, uint64_t vaddr
, uint64_t *paddr
)
284 struct vm_guest_paging paging
;
287 if (guest_paging_info(vcpu
, &paging
) == -1)
291 * Always use PROT_READ. We really care if the VA is
292 * accessible, not if the current vCPU can write.
294 if (vm_gla2gpa_nofault(ctx
, vcpu
, &paging
, vaddr
, PROT_READ
, paddr
,
303 io_buffer_reset(struct io_buffer
*io
)
310 /* Available room for adding data. */
/* Free bytes at the tail: capacity minus the occupied span (start + len). */
312 io_buffer_avail(struct io_buffer
*io
)
315 return (io
->capacity
- (io
->start
+ io
->len
));
/* Pointer to the first valid (not yet consumed) byte in the buffer. */
319 io_buffer_head(struct io_buffer
*io
)
322 return (io
->data
+ io
->start
);
/* Pointer one past the last valid byte; new data is appended here. */
326 io_buffer_tail(struct io_buffer
*io
)
329 return (io
->data
+ io
->start
+ io
->len
);
333 io_buffer_advance(struct io_buffer
*io
, size_t amount
)
336 assert(amount
<= io
->len
);
342 io_buffer_consume(struct io_buffer
*io
, size_t amount
)
345 io_buffer_advance(io
, amount
);
352 * XXX: Consider making this move optional and compacting on a
353 * future read() before realloc().
355 memmove(io
->data
, io_buffer_head(io
), io
->len
);
360 io_buffer_grow(struct io_buffer
*io
, size_t newsize
)
363 size_t avail
, new_cap
;
365 avail
= io_buffer_avail(io
);
366 if (newsize
<= avail
)
369 new_cap
= io
->capacity
+ (newsize
- avail
);
370 new_data
= realloc(io
->data
, new_cap
);
371 if (new_data
== NULL
)
372 err(1, "Failed to grow GDB I/O buffer");
374 io
->capacity
= new_cap
;
378 response_pending(void)
381 if (cur_resp
.start
== 0 && cur_resp
.len
== 0)
383 if (cur_resp
.start
+ cur_resp
.len
== 1 && cur_resp
.data
[0] == '+')
389 close_connection(void)
393 * XXX: This triggers a warning because mevent does the close
394 * before the EV_DELETE.
396 pthread_mutex_lock(&gdb_lock
);
397 mevent_delete(write_event
);
398 mevent_delete_close(read_event
);
401 io_buffer_reset(&cur_comm
);
402 io_buffer_reset(&cur_resp
);
405 remove_all_sw_breakpoints();
407 /* Clear any pending events. */
408 memset(vcpu_state
, 0, guest_ncpus
* sizeof(*vcpu_state
));
410 /* Resume any stopped vCPUs. */
412 pthread_mutex_unlock(&gdb_lock
);
/*
 * Convert a nibble (0-15) to its lowercase ASCII hex character.
 * NOTE(review): the selecting 'if' (presumably nibble <= 9) was lost
 * in extraction; only the two return fragments remain visible.
 */
416 hex_digit(uint8_t nibble
)
420 return (nibble
+ '0');
422 return (nibble
+ 'a' - 10);
/*
 * Parse a single ASCII hex digit ('0'-'9', 'a'-'f', 'A'-'F') into its
 * numeric value 0-15.  NOTE(review): the return for the '0'-'9' case
 * and the fallthrough return for invalid input were lost in
 * extraction.
 */
426 parse_digit(uint8_t v
)
429 if (v
>= '0' && v
<= '9')
431 if (v
>= 'a' && v
<= 'f')
432 return (v
- 'a' + 10);
433 if (v
>= 'A' && v
<= 'F')
434 return (v
- 'A' + 10);
438 /* Parses big-endian hexadecimal. */
/*
 * Fold 'len' hex digits from 'p' into an integer, most-significant
 * digit first.  NOTE(review): the accumulator declaration, the loop
 * header, and the shift before each OR were lost in extraction; only
 * the per-digit OR survives below.
 */
440 parse_integer(const uint8_t *p
, size_t len
)
447 v
|= parse_digit(*p
);
/* Parse two hex characters at 'p' into one byte (high nibble first). */
455 parse_byte(const uint8_t *p
)
458 return (parse_digit(p
[0]) << 4 | parse_digit(p
[1]));
462 send_pending_data(int fd
)
466 if (cur_resp
.len
== 0) {
467 mevent_disable(write_event
);
470 nwritten
= write(fd
, io_buffer_head(&cur_resp
), cur_resp
.len
);
471 if (nwritten
== -1) {
472 warn("Write to GDB socket failed");
475 io_buffer_advance(&cur_resp
, nwritten
);
476 if (cur_resp
.len
== 0)
477 mevent_disable(write_event
);
479 mevent_enable(write_event
);
483 /* Append a single character to the output buffer. */
485 send_char(uint8_t data
)
487 io_buffer_grow(&cur_resp
, 1);
488 *io_buffer_tail(&cur_resp
) = data
;
492 /* Append an array of bytes to the output buffer. */
494 send_data(const uint8_t *data
, size_t len
)
497 io_buffer_grow(&cur_resp
, len
);
498 memcpy(io_buffer_tail(&cur_resp
), data
, len
);
/* Format byte 'v' as two lowercase hex characters into buf[0..1]. */
503 format_byte(uint8_t v
, uint8_t *buf
)
506 buf
[0] = hex_digit(v
>> 4);
507 buf
[1] = hex_digit(v
& 0xf);
511 * Append a single byte (formatted as two hex characters) to the
520 send_data(buf
, sizeof(buf
));
537 debug("-> %.*s\n", (int)cur_resp
.len
, io_buffer_head(&cur_resp
));
541 * Append a single character (for the packet payload) and update the
545 append_char(uint8_t v
)
553 * Append an array of bytes (for the packet payload) and update the
557 append_packet_data(const uint8_t *data
, size_t len
)
560 send_data(data
, len
);
569 append_string(const char *str
)
573 append_packet_data(str
, strlen(str
));
575 append_packet_data((const uint8_t *)str
, strlen(str
));
580 append_byte(uint8_t v
)
585 append_packet_data(buf
, sizeof(buf
));
589 append_unsigned_native(uintmax_t value
, size_t len
)
593 for (i
= 0; i
< len
; i
++) {
600 append_unsigned_be(uintmax_t value
, size_t len
)
605 for (i
= 0; i
< len
; i
++) {
607 format_byte(value
, buf
+ (len
- i
- 1) * 2);
609 format_byte(value
, (uint8_t *)(buf
+ (len
- i
- 1) * 2));
614 append_packet_data(buf
, sizeof(buf
));
616 append_packet_data((const uint8_t *)buf
, sizeof(buf
));
621 append_integer(unsigned int value
)
627 append_unsigned_be(value
, (fls(value
) + 7) / 8);
631 append_asciihex(const char *str
)
634 while (*str
!= '\0') {
641 send_empty_response(void)
649 send_error(int error
)
/*
 * Parse a GDB remote-protocol thread-id.  "0" means "any thread" and
 * "-1" means "all threads"; otherwise the value is a big-endian hex
 * integer (here, vCPU id + 1).  NOTE(review): the return statements
 * for the "0" and "-1" cases were lost in extraction — confirm the
 * sentinel values against the upstream source.
 */
668 parse_threadid(const uint8_t *data
, size_t len
)
671 if (len
== 1 && *data
== '0')
673 if (len
== 2 && memcmp(data
, "-1", 2) == 0)
677 return (parse_integer(data
, len
));
681 * Report the current stop event to the debugger. If the stop is due
682 * to an event triggered on a specific vCPU such as a breakpoint or
683 * stepping trap, stopped_vcpu will be set to the vCPU triggering the
684 * stop. If 'set_cur_vcpu' is true, then cur_vcpu will be updated to
685 * the reporting vCPU for vCPU events.
688 report_stop(bool set_cur_vcpu
)
690 struct vcpu_state
*vs
;
693 if (stopped_vcpu
== -1) {
695 append_byte(GDB_SIGNAL_TRAP
);
697 vs
= &vcpu_state
[stopped_vcpu
];
699 cur_vcpu
= stopped_vcpu
;
701 append_byte(GDB_SIGNAL_TRAP
);
702 append_string("thread:");
703 append_integer(stopped_vcpu
+ 1);
705 if (vs
->hit_swbreak
) {
706 debug("$vCPU %d reporting swbreak\n", stopped_vcpu
);
708 append_string("swbreak:;");
709 } else if (vs
->stepped
)
710 debug("$vCPU %d reporting step\n", stopped_vcpu
);
712 debug("$vCPU %d reporting ???\n", stopped_vcpu
);
715 report_next_stop
= false;
719 * If this stop is due to a vCPU event, clear that event to mark it as
725 struct vcpu_state
*vs
;
727 if (stopped_vcpu
!= -1) {
728 vs
= &vcpu_state
[stopped_vcpu
];
729 vs
->hit_swbreak
= false;
733 report_next_stop
= true;
737 gdb_finish_suspend_vcpus(void)
743 } else if (report_next_stop
) {
744 assert(!response_pending());
746 send_pending_data(cur_fd
);
751 * vCPU threads invoke this function whenever the vCPU enters the
752 * debug server to pause or report an event. vCPU threads wait here
753 * as long as the debug server keeps them suspended.
756 _gdb_cpu_suspend(int vcpu
, bool report_stop
)
759 debug("$vCPU %d suspending\n", vcpu
);
760 CPU_SET(vcpu
, &vcpus_waiting
);
761 if (report_stop
&& CPU_CMP(&vcpus_waiting
, &vcpus_suspended
) == 0)
762 gdb_finish_suspend_vcpus();
763 while (CPU_ISSET(vcpu
, &vcpus_suspended
))
764 pthread_cond_wait(&idle_vcpus
, &gdb_lock
);
765 CPU_CLR(vcpu
, &vcpus_waiting
);
766 debug("$vCPU %d resuming\n", vcpu
);
770 * Invoked at the start of a vCPU thread's execution to inform the
771 * debug server about the new thread.
774 gdb_cpu_add(int vcpu
)
779 debug("$vCPU %d starting\n", vcpu
);
780 pthread_mutex_lock(&gdb_lock
);
781 assert(vcpu
< guest_ncpus
);
782 CPU_SET(vcpu
, &vcpus_active
);
783 if (!TAILQ_EMPTY(&breakpoints
)) {
784 vm_set_capability(ctx
, vcpu
, VM_CAP_BPT_EXIT
, 1);
785 debug("$vCPU %d enabled breakpoint exits\n", vcpu
);
789 * If a vcpu is added while vcpus are stopped, suspend the new
790 * vcpu so that it will pop back out with a debug exit before
791 * executing the first instruction.
793 if (!CPU_EMPTY(&vcpus_suspended
)) {
794 CPU_SET(vcpu
, &vcpus_suspended
);
795 _gdb_cpu_suspend(vcpu
, false);
797 pthread_mutex_unlock(&gdb_lock
);
801 * Invoked by vCPU before resuming execution. This enables stepping
802 * if the vCPU is marked as stepping.
805 gdb_cpu_resume(int vcpu
)
807 struct vcpu_state
*vs
;
810 vs
= &vcpu_state
[vcpu
];
813 * Any pending event should already be reported before
816 assert(vs
->hit_swbreak
== false);
817 assert(vs
->stepped
== false);
819 error
= vm_set_capability(ctx
, vcpu
, VM_CAP_MTRAP_EXIT
, 1);
825 * Handler for VM_EXITCODE_DEBUG used to suspend a vCPU when the guest
826 * has been suspended due to an event on different vCPU or in response
827 * to a guest-wide suspend such as Ctrl-C or the stop on attach.
830 gdb_cpu_suspend(int vcpu
)
835 pthread_mutex_lock(&gdb_lock
);
836 _gdb_cpu_suspend(vcpu
, true);
837 gdb_cpu_resume(vcpu
);
838 pthread_mutex_unlock(&gdb_lock
);
842 gdb_suspend_vcpus(void)
845 assert(pthread_mutex_isowned_np(&gdb_lock
));
846 debug("suspending all CPUs\n");
847 vcpus_suspended
= vcpus_active
;
848 vm_suspend_cpu(ctx
, -1);
849 if (CPU_CMP(&vcpus_waiting
, &vcpus_suspended
) == 0)
850 gdb_finish_suspend_vcpus();
854 * Handler for VM_EXITCODE_MTRAP reported when a vCPU single-steps via
855 * the VT-x-specific MTRAP exit.
858 gdb_cpu_mtrap(int vcpu
)
860 struct vcpu_state
*vs
;
864 debug("$vCPU %d MTRAP\n", vcpu
);
865 pthread_mutex_lock(&gdb_lock
);
866 vs
= &vcpu_state
[vcpu
];
868 vs
->stepping
= false;
870 vm_set_capability(ctx
, vcpu
, VM_CAP_MTRAP_EXIT
, 0);
871 while (vs
->stepped
) {
872 if (stopped_vcpu
== -1) {
873 debug("$vCPU %d reporting step\n", vcpu
);
877 _gdb_cpu_suspend(vcpu
, true);
879 gdb_cpu_resume(vcpu
);
881 pthread_mutex_unlock(&gdb_lock
);
884 static struct breakpoint
*
885 find_breakpoint(uint64_t gpa
)
887 struct breakpoint
*bp
;
889 TAILQ_FOREACH(bp
, &breakpoints
, link
) {
897 gdb_cpu_breakpoint(int vcpu
, struct vm_exit
*vmexit
)
899 struct breakpoint
*bp
;
900 struct vcpu_state
*vs
;
905 fprintf(stderr
, "vm_loop: unexpected VMEXIT_DEBUG\n");
908 pthread_mutex_lock(&gdb_lock
);
909 error
= guest_vaddr2paddr(vcpu
, vmexit
->rip
, &gpa
);
911 bp
= find_breakpoint(gpa
);
913 vs
= &vcpu_state
[vcpu
];
914 assert(vs
->stepping
== false);
915 assert(vs
->stepped
== false);
916 assert(vs
->hit_swbreak
== false);
917 vs
->hit_swbreak
= true;
918 vm_set_register(ctx
, vcpu
, VM_REG_GUEST_RIP
, vmexit
->rip
);
920 if (stopped_vcpu
== -1) {
921 debug("$vCPU %d reporting breakpoint at rip %#lx\n", vcpu
,
926 _gdb_cpu_suspend(vcpu
, true);
927 if (!vs
->hit_swbreak
) {
928 /* Breakpoint reported. */
931 bp
= find_breakpoint(gpa
);
933 /* Breakpoint was removed. */
934 vs
->hit_swbreak
= false;
938 gdb_cpu_resume(vcpu
);
940 debug("$vCPU %d injecting breakpoint at rip %#lx\n", vcpu
,
942 error
= vm_set_register(ctx
, vcpu
,
943 VM_REG_GUEST_ENTRY_INST_LENGTH
, vmexit
->u
.bpt
.inst_length
);
945 error
= vm_inject_exception(ctx
, vcpu
, IDT_BP
, 0, 0, 0);
948 pthread_mutex_unlock(&gdb_lock
);
952 gdb_step_vcpu(int vcpu
)
956 debug("$vCPU %d step\n", vcpu
);
957 error
= vm_get_capability(ctx
, vcpu
, VM_CAP_MTRAP_EXIT
, &val
);
962 vcpu_state
[vcpu
].stepping
= true;
963 vm_resume_cpu(ctx
, vcpu
);
964 CPU_CLR(vcpu
, &vcpus_suspended
);
965 pthread_cond_broadcast(&idle_vcpus
);
970 gdb_resume_vcpus(void)
973 assert(pthread_mutex_isowned_np(&gdb_lock
));
974 vm_resume_cpu(ctx
, -1);
975 debug("resuming all CPUs\n");
976 CPU_ZERO(&vcpus_suspended
);
977 pthread_cond_broadcast(&idle_vcpus
);
983 uint64_t regvals
[nitems(gdb_regset
)];
985 if (vm_get_register_set(ctx
, cur_vcpu
, nitems(gdb_regset
),
986 gdb_regset
, regvals
) == -1) {
991 for (size_t i
= 0; i
< nitems(regvals
); i
++)
992 append_unsigned_native(regvals
[i
], gdb_regsize
[i
]);
997 gdb_read_mem(const uint8_t *data
, size_t len
)
999 uint64_t gpa
, gva
, val
;
1001 size_t resid
, todo
, bytes
;
1009 /* Parse and consume address. */
1010 cp
= memchr(data
, ',', len
);
1011 if (cp
== NULL
|| cp
== data
) {
1015 gva
= parse_integer(data
, cp
- data
);
1016 len
-= (cp
- data
) + 1;
1017 data
+= (cp
- data
) + 1;
1020 resid
= parse_integer(data
, len
);
1024 error
= guest_vaddr2paddr(cur_vcpu
, gva
, &gpa
);
1040 /* Read bytes from current page. */
1041 todo
= getpagesize() - gpa
% getpagesize();
1045 cp
= paddr_guest2host(ctx
, gpa
, todo
);
1048 * If this page is guest RAM, read it a byte
1065 * If this page isn't guest RAM, try to handle
1066 * it via MMIO. For MMIO requests, use
1067 * aligned reads of words when possible.
1070 if (gpa
& 1 || todo
== 1)
1072 else if (gpa
& 2 || todo
== 2)
1076 error
= read_mem(ctx
, cur_vcpu
, gpa
, &val
,
1101 assert(resid
== 0 || gpa
% getpagesize() == 0);
1109 gdb_write_mem(const uint8_t *data
, size_t len
)
1111 uint64_t gpa
, gva
, val
;
1113 size_t resid
, todo
, bytes
;
1120 /* Parse and consume address. */
1121 cp
= memchr(data
, ',', len
);
1122 if (cp
== NULL
|| cp
== data
) {
1126 gva
= parse_integer(data
, cp
- data
);
1127 len
-= (cp
- data
) + 1;
1128 data
+= (cp
- data
) + 1;
1130 /* Parse and consume length. */
1131 cp
= memchr(data
, ':', len
);
1132 if (cp
== NULL
|| cp
== data
) {
1136 resid
= parse_integer(data
, cp
- data
);
1137 len
-= (cp
- data
) + 1;
1138 data
+= (cp
- data
) + 1;
1140 /* Verify the available bytes match the length. */
1141 if (len
!= resid
* 2) {
1147 error
= guest_vaddr2paddr(cur_vcpu
, gva
, &gpa
);
1157 /* Write bytes to current page. */
1158 todo
= getpagesize() - gpa
% getpagesize();
1162 cp
= paddr_guest2host(ctx
, gpa
, todo
);
1165 * If this page is guest RAM, write it a byte
1170 *cp
= parse_byte(data
);
1181 * If this page isn't guest RAM, try to handle
1182 * it via MMIO. For MMIO requests, use
1183 * aligned writes of words when possible.
1186 if (gpa
& 1 || todo
== 1) {
1188 val
= parse_byte(data
);
1189 } else if (gpa
& 2 || todo
== 2) {
1191 val
= be16toh(parse_integer(data
, 4));
1194 val
= be32toh(parse_integer(data
, 8));
1196 error
= write_mem(ctx
, cur_vcpu
, gpa
, val
,
1211 assert(resid
== 0 || gpa
% getpagesize() == 0);
1218 set_breakpoint_caps(bool enable
)
1223 mask
= vcpus_active
;
1224 while (!CPU_EMPTY(&mask
)) {
1225 vcpu
= CPU_FFS(&mask
) - 1;
1226 CPU_CLR(vcpu
, &mask
);
1227 if (vm_set_capability(ctx
, vcpu
, VM_CAP_BPT_EXIT
,
1228 enable
? 1 : 0) < 0)
1230 debug("$vCPU %d %sabled breakpoint exits\n", vcpu
,
1231 enable
? "en" : "dis");
1237 remove_all_sw_breakpoints(void)
1239 struct breakpoint
*bp
, *nbp
;
1242 if (TAILQ_EMPTY(&breakpoints
))
1245 TAILQ_FOREACH_SAFE(bp
, &breakpoints
, link
, nbp
) {
1246 debug("remove breakpoint at %#lx\n", bp
->gpa
);
1247 cp
= paddr_guest2host(ctx
, bp
->gpa
, 1);
1248 *cp
= bp
->shadow_inst
;
1249 TAILQ_REMOVE(&breakpoints
, bp
, link
);
1252 TAILQ_INIT(&breakpoints
);
1253 set_breakpoint_caps(false);
1257 update_sw_breakpoint(uint64_t gva
, int kind
, bool insert
)
1259 struct breakpoint
*bp
;
1269 error
= guest_vaddr2paddr(cur_vcpu
, gva
, &gpa
);
1279 cp
= paddr_guest2host(ctx
, gpa
, 1);
1281 /* Only permit breakpoints in guest RAM. */
1287 /* Find any existing breakpoint. */
1288 bp
= find_breakpoint(gpa
);
1291 * Silently ignore duplicate commands since the protocol
1292 * requires these packets to be idempotent.
1296 if (TAILQ_EMPTY(&breakpoints
) &&
1297 !set_breakpoint_caps(true)) {
1298 send_empty_response();
1301 bp
= malloc(sizeof(*bp
));
1303 bp
->shadow_inst
= *cp
;
1304 *cp
= 0xcc; /* INT 3 */
1305 TAILQ_INSERT_TAIL(&breakpoints
, bp
, link
);
1306 debug("new breakpoint at %#lx\n", gpa
);
1310 debug("remove breakpoint at %#lx\n", gpa
);
1311 *cp
= bp
->shadow_inst
;
1312 TAILQ_REMOVE(&breakpoints
, bp
, link
);
1314 if (TAILQ_EMPTY(&breakpoints
))
1315 set_breakpoint_caps(false);
1322 parse_breakpoint(const uint8_t *data
, size_t len
)
1329 insert
= data
[0] == 'Z';
1335 /* Parse and consume type. */
1336 cp
= memchr(data
, ',', len
);
1337 if (cp
== NULL
|| cp
== data
) {
1341 type
= parse_integer(data
, cp
- data
);
1342 len
-= (cp
- data
) + 1;
1343 data
+= (cp
- data
) + 1;
1345 /* Parse and consume address. */
1346 cp
= memchr(data
, ',', len
);
1347 if (cp
== NULL
|| cp
== data
) {
1351 gva
= parse_integer(data
, cp
- data
);
1352 len
-= (cp
- data
) + 1;
1353 data
+= (cp
- data
) + 1;
1355 /* Parse and consume kind. */
1356 cp
= memchr(data
, ';', len
);
1363 * We do not advertise support for either the
1364 * ConditionalBreakpoints or BreakpointCommands
1365 * features, so we should not be getting conditions or
1366 * commands from the remote end.
1368 send_empty_response();
1371 kind
= parse_integer(data
, len
);
1377 update_sw_breakpoint(gva
, kind
, insert
);
1380 send_empty_response();
/*
 * Does the packet payload 'data' (length 'len') begin with the
 * NUL-terminated command name 'cmd'?  A command longer than the
 * payload cannot match, so that case is rejected before memcmp().
 */
1386 command_equals(const uint8_t *data
, size_t len
, const char *cmd
)
1389 if (strlen(cmd
) > len
)
1391 return (memcmp(data
, cmd
, strlen(cmd
)) == 0);
1395 check_features(const uint8_t *data
, size_t len
)
1397 char *feature
, *next_feature
, *str
, *value
;
1400 str
= malloc(len
+ 1);
1401 memcpy(str
, data
, len
);
1405 while ((feature
= strsep(&next_feature
, ";")) != NULL
) {
1407 * Null features shouldn't exist, but skip if they
1410 if (strcmp(feature
, "") == 0)
1414 * Look for the value or supported / not supported
1417 value
= strchr(feature
, '=');
1418 if (value
!= NULL
) {
1423 value
= feature
+ strlen(feature
) - 1;
1433 * This is really a protocol error,
1434 * but we just ignore malformed
1435 * features for ease of
1443 if (strcmp(feature
, "swbreak") == 0)
1444 swbreak_enabled
= supported
;
1448 * The compiler dislikes 'supported' being set but never used.
1449 * Make it happy here.
1452 debug("feature '%s' supported\n", feature
);
1454 #endif /* __FreeBSD__ */
1460 /* This is an arbitrary limit. */
1461 append_string("PacketSize=4096");
1462 append_string(";swbreak+");
1467 gdb_query(const uint8_t *data
, size_t len
)
1474 if (command_equals(data
, len
, "qAttached")) {
1478 } else if (command_equals(data
, len
, "qC")) {
1480 append_string("QC");
1481 append_integer(cur_vcpu
+ 1);
1483 } else if (command_equals(data
, len
, "qfThreadInfo")) {
1488 if (CPU_EMPTY(&vcpus_active
)) {
1492 mask
= vcpus_active
;
1496 while (!CPU_EMPTY(&mask
)) {
1497 vcpu
= CPU_FFS(&mask
) - 1;
1498 CPU_CLR(vcpu
, &mask
);
1503 append_integer(vcpu
+ 1);
1506 } else if (command_equals(data
, len
, "qsThreadInfo")) {
1510 } else if (command_equals(data
, len
, "qSupported")) {
1511 data
+= strlen("qSupported");
1512 len
-= strlen("qSupported");
1513 check_features(data
, len
);
1514 } else if (command_equals(data
, len
, "qThreadExtraInfo")) {
1518 data
+= strlen("qThreadExtraInfo");
1519 len
-= strlen("qThreadExtraInfo");
1524 tid
= parse_threadid(data
+ 1, len
- 1);
1525 if (tid
<= 0 || !CPU_ISSET(tid
- 1, &vcpus_active
)) {
1530 snprintf(buf
, sizeof(buf
), "vCPU %d", tid
- 1);
1532 append_asciihex(buf
);
1535 send_empty_response();
1539 handle_command(const uint8_t *data
, size_t len
)
1542 /* Reject packets with a sequence-id. */
1543 if (len
>= 3 && data
[0] >= '0' && data
[0] <= '9' &&
1544 data
[0] >= '0' && data
[0] <= '9' && data
[2] == ':') {
1545 send_empty_response();
1562 /* TODO: Resume any stopped CPUs. */
1571 if (data
[1] != 'g' && data
[1] != 'c') {
1575 tid
= parse_threadid(data
+ 2, len
- 2);
1581 if (CPU_EMPTY(&vcpus_active
)) {
1585 if (tid
== -1 || tid
== 0)
1586 cur_vcpu
= CPU_FFS(&vcpus_active
) - 1;
1587 else if (CPU_ISSET(tid
- 1, &vcpus_active
))
1597 gdb_read_mem(data
, len
);
1600 gdb_write_mem(data
, len
);
1605 tid
= parse_threadid(data
+ 1, len
- 1);
1606 if (tid
<= 0 || !CPU_ISSET(tid
- 1, &vcpus_active
)) {
1614 gdb_query(data
, len
);
1622 /* Don't send a reply until a stop occurs. */
1623 if (!gdb_step_vcpu(cur_vcpu
)) {
1624 send_error(EOPNOTSUPP
);
1630 parse_breakpoint(data
, len
);
1635 case 'G': /* TODO */
1637 /* Handle 'vCont' */
1639 case 'p': /* TODO */
1640 case 'P': /* TODO */
1641 case 'Q': /* TODO */
1642 case 't': /* TODO */
1643 case 'X': /* TODO */
1645 send_empty_response();
1649 /* Check for a valid packet in the command buffer. */
1651 check_command(int fd
)
1653 uint8_t *head
, *hash
, *p
, sum
;
1657 avail
= cur_comm
.len
;
1660 head
= io_buffer_head(&cur_comm
);
1663 debug("<- Ctrl-C\n");
1664 io_buffer_consume(&cur_comm
, 1);
1666 gdb_suspend_vcpus();
1669 /* ACK of previous response. */
1671 if (response_pending())
1672 io_buffer_reset(&cur_resp
);
1673 io_buffer_consume(&cur_comm
, 1);
1674 if (stopped_vcpu
!= -1 && report_next_stop
) {
1676 send_pending_data(fd
);
1680 /* NACK of previous response. */
1682 if (response_pending()) {
1683 cur_resp
.len
+= cur_resp
.start
;
1685 if (cur_resp
.data
[0] == '+')
1686 io_buffer_advance(&cur_resp
, 1);
1687 debug("-> %.*s\n", (int)cur_resp
.len
,
1688 io_buffer_head(&cur_resp
));
1690 io_buffer_consume(&cur_comm
, 1);
1691 send_pending_data(fd
);
1696 if (response_pending()) {
1697 warnx("New GDB command while response in "
1699 io_buffer_reset(&cur_resp
);
1702 /* Is packet complete? */
1703 hash
= memchr(head
, '#', avail
);
1706 plen
= (hash
- head
+ 1) + 2;
1709 debug("<- %.*s\n", (int)plen
, head
);
1711 /* Verify checksum. */
1712 for (sum
= 0, p
= head
+ 1; p
< hash
; p
++)
1714 if (sum
!= parse_byte(hash
+ 1)) {
1715 io_buffer_consume(&cur_comm
, plen
);
1718 send_pending_data(fd
);
1723 handle_command(head
+ 1, hash
- (head
+ 1));
1724 io_buffer_consume(&cur_comm
, plen
);
1725 if (!response_pending()) {
1728 send_pending_data(fd
);
1731 /* XXX: Possibly drop connection instead. */
1732 debug("-> %02x\n", *head
);
1733 io_buffer_consume(&cur_comm
, 1);
1740 gdb_readable(int fd
, enum ev_type event __unused
, void *arg __unused
)
1746 if (ioctl(fd
, FIONREAD
, &n
) == -1) {
1747 warn("FIONREAD on GDB socket");
1754 * 'pending' might be zero due to EOF. We need to call read
1755 * with a non-zero length to detect EOF.
1760 /* Ensure there is room in the command buffer. */
1761 io_buffer_grow(&cur_comm
, pending
);
1762 assert(io_buffer_avail(&cur_comm
) >= pending
);
1764 nread
= read(fd
, io_buffer_tail(&cur_comm
), io_buffer_avail(&cur_comm
));
1767 } else if (nread
== -1) {
1768 if (errno
== EAGAIN
)
1771 warn("Read from GDB socket");
1774 cur_comm
.len
+= nread
;
1775 pthread_mutex_lock(&gdb_lock
);
1777 pthread_mutex_unlock(&gdb_lock
);
1782 gdb_writable(int fd
, enum ev_type event __unused
, void *arg __unused
)
1785 send_pending_data(fd
);
1789 new_connection(int fd
, enum ev_type event __unused
, void *arg
)
1793 s
= accept4(fd
, NULL
, NULL
, SOCK_NONBLOCK
);
1796 err(1, "Failed accepting initial GDB connection");
1798 /* Silently ignore errors post-startup. */
1803 if (setsockopt(s
, SOL_SOCKET
, SO_NOSIGPIPE
, &optval
, sizeof(optval
)) ==
1805 warn("Failed to disable SIGPIPE for GDB connection");
1810 pthread_mutex_lock(&gdb_lock
);
1813 warnx("Ignoring additional GDB connection.");
1816 read_event
= mevent_add(s
, EVF_READ
, gdb_readable
, NULL
);
1817 if (read_event
== NULL
) {
1819 err(1, "Failed to setup initial GDB connection");
1820 pthread_mutex_unlock(&gdb_lock
);
1823 write_event
= mevent_add(s
, EVF_WRITE
, gdb_writable
, NULL
);
1824 if (write_event
== NULL
) {
1826 err(1, "Failed to setup initial GDB connection");
1827 mevent_delete_close(read_event
);
1835 /* Break on attach. */
1837 report_next_stop
= false;
1838 gdb_suspend_vcpus();
1839 pthread_mutex_unlock(&gdb_lock
);
1842 #ifndef WITHOUT_CAPSICUM
1844 limit_gdb_socket(int s
)
1846 cap_rights_t rights
;
1847 unsigned long ioctls
[] = { FIONREAD
};
1849 cap_rights_init(&rights
, CAP_ACCEPT
, CAP_EVENT
, CAP_READ
, CAP_WRITE
,
1850 CAP_SETSOCKOPT
, CAP_IOCTL
);
1851 if (caph_rights_limit(s
, &rights
) == -1)
1852 errx(EX_OSERR
, "Unable to apply rights for sandbox");
1853 if (caph_ioctls_limit(s
, ioctls
, nitems(ioctls
)) == -1)
1854 errx(EX_OSERR
, "Unable to apply rights for sandbox");
1861 * Equivalent to init_gdb() below, but without configuring the listening socket.
1862 * This will allow the bhyve process to tolerate mdb attaching/detaching from
1863 * the instance while it is running.
1866 init_mdb(struct vmctx
*_ctx
)
1871 wait
= get_config_bool_default("gdb.wait", false);
1873 error
= pthread_mutex_init(&gdb_lock
, NULL
);
1875 errc(1, error
, "gdb mutex init");
1876 error
= pthread_cond_init(&idle_vcpus
, NULL
);
1878 errc(1, error
, "gdb cv init");
1882 TAILQ_INIT(&breakpoints
);
1883 vcpu_state
= calloc(guest_ncpus
, sizeof(*vcpu_state
));
1886 * Set vcpu 0 in vcpus_suspended. This will trigger the
1887 * logic in gdb_cpu_add() to suspend the first vcpu before
1888 * it starts execution. The vcpu will remain suspended
1889 * until a debugger connects.
1891 CPU_SET(0, &vcpus_suspended
);
1898 init_gdb(struct vmctx
*_ctx
)
1900 int error
, flags
, optval
, s
;
1901 struct addrinfo hints
;
1902 struct addrinfo
*gdbaddr
;
1903 const char *saddr
, *value
;
1907 value
= get_config_value("gdb.port");
1910 sport
= strdup(value
);
1912 errx(4, "Failed to allocate memory");
1914 wait
= get_config_bool_default("gdb.wait", false);
1916 saddr
= get_config_value("gdb.address");
1917 if (saddr
== NULL
) {
1918 saddr
= "localhost";
1921 debug("==> starting on %s:%s, %swaiting\n",
1922 saddr
, sport
, wait
? "" : "not ");
1924 error
= pthread_mutex_init(&gdb_lock
, NULL
);
1926 errc(1, error
, "gdb mutex init");
1927 error
= pthread_cond_init(&idle_vcpus
, NULL
);
1929 errc(1, error
, "gdb cv init");
1931 memset(&hints
, 0, sizeof(hints
));
1932 hints
.ai_family
= AF_UNSPEC
;
1933 hints
.ai_socktype
= SOCK_STREAM
;
1934 hints
.ai_flags
= AI_NUMERICSERV
| AI_PASSIVE
;
1936 error
= getaddrinfo(saddr
, sport
, &hints
, &gdbaddr
);
1938 errx(1, "gdb address resolution: %s", gai_strerror(error
));
1941 s
= socket(gdbaddr
->ai_family
, gdbaddr
->ai_socktype
, 0);
1943 err(1, "gdb socket create");
1946 (void)setsockopt(s
, SOL_SOCKET
, SO_REUSEADDR
, &optval
, sizeof(optval
));
1948 if (bind(s
, gdbaddr
->ai_addr
, gdbaddr
->ai_addrlen
) < 0)
1949 err(1, "gdb socket bind");
1951 if (listen(s
, 1) < 0)
1952 err(1, "gdb socket listen");
1955 TAILQ_INIT(&breakpoints
);
1956 vcpu_state
= calloc(guest_ncpus
, sizeof(*vcpu_state
));
1959 * Set vcpu 0 in vcpus_suspended. This will trigger the
1960 * logic in gdb_cpu_add() to suspend the first vcpu before
1961 * it starts execution. The vcpu will remain suspended
1962 * until a debugger connects.
1964 CPU_SET(0, &vcpus_suspended
);
1968 flags
= fcntl(s
, F_GETFL
);
1969 if (fcntl(s
, F_SETFL
, flags
| O_NONBLOCK
) == -1)
1970 err(1, "Failed to mark gdb socket non-blocking");
1972 #ifndef WITHOUT_CAPSICUM
1973 limit_gdb_socket(s
);
1975 mevent_add(s
, EVF_READ
, new_connection
, NULL
);
1977 freeaddrinfo(gdbaddr
);