15325 bhyve upstream sync 2023 January
[illumos-gate.git] / usr / src / cmd / bhyve / gdb.c
blob05b14b4b8abb02fb27f5105bb6693f6ac0d58964
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2017-2018 John H. Baldwin <jhb@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
31 #include <sys/param.h>
32 #ifndef WITHOUT_CAPSICUM
33 #include <sys/capsicum.h>
34 #endif
35 #ifdef __FreeBSD__
36 #include <sys/endian.h>
37 #else
38 #include <endian.h>
39 #endif
40 #include <sys/ioctl.h>
41 #include <sys/mman.h>
42 #include <sys/queue.h>
43 #include <sys/socket.h>
44 #include <machine/atomic.h>
45 #include <machine/specialreg.h>
46 #include <machine/vmm.h>
47 #include <netinet/in.h>
48 #include <assert.h>
49 #ifndef WITHOUT_CAPSICUM
50 #include <capsicum_helpers.h>
51 #endif
52 #include <err.h>
53 #include <errno.h>
54 #include <fcntl.h>
55 #include <netdb.h>
56 #include <pthread.h>
57 #include <pthread_np.h>
58 #include <stdbool.h>
59 #include <stdio.h>
60 #include <stdlib.h>
61 #include <string.h>
62 #include <sysexits.h>
63 #include <unistd.h>
64 #include <vmmapi.h>
66 #include "bhyverun.h"
67 #include "config.h"
68 #include "gdb.h"
69 #include "mem.h"
70 #include "mevent.h"
73 * GDB_SIGNAL_* numbers are part of the GDB remote protocol. Most stops
74 * use SIGTRAP.
76 #define GDB_SIGNAL_TRAP 5
78 static void gdb_resume_vcpus(void);
79 static void check_command(int fd);
81 static struct mevent *read_event, *write_event;
83 static cpuset_t vcpus_active, vcpus_suspended, vcpus_waiting;
84 static pthread_mutex_t gdb_lock;
85 static pthread_cond_t idle_vcpus;
86 static bool first_stop, report_next_stop, swbreak_enabled;
/*
 * A growable I/O buffer holding 'capacity' bytes at 'data'.  A read
 * buffer keeps 'start' at zero and counts valid bytes in 'len'.  A
 * write buffer consumes from index 'start' and keeps the number of
 * bytes still to be sent in 'len'.
 */
struct io_buffer {
	uint8_t *data;
	size_t capacity;
	size_t start;
	size_t len;
};
102 struct breakpoint {
103 uint64_t gpa;
104 uint8_t shadow_inst;
105 TAILQ_ENTRY(breakpoint) link;
/*
 * When a vCPU stops due to an event that should be reported to the
 * debugger, information about the event is stored in this structure.
 * The vCPU thread then sets 'stopped_vcpu' if it is not already set
 * and stops other vCPUs so the event can be reported.  The
 * report_stop() function reports the event for the 'stopped_vcpu'
 * vCPU.  When the debugger resumes execution via continue or step,
 * the event for 'stopped_vcpu' is cleared.  vCPUs will loop in their
 * event handlers until the associated event is reported or disabled.
 *
 * An idle vCPU will have all of the boolean fields set to false.
 *
 * When a vCPU is stepped, 'stepping' is set to true when the vCPU is
 * released to execute the stepped instruction.  When the vCPU reports
 * the stepping trap, 'stepped' is set.
 *
 * When a vCPU hits a breakpoint set by the debug server,
 * 'hit_swbreak' is set to true.
 */
struct vcpu_state {
	bool stepping;
	bool stepped;
	bool hit_swbreak;
};
133 static struct io_buffer cur_comm, cur_resp;
134 static uint8_t cur_csum;
135 static struct vmctx *ctx;
136 static int cur_fd = -1;
137 static TAILQ_HEAD(, breakpoint) breakpoints;
138 static struct vcpu_state *vcpu_state;
139 static int cur_vcpu, stopped_vcpu;
140 static bool gdb_active = false;
142 static const int gdb_regset[] = {
143 VM_REG_GUEST_RAX,
144 VM_REG_GUEST_RBX,
145 VM_REG_GUEST_RCX,
146 VM_REG_GUEST_RDX,
147 VM_REG_GUEST_RSI,
148 VM_REG_GUEST_RDI,
149 VM_REG_GUEST_RBP,
150 VM_REG_GUEST_RSP,
151 VM_REG_GUEST_R8,
152 VM_REG_GUEST_R9,
153 VM_REG_GUEST_R10,
154 VM_REG_GUEST_R11,
155 VM_REG_GUEST_R12,
156 VM_REG_GUEST_R13,
157 VM_REG_GUEST_R14,
158 VM_REG_GUEST_R15,
159 VM_REG_GUEST_RIP,
160 VM_REG_GUEST_RFLAGS,
161 VM_REG_GUEST_CS,
162 VM_REG_GUEST_SS,
163 VM_REG_GUEST_DS,
164 VM_REG_GUEST_ES,
165 VM_REG_GUEST_FS,
166 VM_REG_GUEST_GS
169 static const int gdb_regsize[] = {
#ifdef GDB_LOG
#include <stdarg.h>
#include <stdio.h>

/*
 * Debug logging to /tmp/bhyve_gdb.log, lazily opened on first use and
 * line-buffered so entries appear promptly.
 */
static void __printflike(1, 2)
debug(const char *fmt, ...)
{
	static FILE *logfile;
	va_list args;

	if (logfile == NULL) {
		logfile = fopen("/tmp/bhyve_gdb.log", "w");
		if (logfile == NULL)
			return;
#ifndef WITHOUT_CAPSICUM
		/* Restrict the log descriptor to writes under Capsicum. */
		if (caph_limit_stream(fileno(logfile), CAPH_WRITE) == -1) {
			fclose(logfile);
			logfile = NULL;
			return;
		}
#endif
		setlinebuf(logfile);
	}
	va_start(args, fmt);
	vfprintf(logfile, fmt, args);
	va_end(args);
}
#else
#ifndef __FreeBSD__
/*
 * A totally empty debug() makes the compiler grumpy due to how its used with
 * some control flow here.
 */
#define debug(...) do { } while (0)
#else
#define debug(...)
#endif
#endif
235 static void remove_all_sw_breakpoints(void);
237 static int
238 guest_paging_info(int vcpu, struct vm_guest_paging *paging)
240 uint64_t regs[4];
241 const int regset[4] = {
242 VM_REG_GUEST_CR0,
243 VM_REG_GUEST_CR3,
244 VM_REG_GUEST_CR4,
245 VM_REG_GUEST_EFER
248 if (vm_get_register_set(ctx, vcpu, nitems(regset), regset, regs) == -1)
249 return (-1);
252 * For the debugger, always pretend to be the kernel (CPL 0),
253 * and if long-mode is enabled, always parse addresses as if
254 * in 64-bit mode.
256 paging->cr3 = regs[1];
257 paging->cpl = 0;
258 if (regs[3] & EFER_LMA)
259 paging->cpu_mode = CPU_MODE_64BIT;
260 else if (regs[0] & CR0_PE)
261 paging->cpu_mode = CPU_MODE_PROTECTED;
262 else
263 paging->cpu_mode = CPU_MODE_REAL;
264 if (!(regs[0] & CR0_PG))
265 paging->paging_mode = PAGING_MODE_FLAT;
266 else if (!(regs[2] & CR4_PAE))
267 paging->paging_mode = PAGING_MODE_32;
268 else if (regs[3] & EFER_LME)
269 paging->paging_mode = PAGING_MODE_64;
270 else
271 paging->paging_mode = PAGING_MODE_PAE;
272 return (0);
276 * Map a guest virtual address to a physical address (for a given vcpu).
277 * If a guest virtual address is valid, return 1. If the address is
278 * not valid, return 0. If an error occurs obtaining the mapping,
279 * return -1.
281 static int
282 guest_vaddr2paddr(int vcpu, uint64_t vaddr, uint64_t *paddr)
284 struct vm_guest_paging paging;
285 int fault;
287 if (guest_paging_info(vcpu, &paging) == -1)
288 return (-1);
291 * Always use PROT_READ. We really care if the VA is
292 * accessible, not if the current vCPU can write.
294 if (vm_gla2gpa_nofault(ctx, vcpu, &paging, vaddr, PROT_READ, paddr,
295 &fault) == -1)
296 return (-1);
297 if (fault)
298 return (0);
299 return (1);
302 static void
303 io_buffer_reset(struct io_buffer *io)
306 io->start = 0;
307 io->len = 0;
310 /* Available room for adding data. */
311 static size_t
312 io_buffer_avail(struct io_buffer *io)
315 return (io->capacity - (io->start + io->len));
318 static uint8_t *
319 io_buffer_head(struct io_buffer *io)
322 return (io->data + io->start);
325 static uint8_t *
326 io_buffer_tail(struct io_buffer *io)
329 return (io->data + io->start + io->len);
332 static void
333 io_buffer_advance(struct io_buffer *io, size_t amount)
336 assert(amount <= io->len);
337 io->start += amount;
338 io->len -= amount;
341 static void
342 io_buffer_consume(struct io_buffer *io, size_t amount)
345 io_buffer_advance(io, amount);
346 if (io->len == 0) {
347 io->start = 0;
348 return;
352 * XXX: Consider making this move optional and compacting on a
353 * future read() before realloc().
355 memmove(io->data, io_buffer_head(io), io->len);
356 io->start = 0;
359 static void
360 io_buffer_grow(struct io_buffer *io, size_t newsize)
362 uint8_t *new_data;
363 size_t avail, new_cap;
365 avail = io_buffer_avail(io);
366 if (newsize <= avail)
367 return;
369 new_cap = io->capacity + (newsize - avail);
370 new_data = realloc(io->data, new_cap);
371 if (new_data == NULL)
372 err(1, "Failed to grow GDB I/O buffer");
373 io->data = new_data;
374 io->capacity = new_cap;
377 static bool
378 response_pending(void)
381 if (cur_resp.start == 0 && cur_resp.len == 0)
382 return (false);
383 if (cur_resp.start + cur_resp.len == 1 && cur_resp.data[0] == '+')
384 return (false);
385 return (true);
388 static void
389 close_connection(void)
393 * XXX: This triggers a warning because mevent does the close
394 * before the EV_DELETE.
396 pthread_mutex_lock(&gdb_lock);
397 mevent_delete(write_event);
398 mevent_delete_close(read_event);
399 write_event = NULL;
400 read_event = NULL;
401 io_buffer_reset(&cur_comm);
402 io_buffer_reset(&cur_resp);
403 cur_fd = -1;
405 remove_all_sw_breakpoints();
407 /* Clear any pending events. */
408 memset(vcpu_state, 0, guest_ncpus * sizeof(*vcpu_state));
410 /* Resume any stopped vCPUs. */
411 gdb_resume_vcpus();
412 pthread_mutex_unlock(&gdb_lock);
/* Convert a nibble (0-15) to its lowercase hex character. */
static uint8_t
hex_digit(uint8_t nibble)
{
	return (nibble <= 9 ? nibble + '0' : nibble + 'a' - 10);
}
/* Decode one hex character; non-hex input yields 0xF. */
static uint8_t
parse_digit(uint8_t v)
{
	if (v >= '0' && v <= '9')
		return (v - '0');
	if (v >= 'a' && v <= 'f')
		return (v - 'a' + 10);
	if (v >= 'A' && v <= 'F')
		return (v - 'A' + 10);
	return (0xF);
}
/* Parses big-endian hexadecimal. */
static uintmax_t
parse_integer(const uint8_t *p, size_t len)
{
	uintmax_t val = 0;

	for (; len > 0; p++, len--)
		val = (val << 4) | parse_digit(*p);
	return (val);
}
/* Decode a pair of hex characters into one byte. */
static uint8_t
parse_byte(const uint8_t *p)
{
	return (parse_digit(p[0]) << 4 | parse_digit(p[1]));
}
461 static void
462 send_pending_data(int fd)
464 ssize_t nwritten;
466 if (cur_resp.len == 0) {
467 mevent_disable(write_event);
468 return;
470 nwritten = write(fd, io_buffer_head(&cur_resp), cur_resp.len);
471 if (nwritten == -1) {
472 warn("Write to GDB socket failed");
473 close_connection();
474 } else {
475 io_buffer_advance(&cur_resp, nwritten);
476 if (cur_resp.len == 0)
477 mevent_disable(write_event);
478 else
479 mevent_enable(write_event);
483 /* Append a single character to the output buffer. */
484 static void
485 send_char(uint8_t data)
487 io_buffer_grow(&cur_resp, 1);
488 *io_buffer_tail(&cur_resp) = data;
489 cur_resp.len++;
492 /* Append an array of bytes to the output buffer. */
493 static void
494 send_data(const uint8_t *data, size_t len)
497 io_buffer_grow(&cur_resp, len);
498 memcpy(io_buffer_tail(&cur_resp), data, len);
499 cur_resp.len += len;
/* Render a byte as two hex characters into buf[0..1]. */
static void
format_byte(uint8_t v, uint8_t *buf)
{
	buf[0] = hex_digit(v >> 4);
	buf[1] = hex_digit(v & 0xf);
}
/*
 * Append a single byte (formatted as two hex characters) to the
 * output buffer.
 */
static void
send_byte(uint8_t v)
{
	uint8_t hex[2];

	format_byte(v, hex);
	send_data(hex, sizeof(hex));
}
523 static void
524 start_packet(void)
527 send_char('$');
528 cur_csum = 0;
531 static void
532 finish_packet(void)
535 send_char('#');
536 send_byte(cur_csum);
537 debug("-> %.*s\n", (int)cur_resp.len, io_buffer_head(&cur_resp));
541 * Append a single character (for the packet payload) and update the
542 * checksum.
544 static void
545 append_char(uint8_t v)
548 send_char(v);
549 cur_csum += v;
553 * Append an array of bytes (for the packet payload) and update the
554 * checksum.
556 static void
557 append_packet_data(const uint8_t *data, size_t len)
560 send_data(data, len);
561 while (len > 0) {
562 cur_csum += *data;
563 data++;
564 len--;
568 static void
569 append_string(const char *str)
572 #ifdef __FreeBSD__
573 append_packet_data(str, strlen(str));
574 #else
575 append_packet_data((const uint8_t *)str, strlen(str));
576 #endif
/* Append a byte as two hex payload characters. */
static void
append_byte(uint8_t v)
{
	uint8_t hex[2];

	format_byte(v, hex);
	append_packet_data(hex, sizeof(hex));
}
/* Append 'len' bytes of 'value', least-significant byte first. */
static void
append_unsigned_native(uintmax_t value, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++, value >>= 8)
		append_byte(value);
}
/* Append 'len' bytes of 'value' in big-endian hex order. */
static void
append_unsigned_be(uintmax_t value, size_t len)
{
	char hex[len * 2];
	size_t i;

	/* Fill from the end so the most-significant byte comes first. */
	for (i = 0; i < len; i++) {
#ifdef __FreeBSD__
		format_byte(value, hex + (len - i - 1) * 2);
#else
		format_byte(value, (uint8_t *)(hex + (len - i - 1) * 2));
#endif
		value >>= 8;
	}
#ifdef __FreeBSD__
	append_packet_data(hex, sizeof(hex));
#else
	append_packet_data((const uint8_t *)hex, sizeof(hex));
#endif
}
/* Append an integer using the minimum number of big-endian bytes. */
static void
append_integer(unsigned int value)
{
	if (value == 0)
		append_char('0');
	else
		append_unsigned_be(value, (fls(value) + 7) / 8);
}
/* Append a string hex-encoded, one byte per character. */
static void
append_asciihex(const char *str)
{
	for (; *str != '\0'; str++)
		append_byte(*str);
}
/* Send "$#00": the protocol's "unsupported command" reply. */
static void
send_empty_response(void)
{
	start_packet();
	finish_packet();
}
/* Send an "Exx" error reply carrying 'error' as a hex byte. */
static void
send_error(int error)
{
	start_packet();
	append_char('E');
	append_byte(error);
	finish_packet();
}
/* Send the "OK" success reply. */
static void
send_ok(void)
{
	start_packet();
	append_string("OK");
	finish_packet();
}
/*
 * Parse a GDB thread-id: 0 means "any", -1 means "all", -2 flags an
 * empty (invalid) id, otherwise the hex-encoded thread number.
 */
static int
parse_threadid(const uint8_t *data, size_t len)
{
	if (len == 0)
		return (-2);
	if (len == 1 && *data == '0')
		return (0);
	if (len == 2 && memcmp(data, "-1", 2) == 0)
		return (-1);
	return (parse_integer(data, len));
}
681 * Report the current stop event to the debugger. If the stop is due
682 * to an event triggered on a specific vCPU such as a breakpoint or
683 * stepping trap, stopped_vcpu will be set to the vCPU triggering the
684 * stop. If 'set_cur_vcpu' is true, then cur_vcpu will be updated to
685 * the reporting vCPU for vCPU events.
687 static void
688 report_stop(bool set_cur_vcpu)
690 struct vcpu_state *vs;
692 start_packet();
693 if (stopped_vcpu == -1) {
694 append_char('S');
695 append_byte(GDB_SIGNAL_TRAP);
696 } else {
697 vs = &vcpu_state[stopped_vcpu];
698 if (set_cur_vcpu)
699 cur_vcpu = stopped_vcpu;
700 append_char('T');
701 append_byte(GDB_SIGNAL_TRAP);
702 append_string("thread:");
703 append_integer(stopped_vcpu + 1);
704 append_char(';');
705 if (vs->hit_swbreak) {
706 debug("$vCPU %d reporting swbreak\n", stopped_vcpu);
707 if (swbreak_enabled)
708 append_string("swbreak:;");
709 } else if (vs->stepped)
710 debug("$vCPU %d reporting step\n", stopped_vcpu);
711 else
712 debug("$vCPU %d reporting ???\n", stopped_vcpu);
714 finish_packet();
715 report_next_stop = false;
719 * If this stop is due to a vCPU event, clear that event to mark it as
720 * acknowledged.
722 static void
723 discard_stop(void)
725 struct vcpu_state *vs;
727 if (stopped_vcpu != -1) {
728 vs = &vcpu_state[stopped_vcpu];
729 vs->hit_swbreak = false;
730 vs->stepped = false;
731 stopped_vcpu = -1;
733 report_next_stop = true;
736 static void
737 gdb_finish_suspend_vcpus(void)
740 if (first_stop) {
741 first_stop = false;
742 stopped_vcpu = -1;
743 } else if (report_next_stop) {
744 assert(!response_pending());
745 report_stop(true);
746 send_pending_data(cur_fd);
751 * vCPU threads invoke this function whenever the vCPU enters the
752 * debug server to pause or report an event. vCPU threads wait here
753 * as long as the debug server keeps them suspended.
755 static void
756 _gdb_cpu_suspend(int vcpu, bool report_stop)
759 debug("$vCPU %d suspending\n", vcpu);
760 CPU_SET(vcpu, &vcpus_waiting);
761 if (report_stop && CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
762 gdb_finish_suspend_vcpus();
763 while (CPU_ISSET(vcpu, &vcpus_suspended))
764 pthread_cond_wait(&idle_vcpus, &gdb_lock);
765 CPU_CLR(vcpu, &vcpus_waiting);
766 debug("$vCPU %d resuming\n", vcpu);
770 * Invoked at the start of a vCPU thread's execution to inform the
771 * debug server about the new thread.
773 void
774 gdb_cpu_add(int vcpu)
777 if (!gdb_active)
778 return;
779 debug("$vCPU %d starting\n", vcpu);
780 pthread_mutex_lock(&gdb_lock);
781 assert(vcpu < guest_ncpus);
782 CPU_SET(vcpu, &vcpus_active);
783 if (!TAILQ_EMPTY(&breakpoints)) {
784 vm_set_capability(ctx, vcpu, VM_CAP_BPT_EXIT, 1);
785 debug("$vCPU %d enabled breakpoint exits\n", vcpu);
789 * If a vcpu is added while vcpus are stopped, suspend the new
790 * vcpu so that it will pop back out with a debug exit before
791 * executing the first instruction.
793 if (!CPU_EMPTY(&vcpus_suspended)) {
794 CPU_SET(vcpu, &vcpus_suspended);
795 _gdb_cpu_suspend(vcpu, false);
797 pthread_mutex_unlock(&gdb_lock);
801 * Invoked by vCPU before resuming execution. This enables stepping
802 * if the vCPU is marked as stepping.
804 static void
805 gdb_cpu_resume(int vcpu)
807 struct vcpu_state *vs;
808 int error;
810 vs = &vcpu_state[vcpu];
813 * Any pending event should already be reported before
814 * resuming.
816 assert(vs->hit_swbreak == false);
817 assert(vs->stepped == false);
818 if (vs->stepping) {
819 error = vm_set_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, 1);
820 assert(error == 0);
825 * Handler for VM_EXITCODE_DEBUG used to suspend a vCPU when the guest
826 * has been suspended due to an event on different vCPU or in response
827 * to a guest-wide suspend such as Ctrl-C or the stop on attach.
829 void
830 gdb_cpu_suspend(int vcpu)
833 if (!gdb_active)
834 return;
835 pthread_mutex_lock(&gdb_lock);
836 _gdb_cpu_suspend(vcpu, true);
837 gdb_cpu_resume(vcpu);
838 pthread_mutex_unlock(&gdb_lock);
841 static void
842 gdb_suspend_vcpus(void)
845 assert(pthread_mutex_isowned_np(&gdb_lock));
846 debug("suspending all CPUs\n");
847 vcpus_suspended = vcpus_active;
848 vm_suspend_cpu(ctx, -1);
849 if (CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
850 gdb_finish_suspend_vcpus();
854 * Handler for VM_EXITCODE_MTRAP reported when a vCPU single-steps via
855 * the VT-x-specific MTRAP exit.
857 void
858 gdb_cpu_mtrap(int vcpu)
860 struct vcpu_state *vs;
862 if (!gdb_active)
863 return;
864 debug("$vCPU %d MTRAP\n", vcpu);
865 pthread_mutex_lock(&gdb_lock);
866 vs = &vcpu_state[vcpu];
867 if (vs->stepping) {
868 vs->stepping = false;
869 vs->stepped = true;
870 vm_set_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, 0);
871 while (vs->stepped) {
872 if (stopped_vcpu == -1) {
873 debug("$vCPU %d reporting step\n", vcpu);
874 stopped_vcpu = vcpu;
875 gdb_suspend_vcpus();
877 _gdb_cpu_suspend(vcpu, true);
879 gdb_cpu_resume(vcpu);
881 pthread_mutex_unlock(&gdb_lock);
884 static struct breakpoint *
885 find_breakpoint(uint64_t gpa)
887 struct breakpoint *bp;
889 TAILQ_FOREACH(bp, &breakpoints, link) {
890 if (bp->gpa == gpa)
891 return (bp);
893 return (NULL);
896 void
897 gdb_cpu_breakpoint(int vcpu, struct vm_exit *vmexit)
899 struct breakpoint *bp;
900 struct vcpu_state *vs;
901 uint64_t gpa;
902 int error;
904 if (!gdb_active) {
905 fprintf(stderr, "vm_loop: unexpected VMEXIT_DEBUG\n");
906 exit(4);
908 pthread_mutex_lock(&gdb_lock);
909 error = guest_vaddr2paddr(vcpu, vmexit->rip, &gpa);
910 assert(error == 1);
911 bp = find_breakpoint(gpa);
912 if (bp != NULL) {
913 vs = &vcpu_state[vcpu];
914 assert(vs->stepping == false);
915 assert(vs->stepped == false);
916 assert(vs->hit_swbreak == false);
917 vs->hit_swbreak = true;
918 vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, vmexit->rip);
919 for (;;) {
920 if (stopped_vcpu == -1) {
921 debug("$vCPU %d reporting breakpoint at rip %#lx\n", vcpu,
922 vmexit->rip);
923 stopped_vcpu = vcpu;
924 gdb_suspend_vcpus();
926 _gdb_cpu_suspend(vcpu, true);
927 if (!vs->hit_swbreak) {
928 /* Breakpoint reported. */
929 break;
931 bp = find_breakpoint(gpa);
932 if (bp == NULL) {
933 /* Breakpoint was removed. */
934 vs->hit_swbreak = false;
935 break;
938 gdb_cpu_resume(vcpu);
939 } else {
940 debug("$vCPU %d injecting breakpoint at rip %#lx\n", vcpu,
941 vmexit->rip);
942 error = vm_set_register(ctx, vcpu,
943 VM_REG_GUEST_ENTRY_INST_LENGTH, vmexit->u.bpt.inst_length);
944 assert(error == 0);
945 error = vm_inject_exception(ctx, vcpu, IDT_BP, 0, 0, 0);
946 assert(error == 0);
948 pthread_mutex_unlock(&gdb_lock);
951 static bool
952 gdb_step_vcpu(int vcpu)
954 int error, val;
956 debug("$vCPU %d step\n", vcpu);
957 error = vm_get_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, &val);
958 if (error < 0)
959 return (false);
961 discard_stop();
962 vcpu_state[vcpu].stepping = true;
963 vm_resume_cpu(ctx, vcpu);
964 CPU_CLR(vcpu, &vcpus_suspended);
965 pthread_cond_broadcast(&idle_vcpus);
966 return (true);
969 static void
970 gdb_resume_vcpus(void)
973 assert(pthread_mutex_isowned_np(&gdb_lock));
974 vm_resume_cpu(ctx, -1);
975 debug("resuming all CPUs\n");
976 CPU_ZERO(&vcpus_suspended);
977 pthread_cond_broadcast(&idle_vcpus);
980 static void
981 gdb_read_regs(void)
983 uint64_t regvals[nitems(gdb_regset)];
985 if (vm_get_register_set(ctx, cur_vcpu, nitems(gdb_regset),
986 gdb_regset, regvals) == -1) {
987 send_error(errno);
988 return;
990 start_packet();
991 for (size_t i = 0; i < nitems(regvals); i++)
992 append_unsigned_native(regvals[i], gdb_regsize[i]);
993 finish_packet();
996 static void
997 gdb_read_mem(const uint8_t *data, size_t len)
999 uint64_t gpa, gva, val;
1000 uint8_t *cp;
1001 size_t resid, todo, bytes;
1002 bool started;
1003 int error;
1005 /* Skip 'm' */
1006 data += 1;
1007 len -= 1;
1009 /* Parse and consume address. */
1010 cp = memchr(data, ',', len);
1011 if (cp == NULL || cp == data) {
1012 send_error(EINVAL);
1013 return;
1015 gva = parse_integer(data, cp - data);
1016 len -= (cp - data) + 1;
1017 data += (cp - data) + 1;
1019 /* Parse length. */
1020 resid = parse_integer(data, len);
1022 started = false;
1023 while (resid > 0) {
1024 error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);
1025 if (error == -1) {
1026 if (started)
1027 finish_packet();
1028 else
1029 send_error(errno);
1030 return;
1032 if (error == 0) {
1033 if (started)
1034 finish_packet();
1035 else
1036 send_error(EFAULT);
1037 return;
1040 /* Read bytes from current page. */
1041 todo = getpagesize() - gpa % getpagesize();
1042 if (todo > resid)
1043 todo = resid;
1045 cp = paddr_guest2host(ctx, gpa, todo);
1046 if (cp != NULL) {
1048 * If this page is guest RAM, read it a byte
1049 * at a time.
1051 if (!started) {
1052 start_packet();
1053 started = true;
1055 while (todo > 0) {
1056 append_byte(*cp);
1057 cp++;
1058 gpa++;
1059 gva++;
1060 resid--;
1061 todo--;
1063 } else {
1065 * If this page isn't guest RAM, try to handle
1066 * it via MMIO. For MMIO requests, use
1067 * aligned reads of words when possible.
1069 while (todo > 0) {
1070 if (gpa & 1 || todo == 1)
1071 bytes = 1;
1072 else if (gpa & 2 || todo == 2)
1073 bytes = 2;
1074 else
1075 bytes = 4;
1076 error = read_mem(ctx, cur_vcpu, gpa, &val,
1077 bytes);
1078 if (error == 0) {
1079 if (!started) {
1080 start_packet();
1081 started = true;
1083 gpa += bytes;
1084 gva += bytes;
1085 resid -= bytes;
1086 todo -= bytes;
1087 while (bytes > 0) {
1088 append_byte(val);
1089 val >>= 8;
1090 bytes--;
1092 } else {
1093 if (started)
1094 finish_packet();
1095 else
1096 send_error(EFAULT);
1097 return;
1101 assert(resid == 0 || gpa % getpagesize() == 0);
1103 if (!started)
1104 start_packet();
1105 finish_packet();
1108 static void
1109 gdb_write_mem(const uint8_t *data, size_t len)
1111 uint64_t gpa, gva, val;
1112 uint8_t *cp;
1113 size_t resid, todo, bytes;
1114 int error;
1116 /* Skip 'M' */
1117 data += 1;
1118 len -= 1;
1120 /* Parse and consume address. */
1121 cp = memchr(data, ',', len);
1122 if (cp == NULL || cp == data) {
1123 send_error(EINVAL);
1124 return;
1126 gva = parse_integer(data, cp - data);
1127 len -= (cp - data) + 1;
1128 data += (cp - data) + 1;
1130 /* Parse and consume length. */
1131 cp = memchr(data, ':', len);
1132 if (cp == NULL || cp == data) {
1133 send_error(EINVAL);
1134 return;
1136 resid = parse_integer(data, cp - data);
1137 len -= (cp - data) + 1;
1138 data += (cp - data) + 1;
1140 /* Verify the available bytes match the length. */
1141 if (len != resid * 2) {
1142 send_error(EINVAL);
1143 return;
1146 while (resid > 0) {
1147 error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);
1148 if (error == -1) {
1149 send_error(errno);
1150 return;
1152 if (error == 0) {
1153 send_error(EFAULT);
1154 return;
1157 /* Write bytes to current page. */
1158 todo = getpagesize() - gpa % getpagesize();
1159 if (todo > resid)
1160 todo = resid;
1162 cp = paddr_guest2host(ctx, gpa, todo);
1163 if (cp != NULL) {
1165 * If this page is guest RAM, write it a byte
1166 * at a time.
1168 while (todo > 0) {
1169 assert(len >= 2);
1170 *cp = parse_byte(data);
1171 data += 2;
1172 len -= 2;
1173 cp++;
1174 gpa++;
1175 gva++;
1176 resid--;
1177 todo--;
1179 } else {
1181 * If this page isn't guest RAM, try to handle
1182 * it via MMIO. For MMIO requests, use
1183 * aligned writes of words when possible.
1185 while (todo > 0) {
1186 if (gpa & 1 || todo == 1) {
1187 bytes = 1;
1188 val = parse_byte(data);
1189 } else if (gpa & 2 || todo == 2) {
1190 bytes = 2;
1191 val = be16toh(parse_integer(data, 4));
1192 } else {
1193 bytes = 4;
1194 val = be32toh(parse_integer(data, 8));
1196 error = write_mem(ctx, cur_vcpu, gpa, val,
1197 bytes);
1198 if (error == 0) {
1199 gpa += bytes;
1200 gva += bytes;
1201 resid -= bytes;
1202 todo -= bytes;
1203 data += 2 * bytes;
1204 len -= 2 * bytes;
1205 } else {
1206 send_error(EFAULT);
1207 return;
1211 assert(resid == 0 || gpa % getpagesize() == 0);
1213 assert(len == 0);
1214 send_ok();
1217 static bool
1218 set_breakpoint_caps(bool enable)
1220 cpuset_t mask;
1221 int vcpu;
1223 mask = vcpus_active;
1224 while (!CPU_EMPTY(&mask)) {
1225 vcpu = CPU_FFS(&mask) - 1;
1226 CPU_CLR(vcpu, &mask);
1227 if (vm_set_capability(ctx, vcpu, VM_CAP_BPT_EXIT,
1228 enable ? 1 : 0) < 0)
1229 return (false);
1230 debug("$vCPU %d %sabled breakpoint exits\n", vcpu,
1231 enable ? "en" : "dis");
1233 return (true);
1236 static void
1237 remove_all_sw_breakpoints(void)
1239 struct breakpoint *bp, *nbp;
1240 uint8_t *cp;
1242 if (TAILQ_EMPTY(&breakpoints))
1243 return;
1245 TAILQ_FOREACH_SAFE(bp, &breakpoints, link, nbp) {
1246 debug("remove breakpoint at %#lx\n", bp->gpa);
1247 cp = paddr_guest2host(ctx, bp->gpa, 1);
1248 *cp = bp->shadow_inst;
1249 TAILQ_REMOVE(&breakpoints, bp, link);
1250 free(bp);
1252 TAILQ_INIT(&breakpoints);
1253 set_breakpoint_caps(false);
1256 static void
1257 update_sw_breakpoint(uint64_t gva, int kind, bool insert)
1259 struct breakpoint *bp;
1260 uint64_t gpa;
1261 uint8_t *cp;
1262 int error;
1264 if (kind != 1) {
1265 send_error(EINVAL);
1266 return;
1269 error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);
1270 if (error == -1) {
1271 send_error(errno);
1272 return;
1274 if (error == 0) {
1275 send_error(EFAULT);
1276 return;
1279 cp = paddr_guest2host(ctx, gpa, 1);
1281 /* Only permit breakpoints in guest RAM. */
1282 if (cp == NULL) {
1283 send_error(EFAULT);
1284 return;
1287 /* Find any existing breakpoint. */
1288 bp = find_breakpoint(gpa);
1291 * Silently ignore duplicate commands since the protocol
1292 * requires these packets to be idempotent.
1294 if (insert) {
1295 if (bp == NULL) {
1296 if (TAILQ_EMPTY(&breakpoints) &&
1297 !set_breakpoint_caps(true)) {
1298 send_empty_response();
1299 return;
1301 bp = malloc(sizeof(*bp));
1302 bp->gpa = gpa;
1303 bp->shadow_inst = *cp;
1304 *cp = 0xcc; /* INT 3 */
1305 TAILQ_INSERT_TAIL(&breakpoints, bp, link);
1306 debug("new breakpoint at %#lx\n", gpa);
1308 } else {
1309 if (bp != NULL) {
1310 debug("remove breakpoint at %#lx\n", gpa);
1311 *cp = bp->shadow_inst;
1312 TAILQ_REMOVE(&breakpoints, bp, link);
1313 free(bp);
1314 if (TAILQ_EMPTY(&breakpoints))
1315 set_breakpoint_caps(false);
1318 send_ok();
/* Handle a 'Z'/'z' packet: insert or remove a breakpoint. */
static void
parse_breakpoint(const uint8_t *data, size_t len)
{
	uint64_t gva;
	uint8_t *cp;
	bool insert;
	int kind, type;

	insert = data[0] == 'Z';

	/* Skip 'Z/z' */
	data += 1;
	len -= 1;

	/* Parse and consume type. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	type = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse and consume address. */
	cp = memchr(data, ',', len);
	if (cp == NULL || cp == data) {
		send_error(EINVAL);
		return;
	}
	gva = parse_integer(data, cp - data);
	len -= (cp - data) + 1;
	data += (cp - data) + 1;

	/* Parse and consume kind. */
	cp = memchr(data, ';', len);
	if (cp == data) {
		send_error(EINVAL);
		return;
	}
	if (cp != NULL) {
		/*
		 * We do not advertise support for either the
		 * ConditionalBreakpoints or BreakpointCommands
		 * features, so we should not be getting conditions or
		 * commands from the remote end.
		 */
		send_empty_response();
		return;
	}
	kind = parse_integer(data, len);
	data += len;
	len = 0;

	switch (type) {
	case 0:
		update_sw_breakpoint(gva, kind, insert);
		break;
	default:
		/* Only software breakpoints are supported. */
		send_empty_response();
		break;
	}
}
/* True when the packet in 'data' starts with the string 'cmd'. */
static bool
command_equals(const uint8_t *data, size_t len, const char *cmd)
{
	size_t cmdlen = strlen(cmd);

	if (cmdlen > len)
		return (false);
	return (memcmp(data, cmd, cmdlen) == 0);
}
1394 static void
1395 check_features(const uint8_t *data, size_t len)
1397 char *feature, *next_feature, *str, *value;
1398 bool supported;
1400 str = malloc(len + 1);
1401 memcpy(str, data, len);
1402 str[len] = '\0';
1403 next_feature = str;
1405 while ((feature = strsep(&next_feature, ";")) != NULL) {
1407 * Null features shouldn't exist, but skip if they
1408 * do.
1410 if (strcmp(feature, "") == 0)
1411 continue;
1414 * Look for the value or supported / not supported
1415 * flag.
1417 value = strchr(feature, '=');
1418 if (value != NULL) {
1419 *value = '\0';
1420 value++;
1421 supported = true;
1422 } else {
1423 value = feature + strlen(feature) - 1;
1424 switch (*value) {
1425 case '+':
1426 supported = true;
1427 break;
1428 case '-':
1429 supported = false;
1430 break;
1431 default:
1433 * This is really a protocol error,
1434 * but we just ignore malformed
1435 * features for ease of
1436 * implementation.
1438 continue;
1440 value = NULL;
1443 if (strcmp(feature, "swbreak") == 0)
1444 swbreak_enabled = supported;
1446 #ifndef __FreeBSD__
1448 * The compiler dislikes 'supported' being set but never used.
1449 * Make it happy here.
1451 if (supported) {
1452 debug("feature '%s' supported\n", feature);
1454 #endif /* __FreeBSD__ */
1456 free(str);
1458 start_packet();
1460 /* This is an arbitrary limit. */
1461 append_string("PacketSize=4096");
1462 append_string(";swbreak+");
1463 finish_packet();
1466 static void
1467 gdb_query(const uint8_t *data, size_t len)
1471 * TODO:
1472 * - qSearch
1474 if (command_equals(data, len, "qAttached")) {
1475 start_packet();
1476 append_char('1');
1477 finish_packet();
1478 } else if (command_equals(data, len, "qC")) {
1479 start_packet();
1480 append_string("QC");
1481 append_integer(cur_vcpu + 1);
1482 finish_packet();
1483 } else if (command_equals(data, len, "qfThreadInfo")) {
1484 cpuset_t mask;
1485 bool first;
1486 int vcpu;
1488 if (CPU_EMPTY(&vcpus_active)) {
1489 send_error(EINVAL);
1490 return;
1492 mask = vcpus_active;
1493 start_packet();
1494 append_char('m');
1495 first = true;
1496 while (!CPU_EMPTY(&mask)) {
1497 vcpu = CPU_FFS(&mask) - 1;
1498 CPU_CLR(vcpu, &mask);
1499 if (first)
1500 first = false;
1501 else
1502 append_char(',');
1503 append_integer(vcpu + 1);
1505 finish_packet();
1506 } else if (command_equals(data, len, "qsThreadInfo")) {
1507 start_packet();
1508 append_char('l');
1509 finish_packet();
1510 } else if (command_equals(data, len, "qSupported")) {
1511 data += strlen("qSupported");
1512 len -= strlen("qSupported");
1513 check_features(data, len);
1514 } else if (command_equals(data, len, "qThreadExtraInfo")) {
1515 char buf[16];
1516 int tid;
1518 data += strlen("qThreadExtraInfo");
1519 len -= strlen("qThreadExtraInfo");
1520 if (*data != ',') {
1521 send_error(EINVAL);
1522 return;
1524 tid = parse_threadid(data + 1, len - 1);
1525 if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
1526 send_error(EINVAL);
1527 return;
1530 snprintf(buf, sizeof(buf), "vCPU %d", tid - 1);
1531 start_packet();
1532 append_asciihex(buf);
1533 finish_packet();
1534 } else
1535 send_empty_response();
1538 static void
1539 handle_command(const uint8_t *data, size_t len)
1542 /* Reject packets with a sequence-id. */
1543 if (len >= 3 && data[0] >= '0' && data[0] <= '9' &&
1544 data[0] >= '0' && data[0] <= '9' && data[2] == ':') {
1545 send_empty_response();
1546 return;
1549 switch (*data) {
1550 case 'c':
1551 if (len != 1) {
1552 send_error(EINVAL);
1553 break;
1556 discard_stop();
1557 gdb_resume_vcpus();
1558 break;
1559 case 'D':
1560 send_ok();
1562 /* TODO: Resume any stopped CPUs. */
1563 break;
1564 case 'g': {
1565 gdb_read_regs();
1566 break;
1568 case 'H': {
1569 int tid;
1571 if (data[1] != 'g' && data[1] != 'c') {
1572 send_error(EINVAL);
1573 break;
1575 tid = parse_threadid(data + 2, len - 2);
1576 if (tid == -2) {
1577 send_error(EINVAL);
1578 break;
1581 if (CPU_EMPTY(&vcpus_active)) {
1582 send_error(EINVAL);
1583 break;
1585 if (tid == -1 || tid == 0)
1586 cur_vcpu = CPU_FFS(&vcpus_active) - 1;
1587 else if (CPU_ISSET(tid - 1, &vcpus_active))
1588 cur_vcpu = tid - 1;
1589 else {
1590 send_error(EINVAL);
1591 break;
1593 send_ok();
1594 break;
1596 case 'm':
1597 gdb_read_mem(data, len);
1598 break;
1599 case 'M':
1600 gdb_write_mem(data, len);
1601 break;
1602 case 'T': {
1603 int tid;
1605 tid = parse_threadid(data + 1, len - 1);
1606 if (tid <= 0 || !CPU_ISSET(tid - 1, &vcpus_active)) {
1607 send_error(EINVAL);
1608 return;
1610 send_ok();
1611 break;
1613 case 'q':
1614 gdb_query(data, len);
1615 break;
1616 case 's':
1617 if (len != 1) {
1618 send_error(EINVAL);
1619 break;
1622 /* Don't send a reply until a stop occurs. */
1623 if (!gdb_step_vcpu(cur_vcpu)) {
1624 send_error(EOPNOTSUPP);
1625 break;
1627 break;
1628 case 'z':
1629 case 'Z':
1630 parse_breakpoint(data, len);
1631 break;
1632 case '?':
1633 report_stop(false);
1634 break;
1635 case 'G': /* TODO */
1636 case 'v':
1637 /* Handle 'vCont' */
1638 /* 'vCtrlC' */
1639 case 'p': /* TODO */
1640 case 'P': /* TODO */
1641 case 'Q': /* TODO */
1642 case 't': /* TODO */
1643 case 'X': /* TODO */
1644 default:
1645 send_empty_response();
/*
 * Check for a valid packet in the command buffer.
 *
 * Consumes as many complete items from cur_comm as possible: Ctrl-C
 * interrupt bytes, '+'/'-' acknowledgements of our previous response,
 * and full '$<payload>#<2-hex-digit-checksum>' packets.  Returns as
 * soon as the buffer is empty or holds only a partial packet, leaving
 * the partial data in place for the next read.
 */
static void
check_command(int fd)
{
	uint8_t *head, *hash, *p, sum;
	size_t avail, plen;

	for (;;) {
		avail = cur_comm.len;
		if (avail == 0)
			return;
		head = io_buffer_head(&cur_comm);
		switch (*head) {
		case 0x03:
			/* Raw Ctrl-C: the debugger requests a stop. */
			debug("<- Ctrl-C\n");
			io_buffer_consume(&cur_comm, 1);

			gdb_suspend_vcpus();
			break;
		case '+':
			/* ACK of previous response. */
			debug("<- +\n");
			/* The peer received it; drop the queued response. */
			if (response_pending())
				io_buffer_reset(&cur_resp);
			io_buffer_consume(&cur_comm, 1);
			/* A deferred stop reply can now be delivered. */
			if (stopped_vcpu != -1 && report_next_stop) {
				report_stop(true);
				send_pending_data(fd);
			}
			break;
		case '-':
			/* NACK of previous response. */
			debug("<- -\n");
			if (response_pending()) {
				/*
				 * Rewind cur_resp to its start so the
				 * whole response is retransmitted,
				 * skipping a leading ACK byte if one
				 * was queued with it.
				 */
				cur_resp.len += cur_resp.start;
				cur_resp.start = 0;
				if (cur_resp.data[0] == '+')
					io_buffer_advance(&cur_resp, 1);
				debug("-> %.*s\n", (int)cur_resp.len,
				    io_buffer_head(&cur_resp));
			}
			io_buffer_consume(&cur_comm, 1);
			send_pending_data(fd);
			break;
		case '$':
			/* Packet. */

			if (response_pending()) {
				warnx("New GDB command while response in "
				    "progress");
				io_buffer_reset(&cur_resp);
			}

			/* Is packet complete? */
			hash = memchr(head, '#', avail);
			if (hash == NULL)
				return;
			/* '$'..'#' plus the two checksum digits. */
			plen = (hash - head + 1) + 2;
			if (avail < plen)
				return;
			debug("<- %.*s\n", (int)plen, head);

			/* Verify checksum. */
			for (sum = 0, p = head + 1; p < hash; p++)
				sum += *p;
			if (sum != parse_byte(hash + 1)) {
				/* Bad checksum: discard and NACK. */
				io_buffer_consume(&cur_comm, plen);
				debug("-> -\n");
				send_char('-');
				send_pending_data(fd);
				break;
			}
			send_char('+');

			/* Payload only: everything between '$' and '#'. */
			handle_command(head + 1, hash - (head + 1));
			io_buffer_consume(&cur_comm, plen);
			if (!response_pending()) {
				debug("-> +\n");
			}
			send_pending_data(fd);
			break;
		default:
			/* XXX: Possibly drop connection instead. */
			debug("-> %02x\n", *head);
			io_buffer_consume(&cur_comm, 1);
			break;
		}
	}
}
1739 static void
1740 gdb_readable(int fd, enum ev_type event __unused, void *arg __unused)
1742 size_t pending;
1743 ssize_t nread;
1744 int n;
1746 if (ioctl(fd, FIONREAD, &n) == -1) {
1747 warn("FIONREAD on GDB socket");
1748 return;
1750 assert(n >= 0);
1751 pending = n;
1754 * 'pending' might be zero due to EOF. We need to call read
1755 * with a non-zero length to detect EOF.
1757 if (pending == 0)
1758 pending = 1;
1760 /* Ensure there is room in the command buffer. */
1761 io_buffer_grow(&cur_comm, pending);
1762 assert(io_buffer_avail(&cur_comm) >= pending);
1764 nread = read(fd, io_buffer_tail(&cur_comm), io_buffer_avail(&cur_comm));
1765 if (nread == 0) {
1766 close_connection();
1767 } else if (nread == -1) {
1768 if (errno == EAGAIN)
1769 return;
1771 warn("Read from GDB socket");
1772 close_connection();
1773 } else {
1774 cur_comm.len += nread;
1775 pthread_mutex_lock(&gdb_lock);
1776 check_command(fd);
1777 pthread_mutex_unlock(&gdb_lock);
/*
 * mevent write callback: the GDB socket became writable again, so
 * flush any response bytes still queued in cur_resp.
 */
static void
gdb_writable(int fd, enum ev_type event __unused, void *arg __unused)
{

	send_pending_data(fd);
}
1788 static void
1789 new_connection(int fd, enum ev_type event __unused, void *arg)
1791 int optval, s;
1793 s = accept4(fd, NULL, NULL, SOCK_NONBLOCK);
1794 if (s == -1) {
1795 if (arg != NULL)
1796 err(1, "Failed accepting initial GDB connection");
1798 /* Silently ignore errors post-startup. */
1799 return;
1802 optval = 1;
1803 if (setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &optval, sizeof(optval)) ==
1804 -1) {
1805 warn("Failed to disable SIGPIPE for GDB connection");
1806 close(s);
1807 return;
1810 pthread_mutex_lock(&gdb_lock);
1811 if (cur_fd != -1) {
1812 close(s);
1813 warnx("Ignoring additional GDB connection.");
1816 read_event = mevent_add(s, EVF_READ, gdb_readable, NULL);
1817 if (read_event == NULL) {
1818 if (arg != NULL)
1819 err(1, "Failed to setup initial GDB connection");
1820 pthread_mutex_unlock(&gdb_lock);
1821 return;
1823 write_event = mevent_add(s, EVF_WRITE, gdb_writable, NULL);
1824 if (write_event == NULL) {
1825 if (arg != NULL)
1826 err(1, "Failed to setup initial GDB connection");
1827 mevent_delete_close(read_event);
1828 read_event = NULL;
1831 cur_fd = s;
1832 cur_vcpu = 0;
1833 stopped_vcpu = -1;
1835 /* Break on attach. */
1836 first_stop = true;
1837 report_next_stop = false;
1838 gdb_suspend_vcpus();
1839 pthread_mutex_unlock(&gdb_lock);
1842 #ifndef WITHOUT_CAPSICUM
1843 static void
1844 limit_gdb_socket(int s)
1846 cap_rights_t rights;
1847 unsigned long ioctls[] = { FIONREAD };
1849 cap_rights_init(&rights, CAP_ACCEPT, CAP_EVENT, CAP_READ, CAP_WRITE,
1850 CAP_SETSOCKOPT, CAP_IOCTL);
1851 if (caph_rights_limit(s, &rights) == -1)
1852 errx(EX_OSERR, "Unable to apply rights for sandbox");
1853 if (caph_ioctls_limit(s, ioctls, nitems(ioctls)) == -1)
1854 errx(EX_OSERR, "Unable to apply rights for sandbox");
1856 #endif
1859 #ifndef __FreeBSD__
1861 * Equivalent to init_gdb() below, but without configuring the listening socket.
1862 * This will allow the bhyve process to tolerate mdb attaching/detaching from
1863 * the instance while it is running.
1865 void
1866 init_mdb(struct vmctx *_ctx)
1868 int error;
1869 bool wait;
1871 wait = get_config_bool_default("gdb.wait", false);
1873 error = pthread_mutex_init(&gdb_lock, NULL);
1874 if (error != 0)
1875 errc(1, error, "gdb mutex init");
1876 error = pthread_cond_init(&idle_vcpus, NULL);
1877 if (error != 0)
1878 errc(1, error, "gdb cv init");
1880 ctx = _ctx;
1881 stopped_vcpu = -1;
1882 TAILQ_INIT(&breakpoints);
1883 vcpu_state = calloc(guest_ncpus, sizeof(*vcpu_state));
1884 if (wait) {
1886 * Set vcpu 0 in vcpus_suspended. This will trigger the
1887 * logic in gdb_cpu_add() to suspend the first vcpu before
1888 * it starts execution. The vcpu will remain suspended
1889 * until a debugger connects.
1891 CPU_SET(0, &vcpus_suspended);
1892 stopped_vcpu = 0;
1895 #endif
/*
 * Configure the GDB stub: create, bind, and listen on the socket named
 * by the "gdb.port"/"gdb.address" config values (no-op if gdb.port is
 * unset), initialize stub state, and register the accept handler.  If
 * "gdb.wait" is set, vcpu 0 is marked suspended so the guest halts
 * until a debugger attaches.  All setup failures are fatal.
 */
void
init_gdb(struct vmctx *_ctx)
{
	int error, flags, optval, s;
	struct addrinfo hints;
	struct addrinfo *gdbaddr;
	const char *saddr, *value;
	char *sport;
	bool wait;

	value = get_config_value("gdb.port");
	if (value == NULL)
		return;
	sport = strdup(value);
	if (sport == NULL)
		errx(4, "Failed to allocate memory");

	wait = get_config_bool_default("gdb.wait", false);

	saddr = get_config_value("gdb.address");
	if (saddr == NULL) {
		saddr = "localhost";
	}

	debug("==> starting on %s:%s, %swaiting\n",
	    saddr, sport, wait ? "" : "not ");

	error = pthread_mutex_init(&gdb_lock, NULL);
	if (error != 0)
		errc(1, error, "gdb mutex init");
	error = pthread_cond_init(&idle_vcpus, NULL);
	if (error != 0)
		errc(1, error, "gdb cv init");

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = AF_UNSPEC;
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_flags = AI_NUMERICSERV | AI_PASSIVE;

	error = getaddrinfo(saddr, sport, &hints, &gdbaddr);
	if (error != 0)
		errx(1, "gdb address resolution: %s", gai_strerror(error));

	ctx = _ctx;
	s = socket(gdbaddr->ai_family, gdbaddr->ai_socktype, 0);
	if (s < 0)
		err(1, "gdb socket create");

	optval = 1;
	(void)setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));

	if (bind(s, gdbaddr->ai_addr, gdbaddr->ai_addrlen) < 0)
		err(1, "gdb socket bind");

	if (listen(s, 1) < 0)
		err(1, "gdb socket listen");

	stopped_vcpu = -1;
	TAILQ_INIT(&breakpoints);
	vcpu_state = calloc(guest_ncpus, sizeof(*vcpu_state));
	/* A NULL vcpu_state would be dereferenced later; fail fast. */
	if (vcpu_state == NULL)
		errx(4, "Failed to allocate memory");
	if (wait) {
		/*
		 * Set vcpu 0 in vcpus_suspended. This will trigger the
		 * logic in gdb_cpu_add() to suspend the first vcpu before
		 * it starts execution. The vcpu will remain suspended
		 * until a debugger connects.
		 */
		CPU_SET(0, &vcpus_suspended);
		stopped_vcpu = 0;
	}

	flags = fcntl(s, F_GETFL);
	/* fcntl(F_GETFL) can fail; don't OR O_NONBLOCK into -1. */
	if (flags == -1)
		err(1, "Failed to get gdb socket flags");
	if (fcntl(s, F_SETFL, flags | O_NONBLOCK) == -1)
		err(1, "Failed to mark gdb socket non-blocking");

#ifndef WITHOUT_CAPSICUM
	limit_gdb_socket(s);
#endif
	/* A failed registration would silently disable the stub. */
	if (mevent_add(s, EVF_READ, new_connection, NULL) == NULL)
		errx(1, "Failed to register gdb socket listener");
	gdb_active = true;
	freeaddrinfo(gdbaddr);
	free(sport);
}