 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2018 Joyent, Inc.
 * Debugger entry for both master and slave CPUs
34 #include <sys/types.h>
37 #include <sys/segments.h>
38 #include <sys/asm_linkage.h>
39 #include <sys/controlregs.h>
40 #include <sys/x86_archext.h>
41 #include <sys/privregs.h>
42 #include <sys/machprivregs.h>
43 #include <sys/kdi_regs.h>
45 #include <sys/uadmin.h>
47 #include <sys/hypervisor.h>
52 #include <kdi_assym.h>
55 /* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
56 #define GET_CPUSAVE_ADDR \
57 movzbq
%gs
:CPU_ID
, %rbx; \
59 movq $KRS_SIZE
, %rcx; \
61 movq $kdi_cpusave
, %rdx; \
66 * Save copies of the IDT and GDT descriptors. Note that we only save the IDT
67 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
68 * debugger through the trap handler. We don't want to clobber the saved IDT
69 * in the process, as we'd end up resuming the world on our IDT.
72 movq
%gs
:CPU_IDT
, %r11; \
73 leaq kdi_idt
(%rip
), %rsi; \
76 movq
%r11, KRS_IDT
(%rax
); \
77 movq
%gs
:CPU_GDT
, %r11; \
78 movq
%r11, KRS_GDT
(%rax
); \
83 #define SAVE_GSBASE(reg) /* nothing */
84 #define RESTORE_GSBASE(reg) /* nothing */
88 #define SAVE_GSBASE(base) \
89 movl $MSR_AMD_GSBASE
, %ecx; \
93 movq
%rdx
, REG_OFF
(KDIREG_GSBASE
)(base
)
95 #define RESTORE_GSBASE(base) \
96 movq REG_OFF
(KDIREG_GSBASE
)(base
), %rdx; \
99 movl $MSR_AMD_GSBASE
, %ecx; \
105 * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack. Note
106 * that on the hypervisor, we skip the save/restore of GSBASE: it's slow, and
109 #define KDI_SAVE_REGS(base) \
110 movq
%rdi
, REG_OFF
(KDIREG_RDI
)(base
); \
111 movq
%rsi
, REG_OFF
(KDIREG_RSI
)(base
); \
112 movq
%rdx
, REG_OFF
(KDIREG_RDX
)(base
); \
113 movq
%rcx
, REG_OFF
(KDIREG_RCX
)(base
); \
114 movq
%r8, REG_OFF
(KDIREG_R8
)(base
); \
115 movq
%r9, REG_OFF
(KDIREG_R9
)(base
); \
116 movq
%rax
, REG_OFF
(KDIREG_RAX
)(base
); \
117 movq
%rbx
, REG_OFF
(KDIREG_RBX
)(base
); \
118 movq
%rbp
, REG_OFF
(KDIREG_RBP
)(base
); \
119 movq
%r10, REG_OFF
(KDIREG_R10
)(base
); \
120 movq
%r11, REG_OFF
(KDIREG_R11
)(base
); \
121 movq
%r12, REG_OFF
(KDIREG_R12
)(base
); \
122 movq
%r13, REG_OFF
(KDIREG_R13
)(base
); \
123 movq
%r14, REG_OFF
(KDIREG_R14
)(base
); \
124 movq
%r15, REG_OFF
(KDIREG_R15
)(base
); \
125 movq
%rbp
, REG_OFF
(KDIREG_SAVFP
)(base
); \
126 movq REG_OFF
(KDIREG_RIP
)(base
), %rax; \
127 movq
%rax
, REG_OFF
(KDIREG_SAVPC
)(base
); \
130 movq
%rax
, REG_OFF
(KDIREG_DS
)(base
); \
132 movq
%rax
, REG_OFF
(KDIREG_ES
)(base
); \
134 movq
%rax
, REG_OFF
(KDIREG_FS
)(base
); \
136 movq
%rax
, REG_OFF
(KDIREG_GS
)(base
); \
139 #define KDI_RESTORE_REGS(base) \
141 RESTORE_GSBASE
(%rdi
); \
142 movq REG_OFF
(KDIREG_ES
)(%rdi
), %rax; \
144 movq REG_OFF
(KDIREG_DS
)(%rdi
), %rax; \
146 movq REG_OFF
(KDIREG_R15
)(%rdi
), %r15; \
147 movq REG_OFF
(KDIREG_R14
)(%rdi
), %r14; \
148 movq REG_OFF
(KDIREG_R13
)(%rdi
), %r13; \
149 movq REG_OFF
(KDIREG_R12
)(%rdi
), %r12; \
150 movq REG_OFF
(KDIREG_R11
)(%rdi
), %r11; \
151 movq REG_OFF
(KDIREG_R10
)(%rdi
), %r10; \
152 movq REG_OFF
(KDIREG_RBP
)(%rdi
), %rbp; \
153 movq REG_OFF
(KDIREG_RBX
)(%rdi
), %rbx; \
154 movq REG_OFF
(KDIREG_RAX
)(%rdi
), %rax; \
155 movq REG_OFF
(KDIREG_R9
)(%rdi
), %r9; \
156 movq REG_OFF
(KDIREG_R8
)(%rdi
), %r8; \
157 movq REG_OFF
(KDIREG_RCX
)(%rdi
), %rcx; \
158 movq REG_OFF
(KDIREG_RDX
)(%rdi
), %rdx; \
159 movq REG_OFF
(KDIREG_RSI
)(%rdi
), %rsi; \
160 movq REG_OFF
(KDIREG_RDI
)(%rdi
), %rdi
163 * Given the address of the current CPU's cpusave area in %rax, the following
164 * macro restores the debugging state to said CPU. Restored state includes
165 * the debug registers from the global %dr variables.
167 * Takes the cpusave area in %rdi as a parameter.
169 #define KDI_RESTORE_DEBUGGING_STATE \
171 leaq kdi_drreg
(%rip
), %r15; \
173 movq DR_CTL
(%r15), %rsi; \
177 movq $KDIREG_DRSTAT_RESERVED
, %rsi; \
181 movq DRADDR_OFF
(0)(%r15), %rsi; \
184 movq DRADDR_OFF
(1)(%r15), %rsi; \
187 movq DRADDR_OFF
(2)(%r15), %rsi; \
190 movq DRADDR_OFF
(3)(%r15), %rsi; \
195 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
196 * The following macros manage the buffer.
199 /* Advance the ring buffer */
200 #define ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
201 movq KRS_CURCRUMBIDX
(cpusave
), tmp1; \
202 cmpq $
[KDI_NCRUMBS
- 1], tmp1; \
204 /* Advance the pointer and index */ \
206 movq tmp1
, KRS_CURCRUMBIDX
(cpusave
); \
207 movq KRS_CURCRUMB
(cpusave
), tmp1; \
208 addq $KRM_SIZE
, tmp1; \
210 1: /* Reset the pointer and index */ \
211 movq $
0, KRS_CURCRUMBIDX
(cpusave
); \
212 leaq KRS_CRUMBS
(cpusave
), tmp1; \
213 2: movq tmp1
, KRS_CURCRUMB
(cpusave
); \
214 /* Clear the new crumb */ \
215 movq $KDI_NCRUMBS
, tmp2; \
216 3: movq $
0, -4(tmp1
, tmp2
, 4); \
220 /* Set a value in the current breadcrumb buffer */
221 #define ADD_CRUMB(cpusave, offset, value, tmp) \
222 movq KRS_CURCRUMB
(cpusave
), tmp; \
223 movq value
, offset
(tmp
)
234 /* XXX implement me */
241 * The main entry point for master CPUs. It also serves as the trap
242 * handler for all traps and interrupts taken during single-step.
245 ALTENTRY
(kdi_master_entry
)
251 /* Save current register state */
252 subq $REG_OFF
(KDIREG_TRAPNO
), %rsp
257 * Clear saved_upcall_mask in unused byte of cs slot on stack.
258 * It can only confuse things.
260 movb $
0, REG_OFF
(KDIREG_CS
)+4(%rsp
)
265 * Switch to the kernel's GSBASE. Neither GSBASE nor the ill-named
266 * KGSBASE can be trusted, as the kernel may or may not have already
267 * done a swapgs. All is not lost, as the kernel can divine the correct
268 * value for us. Note that the previous GSBASE is saved in the
269 * KDI_SAVE_REGS macro to prevent a usermode process's GSBASE from being
270 * blown away. On the hypervisor, we don't need to do this, since it's
271 * ensured we're on our requested kernel GSBASE already.
275 movq
2(%rsp
), %rdi
/* gdt base now in %rdi */
277 call kdi_gdt2gsbase
/* returns kernel's GSBASE in %rax */
281 movl $MSR_AMD_GSBASE
, %ecx
285 GET_CPUSAVE_ADDR
/* %rax = cpusave, %rbx = CPU ID */
287 ADVANCE_CRUMB_POINTER
(%rax
, %rcx
, %rdx
)
289 ADD_CRUMB
(%rax
, KRM_CPU_STATE
, $KDI_CPU_STATE_MASTER
, %rdx
)
291 movq REG_OFF
(KDIREG_RIP
)(%rsp
), %rcx
292 ADD_CRUMB
(%rax
, KRM_PC
, %rcx
, %rdx
)
293 ADD_CRUMB
(%rax
, KRM_SP
, %rsp
, %rdx
)
294 movq REG_OFF
(KDIREG_TRAPNO
)(%rsp
), %rcx
295 ADD_CRUMB
(%rax
, KRM_TRAPNO
, %rcx
, %rdx
)
301 * Were we in the debugger when we took the trap (i.e. was %esp in one
302 * of the debugger's memory ranges)?
304 leaq kdi_memranges
, %rcx
305 movl kdi_nmemranges
, %edx
306 1: cmpq MR_BASE
(%rcx
), %rsp
307 jl
2f
/* below this range -- try the next one */
308 cmpq MR_LIM
(%rcx
), %rsp
309 jg
2f
/* above this range -- try the next one */
310 jmp
3f
/* matched within this range */
313 jz kdi_save_common_state
/* %rsp not within debugger memory */
318 * The master is still set. That should only happen if we hit a trap
319 * while running in the debugger. Note that it may be an intentional
320 * fault. kmdb_dpi_handle_fault will sort it all out.
323 movq REG_OFF
(KDIREG_TRAPNO
)(%rbp
), %rdi
324 movq REG_OFF
(KDIREG_RIP
)(%rbp
), %rsi
325 movq REG_OFF
(KDIREG_RSP
)(%rbp
), %rdx
326 movq
%rbx
, %rcx
/* cpuid */
328 call kdi_dvec_handle_fault
331 * If we're here, we ran into a debugger problem, and the user
332 * elected to solve it by having the debugger debug itself. The
333 * state we're about to save is that of the debugger when it took
337 jmp kdi_save_common_state
339 SET_SIZE
(kdi_master_entry
)
345 * The cross-call handler for slave CPUs.
347 * The debugger is single-threaded, so only one CPU, called the master, may be
348 * running it at any given time. The other CPUs, known as slaves, spin in a
349 * busy loop until there's something for them to do. This is the entry point
350 * for the slaves - they'll be sent here in response to a cross-call sent by the
356 kdi_slave_entry
(void
)
360 ENTRY_NP
(kdi_slave_entry
)
363 * Cross calls are implemented as function calls, so our stack currently
364 * looks like one you'd get from a zero-argument function call. That
365 * is, there's the return %rip at %rsp, and that's about it. We need
366 * to make it look like an interrupt stack. When we first save, we'll
367 * reverse the saved %ss and %rip, which we'll fix back up when we've
368 * freed up some general-purpose registers. We'll also need to fix up
372 pushq
%rsp
/* pushed value off by 8 */
378 pushq
%rax
/* rip should be here */
379 pushq $
-1 /* phony trap error code */
380 pushq $
-1 /* phony trap number */
382 subq $REG_OFF
(KDIREG_TRAPNO
), %rsp
385 movq REG_OFF
(KDIREG_SS
)(%rsp
), %rax
386 xchgq REG_OFF
(KDIREG_RIP
)(%rsp
), %rax
387 movq
%rax
, REG_OFF
(KDIREG_SS
)(%rsp
)
389 movq REG_OFF
(KDIREG_RSP
)(%rsp
), %rax
391 movq
%rax
, REG_OFF
(KDIREG_RSP
)(%rsp
)
394 * We've saved all of the general-purpose registers, and have a stack
395 * that is irettable (after we strip down to the error code)
398 GET_CPUSAVE_ADDR
/* %rax = cpusave, %rbx = CPU ID */
400 ADVANCE_CRUMB_POINTER
(%rax
, %rcx
, %rdx
)
402 ADD_CRUMB
(%rax
, KRM_CPU_STATE
, $KDI_CPU_STATE_SLAVE
, %rdx
)
404 movq REG_OFF
(KDIREG_RIP
)(%rsp
), %rcx
405 ADD_CRUMB
(%rax
, KRM_PC
, %rcx
, %rdx
)
408 jmp kdi_save_common_state
410 SET_SIZE
(kdi_slave_entry
)
415 * The state of the world:
417 * The stack has a complete set of saved registers and segment
418 * selectors, arranged in the kdi_regs.h order. It also has a pointer
419 * to our cpusave area.
421 * We need to save, into the cpusave area, a pointer to these saved
422 * registers. First we check whether we should jump straight back to
423 * the kernel. If not, we save a few more registers, ready the
424 * machine for debugger entry, and enter the debugger.
429 ENTRY_NP
(kdi_save_common_state
)
431 popq
%rdi
/* the cpusave area */
432 movq
%rsp
, KRS_GREGS
(%rdi
) /* save ptr to current saved regs */
437 je kdi_pass_to_kernel
438 popq
%rax
/* cpusave in %rax */
443 /* Save off %cr0, and clear write protect */
445 movq
%rcx
, KRS_CR0
(%rax
)
446 andq $_BITNOT
(CR0_WP
), %rcx
450 /* Save the debug registers and disable any active watchpoints */
452 movq
%rax
, %r15 /* save cpusave area ptr */
455 movq
%rax
, KRS_DRCTL
(%r15)
457 andq $_BITNOT
(KDIREG_DRCTL_WPALLEN_MASK
), %rax
464 movq
%rax
, KRS_DRSTAT
(%r15)
468 movq
%rax
, KRS_DROFF
(0)(%r15)
472 movq
%rax
, KRS_DROFF
(1)(%r15)
476 movq
%rax
, KRS_DROFF
(2)(%r15)
480 movq
%rax
, KRS_DROFF
(3)(%r15)
482 movq
%r15, %rax
/* restore cpu save area to rax */
484 clrq
%rbp
/* stack traces should end here */
487 movq
%rax
, %rdi
/* cpusave */
489 call kdi_debugger_entry
491 /* Pass cpusave to kdi_resume */
496 SET_SIZE
(kdi_save_common_state
)
501 * Resume the world. The code that calls kdi_resume has already
502 * decided whether or not to restore the IDT.
511 /* cpusave in %rdi */
515 * Send this CPU back into the world
518 movq KRS_CR0
(%rdi
), %rdx
522 KDI_RESTORE_DEBUGGING_STATE
524 movq KRS_GREGS
(%rdi
), %rsp
525 KDI_RESTORE_REGS
(%rsp
)
526 addq $REG_OFF
(KDIREG_RIP
), %rsp
/* Discard state, trapno, err */
535 ENTRY_NP
(kdi_pass_to_kernel
)
537 popq
%rdi
/* cpusave */
539 movq $KDI_CPU_STATE_NONE
, KRS_CPU_STATE
(%rdi
)
542 * Find the trap and vector off the right kernel handler. The trap
543 * handler will expect the stack to be in trap order, with %rip being
544 * the last entry, so we'll need to restore all our regs. On i86xpv
545 * we'll need to compensate for XPV_TRAP_POP.
547 * We're hard-coding the three cases where KMDB has installed permanent
548 * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
549 * to work with; we can't use a global since other CPUs can easily pass
550 * through here at the same time.
552 * Note that we handle T_DBGENTR since userspace might have tried it.
554 movq KRS_GREGS
(%rdi
), %rsp
555 movq REG_OFF
(KDIREG_TRAPNO
)(%rsp
), %rdi
560 cmpq $T_DBGENTR
, %rdi
563 * Hmm, unknown handler. Somebody forgot to update this when they
564 * added a new trap interposition... try to drop back into kmdb.
568 #define CALL_TRAP_HANDLER(name) \
569 KDI_RESTORE_REGS
(%rsp
); \
570 /* Discard state, trapno, err */ \
571 addq $REG_OFF
(KDIREG_RIP
), %rsp; \
576 CALL_TRAP_HANDLER
(dbgtrap
)
579 CALL_TRAP_HANDLER
(brktrap
)
582 CALL_TRAP_HANDLER
(invaltrap
)
585 SET_SIZE
(kdi_pass_to_kernel
)
588 * A minimal version of mdboot(), to be used by the master CPU only.
593 movl $A_SHUTDOWN
, %esi
596 movl $SHUTDOWN_reboot
, %edi
597 call HYPERVISOR_shutdown
610 kdi_cpu_debug_init
(kdi_cpusave_t
*save
)
615 ENTRY_NP
(kdi_cpu_debug_init
)
619 pushq
%rbx
/* macro will clobber %rbx */
620 KDI_RESTORE_DEBUGGING_STATE
626 SET_SIZE
(kdi_cpu_debug_init
)