/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright 2018 Joyent, Inc.
 */
/*
 * Debugger entry for both master and slave CPUs
 */

#if defined(__lint)
#include <sys/types.h>
#endif
#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#include <sys/psw.h>
#include <sys/uadmin.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#endif

#ifdef _ASM

#include <kdi_assym.h>
#include <assym.h>
/* clobbers %rdx, %rcx, returns addr in %rax, CPU ID in %rbx */
#define	GET_CPUSAVE_ADDR \
	movzbq	%gs:CPU_ID, %rbx;		\
	movq	%rbx, %rax;			\
	movq	$KRS_SIZE, %rcx;		\
	mulq	%rcx;				\
	movq	$kdi_cpusave, %rdx;		\
	/*CSTYLED*/				\
	addq	(%rdx), %rax
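
/*
 * In rough C terms, and assuming kdi_cpusave points to an array of
 * KRS_SIZE-byte per-CPU save areas, the macro computes:
 *
 *	id   = CPU->cpu_id;				(returned in %rbx)
 *	addr = (uintptr_t)kdi_cpusave + id * KRS_SIZE;	(returned in %rax)
 */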
/*
 * Save copies of the IDT and GDT descriptors.  Note that we only save the IDT
 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
 * debugger through the trap handler.  We don't want to clobber the saved IDT
 * in the process, as we'd end up resuming the world on our IDT.
 */
#define	SAVE_IDTGDT \
	movq	%gs:CPU_IDT, %r11;		\
	leaq	kdi_idt(%rip), %rsi;		\
	cmpq	%rsi, %r11;			\
	je	1f;				\
	movq	%r11, KRS_IDT(%rax);		\
	movq	%gs:CPU_GDT, %r11;		\
	movq	%r11, KRS_GDT(%rax);		\
1:
#ifdef __xpv

#define	SAVE_GSBASE(reg)	/* nothing */
#define	RESTORE_GSBASE(reg)	/* nothing */

#else

#define	SAVE_GSBASE(base)				\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	rdmsr;						\
	shlq	$32, %rdx;				\
	orq	%rax, %rdx;				\
	movq	%rdx, REG_OFF(KDIREG_GSBASE)(base)

#define	RESTORE_GSBASE(base)				\
	movq	REG_OFF(KDIREG_GSBASE)(base), %rdx;	\
	movq	%rdx, %rax;				\
	shrq	$32, %rdx;				\
	movl	$MSR_AMD_GSBASE, %ecx;			\
	wrmsr

#endif /* __xpv */
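
/*
 * rdmsr and wrmsr move the 64-bit MSR value through the %edx:%eax pair,
 * so SAVE_GSBASE reassembles the value as, in effect:
 *
 *	gsbase = ((uint64_t)%edx << 32) | %eax;
 *
 * and RESTORE_GSBASE splits the saved value back apart before the wrmsr.
 */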
/*
 * %ss, %rsp, %rflags, %cs, %rip, %err, %trapno are already on the stack.  Note
 * that on the hypervisor, we skip the save/restore of GSBASE: it's slow, and
 * unnecessary.
 */
#define	KDI_SAVE_REGS(base) \
	movq	%rdi, REG_OFF(KDIREG_RDI)(base);	\
	movq	%rsi, REG_OFF(KDIREG_RSI)(base);	\
	movq	%rdx, REG_OFF(KDIREG_RDX)(base);	\
	movq	%rcx, REG_OFF(KDIREG_RCX)(base);	\
	movq	%r8, REG_OFF(KDIREG_R8)(base);		\
	movq	%r9, REG_OFF(KDIREG_R9)(base);		\
	movq	%rax, REG_OFF(KDIREG_RAX)(base);	\
	movq	%rbx, REG_OFF(KDIREG_RBX)(base);	\
	movq	%rbp, REG_OFF(KDIREG_RBP)(base);	\
	movq	%r10, REG_OFF(KDIREG_R10)(base);	\
	movq	%r11, REG_OFF(KDIREG_R11)(base);	\
	movq	%r12, REG_OFF(KDIREG_R12)(base);	\
	movq	%r13, REG_OFF(KDIREG_R13)(base);	\
	movq	%r14, REG_OFF(KDIREG_R14)(base);	\
	movq	%r15, REG_OFF(KDIREG_R15)(base);	\
	movq	%rbp, REG_OFF(KDIREG_SAVFP)(base);	\
	movq	REG_OFF(KDIREG_RIP)(base), %rax;	\
	movq	%rax, REG_OFF(KDIREG_SAVPC)(base);	\
	clrq	%rax;					\
	movw	%ds, %ax;				\
	movq	%rax, REG_OFF(KDIREG_DS)(base);		\
	movw	%es, %ax;				\
	movq	%rax, REG_OFF(KDIREG_ES)(base);		\
	movw	%fs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_FS)(base);		\
	movw	%gs, %ax;				\
	movq	%rax, REG_OFF(KDIREG_GS)(base);		\
	SAVE_GSBASE(base)
#define	KDI_RESTORE_REGS(base) \
	movq	base, %rdi;				\
	RESTORE_GSBASE(%rdi);				\
	movq	REG_OFF(KDIREG_ES)(%rdi), %rax;		\
	movw	%ax, %es;				\
	movq	REG_OFF(KDIREG_DS)(%rdi), %rax;		\
	movw	%ax, %ds;				\
	movq	REG_OFF(KDIREG_R15)(%rdi), %r15;	\
	movq	REG_OFF(KDIREG_R14)(%rdi), %r14;	\
	movq	REG_OFF(KDIREG_R13)(%rdi), %r13;	\
	movq	REG_OFF(KDIREG_R12)(%rdi), %r12;	\
	movq	REG_OFF(KDIREG_R11)(%rdi), %r11;	\
	movq	REG_OFF(KDIREG_R10)(%rdi), %r10;	\
	movq	REG_OFF(KDIREG_RBP)(%rdi), %rbp;	\
	movq	REG_OFF(KDIREG_RBX)(%rdi), %rbx;	\
	movq	REG_OFF(KDIREG_RAX)(%rdi), %rax;	\
	movq	REG_OFF(KDIREG_R9)(%rdi), %r9;		\
	movq	REG_OFF(KDIREG_R8)(%rdi), %r8;		\
	movq	REG_OFF(KDIREG_RCX)(%rdi), %rcx;	\
	movq	REG_OFF(KDIREG_RDX)(%rdi), %rdx;	\
	movq	REG_OFF(KDIREG_RSI)(%rdi), %rsi;	\
	movq	REG_OFF(KDIREG_RDI)(%rdi), %rdi
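
/*
 * Note that the restore runs entirely off %rdi as the base pointer, which
 * is why %rdi itself is reloaded last: overwriting it any earlier would
 * lose the only live reference to the saved register area.
 */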
/*
 * Given the address of the current CPU's cpusave area in %rdi, the following
 * macro restores the debugging state to said CPU.  Restored state includes
 * the debug registers from the global %dr variables.
 *
 * Takes the cpusave area in %rdi as a parameter.
 */
#define	KDI_RESTORE_DEBUGGING_STATE \
	pushq	%rdi;						\
	leaq	kdi_drreg(%rip), %r15;				\
	movl	$7, %edi;					\
	movq	DR_CTL(%r15), %rsi;				\
	call	kdi_dreg_set;					\
								\
	movl	$6, %edi;					\
	movq	$KDIREG_DRSTAT_RESERVED, %rsi;			\
	call	kdi_dreg_set;					\
								\
	movl	$0, %edi;					\
	movq	DRADDR_OFF(0)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$1, %edi;					\
	movq	DRADDR_OFF(1)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$2, %edi;					\
	movq	DRADDR_OFF(2)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	movl	$3, %edi;					\
	movq	DRADDR_OFF(3)(%r15), %rsi;			\
	call	kdi_dreg_set;					\
	popq	%rdi
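
/*
 * Viewed as C, and assuming kdi_dreg_set(int reg, ulong_t val) writes debug
 * register "reg" and that DR_CTL/DRADDR_OFF name the control-word and
 * address-array fields of kdi_drreg, the macro performs roughly:
 *
 *	kdi_dreg_set(7, kdi_drreg.dr_ctl);
 *	kdi_dreg_set(6, KDIREG_DRSTAT_RESERVED);
 *	for (i = 0; i <= 3; i++)
 *		kdi_dreg_set(i, kdi_drreg.dr_addr[i]);
 *
 * %rdi is preserved around the calls since callers still need the cpusave
 * pointer afterwards.
 */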
/*
 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 * The following macros manage the buffer.
 */

/* Advance the ring buffer */
#define	ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2)	\
	movq	KRS_CURCRUMBIDX(cpusave), tmp1;		\
	cmpq	$[KDI_NCRUMBS - 1], tmp1;		\
	jge	1f;					\
	/* Advance the pointer and index */		\
	addq	$1, tmp1;				\
	movq	tmp1, KRS_CURCRUMBIDX(cpusave);		\
	movq	KRS_CURCRUMB(cpusave), tmp1;		\
	addq	$KRM_SIZE, tmp1;			\
	jmp	2f;					\
1:	/* Reset the pointer and index */		\
	movq	$0, KRS_CURCRUMBIDX(cpusave);		\
	leaq	KRS_CRUMBS(cpusave), tmp1;		\
2:	movq	tmp1, KRS_CURCRUMB(cpusave);		\
	/* Clear the new crumb */			\
	movq	$KDI_NCRUMBS, tmp2;			\
3:	movq	$0, -4(tmp1, tmp2, 4);			\
	decq	tmp2;					\
	jnz	3b
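
/*
 * As a C sketch, assuming the cpusave fields the KRS_ offsets refer to,
 * the advance amounts to:
 *
 *	if (cpusave->krs_curcrumbidx < KDI_NCRUMBS - 1) {
 *		cpusave->krs_curcrumbidx++;
 *		cpusave->krs_curcrumb = (char *)cpusave->krs_curcrumb +
 *		    KRM_SIZE;
 *	} else {
 *		cpusave->krs_curcrumbidx = 0;
 *		cpusave->krs_curcrumb = cpusave->krs_crumbs;
 *	}
 *
 * followed by zeroing the newly current crumb.
 */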
/* Set a value in the current breadcrumb buffer */
#define	ADD_CRUMB(cpusave, offset, value, tmp) \
	movq	KRS_CURCRUMB(cpusave), tmp;	\
	movq	value, offset(tmp)

#endif	/* _ASM */
#if defined(__lint)
void
kdi_cmnint(void)
{
}
#else	/* __lint */

	/* XXX implement me */
	ENTRY_NP(kdi_nmiint)
	clrq	%rcx
	movq	(%rcx), %rcx
	SET_SIZE(kdi_nmiint)
	/*
	 * The main entry point for master CPUs.  It also serves as the trap
	 * handler for all traps and interrupts taken during single-step.
	 */
	ENTRY_NP(kdi_cmnint)
	ALTENTRY(kdi_master_entry)

	pushq	%rax
	CLI(%rax)
	popq	%rax

	/* Save current register state */
	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)

#ifdef __xpv
	/*
	 * Clear saved_upcall_mask in unused byte of cs slot on stack.
	 * It can only confuse things.
	 */
	movb	$0, REG_OFF(KDIREG_CS)+4(%rsp)
#endif
#if !defined(__xpv)
	/*
	 * Switch to the kernel's GSBASE.  Neither GSBASE nor the ill-named
	 * KGSBASE can be trusted, as the kernel may or may not have already
	 * done a swapgs.  All is not lost, as the kernel can divine the correct
	 * value for us.  Note that the previous GSBASE is saved in the
	 * KDI_SAVE_REGS macro to prevent a usermode process's GSBASE from being
	 * blown away.  On the hypervisor, we don't need to do this, since it's
	 * ensured we're on our requested kernel GSBASE already.
	 */
	subq	$10, %rsp
	sgdt	(%rsp)
	movq	2(%rsp), %rdi	/* gdt base now in %rdi */
	addq	$10, %rsp
	call	kdi_gdt2gsbase	/* returns kernel's GSBASE in %rax */

	movq	%rax, %rdx
	shrq	$32, %rdx
	movl	$MSR_AMD_GSBASE, %ecx
	wrmsr
#endif	/* __xpv */
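
	/*
	 * The sgdt sequence above works because sgdt stores a 10-byte
	 * descriptor: a 2-byte limit followed by the 8-byte GDT base
	 * address, so the base sits at offset 2 of the temporary stack
	 * buffer.  The wrmsr then takes the recovered GSBASE split across
	 * %edx:%eax.
	 */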
	GET_CPUSAVE_ADDR	/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)
	ADD_CRUMB(%rax, KRM_SP, %rsp, %rdx)
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_TRAPNO, %rcx, %rdx)

	movq	%rsp, %rbp
	pushq	%rax
	/*
	 * Were we in the debugger when we took the trap (i.e. was %rsp in one
	 * of the debugger's memory ranges)?
	 */
	leaq	kdi_memranges, %rcx
	movl	kdi_nmemranges, %edx
1:	cmpq	MR_BASE(%rcx), %rsp
	jl	2f		/* below this range -- try the next one */
	cmpq	MR_LIM(%rcx), %rsp
	jg	2f		/* above this range -- try the next one */
	jmp	3f		/* matched within this range */

2:	decl	%edx
	jz	kdi_save_common_state	/* %rsp not within debugger memory */
	addq	$MR_SIZE, %rcx
	jmp	1b
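
	/*
	 * In rough C terms, and assuming MR_BASE/MR_LIM name the bounds
	 * fields of a kdi_memranges entry, the loop above is:
	 *
	 *	for (i = kdi_nmemranges, mr = kdi_memranges; i > 0;
	 *	    i--, mr++) {
	 *		if (%rsp >= mr->base && %rsp <= mr->lim)
	 *			goto 3;		(trapped inside the debugger)
	 *	}
	 *	goto kdi_save_common_state;
	 */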
3:	/*
	 * The master is still set.  That should only happen if we hit a trap
	 * while running in the debugger.  Note that it may be an intentional
	 * fault.  kmdb_dpi_handle_fault will sort it all out.
	 */

	movq	REG_OFF(KDIREG_TRAPNO)(%rbp), %rdi
	movq	REG_OFF(KDIREG_RIP)(%rbp), %rsi
	movq	REG_OFF(KDIREG_RSP)(%rbp), %rdx
	movq	%rbx, %rcx	/* cpuid */

	call	kdi_dvec_handle_fault

	/*
	 * If we're here, we ran into a debugger problem, and the user
	 * elected to solve it by having the debugger debug itself.  The
	 * state we're about to save is that of the debugger when it took
	 * the fault.
	 */

	jmp	kdi_save_common_state

	SET_SIZE(kdi_master_entry)
	SET_SIZE(kdi_cmnint)

#endif	/* __lint */
/*
 * The cross-call handler for slave CPUs.
 *
 * The debugger is single-threaded, so only one CPU, called the master, may be
 * running it at any given time.  The other CPUs, known as slaves, spin in a
 * busy loop until there's something for them to do.  This is the entry point
 * for the slaves - they'll be sent here in response to a cross-call sent by the
 * master.
 */

#if defined(__lint)
void
kdi_slave_entry(void)
{
}
#else /* __lint */
	ENTRY_NP(kdi_slave_entry)

	/*
	 * Cross calls are implemented as function calls, so our stack
	 * currently looks like one you'd get from a zero-argument function
	 * call.  That is, there's the return %rip at %rsp, and that's about
	 * it.  We need to make it look like an interrupt stack.  When we
	 * first save, we'll reverse the saved %ss and %rip, which we'll fix
	 * back up when we've freed up some general-purpose registers.  We'll
	 * also need to fix up the saved %rsp.
	 */

	pushq	%rsp		/* pushed value off by 8 */
	pushfq
	CLI(%rax)
	pushq	$KCS_SEL
	clrq	%rax
	movw	%ss, %ax
	pushq	%rax		/* rip should be here */
	pushq	$-1		/* phony trap error code */
	pushq	$-1		/* phony trap number */

	subq	$REG_OFF(KDIREG_TRAPNO), %rsp
	KDI_SAVE_REGS(%rsp)

	movq	REG_OFF(KDIREG_SS)(%rsp), %rax
	xchgq	REG_OFF(KDIREG_RIP)(%rsp), %rax
	movq	%rax, REG_OFF(KDIREG_SS)(%rsp)

	movq	REG_OFF(KDIREG_RSP)(%rsp), %rax
	addq	$8, %rax
	movq	%rax, REG_OFF(KDIREG_RSP)(%rsp)
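
	/*
	 * In effect, the two fixups above perform:
	 *
	 *	swap(regs[KDIREG_RIP], regs[KDIREG_SS]);
	 *	regs[KDIREG_RSP] += 8;
	 *
	 * The swap undoes the deliberate %ss/%rip reversal from the pushes
	 * above, and the +8 compensates for the return %rip that was already
	 * on the stack when we pushed %rsp.
	 */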
	/*
	 * We've saved all of the general-purpose registers, and have a stack
	 * that is irettable (after we strip down to the error code)
	 */

	GET_CPUSAVE_ADDR	/* %rax = cpusave, %rbx = CPU ID */

	ADVANCE_CRUMB_POINTER(%rax, %rcx, %rdx)

	ADD_CRUMB(%rax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %rdx)

	movq	REG_OFF(KDIREG_RIP)(%rsp), %rcx
	ADD_CRUMB(%rax, KRM_PC, %rcx, %rdx)

	pushq	%rax
	jmp	kdi_save_common_state

	SET_SIZE(kdi_slave_entry)

#endif	/* __lint */
/*
 * The state of the world:
 *
 * The stack has a complete set of saved registers and segment
 * selectors, arranged in the kdi_regs.h order.  It also has a pointer
 * to our cpusave area.
 *
 * We need to save, into the cpusave area, a pointer to these saved
 * registers.  First we check whether we should jump straight back to
 * the kernel.  If not, we save a few more registers, ready the
 * machine for debugger entry, and enter the debugger.
 */

#if !defined(__lint)
	ENTRY_NP(kdi_save_common_state)

	popq	%rdi			/* the cpusave area */
	movq	%rsp, KRS_GREGS(%rdi)	/* save ptr to current saved regs */

	pushq	%rdi
	call	kdi_trap_pass
	cmpq	$1, %rax
	je	kdi_pass_to_kernel
	popq	%rax			/* cpusave in %rax */

	SAVE_IDTGDT

#if !defined(__xpv)
	/* Save off %cr0, and clear write protect */
	movq	%cr0, %rcx
	movq	%rcx, KRS_CR0(%rax)
	andq	$_BITNOT(CR0_WP), %rcx
	movq	%rcx, %cr0
#endif
	/* Save the debug registers and disable any active watchpoints */

	movq	%rax, %r15	/* save cpusave area ptr */
	movl	$7, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRCTL(%r15)

	andq	$_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %rax
	movq	%rax, %rsi
	movl	$7, %edi
	call	kdi_dreg_set

	movl	$6, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DRSTAT(%r15)

	movl	$0, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(0)(%r15)

	movl	$1, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(1)(%r15)

	movl	$2, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(2)(%r15)

	movl	$3, %edi
	call	kdi_dreg_get
	movq	%rax, KRS_DROFF(3)(%r15)
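
	/*
	 * As a C sketch, assuming kdi_dreg_get(int reg) returns the value of
	 * debug register "reg" and the KRS_ offsets name the corresponding
	 * cpusave fields, the sequence above amounts to:
	 *
	 *	cpusave->krs_drctl = kdi_dreg_get(7);
	 *	kdi_dreg_set(7, cpusave->krs_drctl &
	 *	    ~KDIREG_DRCTL_WPALLEN_MASK);
	 *	cpusave->krs_drstat = kdi_dreg_get(6);
	 *	for (i = 0; i <= 3; i++)
	 *		cpusave->krs_droff[i] = kdi_dreg_get(i);
	 */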
	movq	%r15, %rax	/* restore cpu save area to rax */

	clrq	%rbp		/* stack traces should end here */

	pushq	%rax
	movq	%rax, %rdi	/* cpusave */

	call	kdi_debugger_entry

	/* Pass cpusave to kdi_resume */
	popq	%rdi

	jmp	kdi_resume

	SET_SIZE(kdi_save_common_state)

#endif	/* !__lint */
/*
 * Resume the world.  The code that calls kdi_resume has already
 * decided whether or not to restore the IDT.
 */
#if defined(__lint)
void
kdi_resume(void)
{
}
#else	/* __lint */
	/* cpusave in %rdi */
	ENTRY_NP(kdi_resume)

	/*
	 * Send this CPU back into the world
	 */
#if !defined(__xpv)
	movq	KRS_CR0(%rdi), %rdx
	movq	%rdx, %cr0
#endif

	KDI_RESTORE_DEBUGGING_STATE

	movq	KRS_GREGS(%rdi), %rsp
	KDI_RESTORE_REGS(%rsp)
	addq	$REG_OFF(KDIREG_RIP), %rsp	/* Discard state, trapno, err */
	IRET
	/*NOTREACHED*/
	SET_SIZE(kdi_resume)

#endif	/* __lint */
#if !defined(__lint)

	ENTRY_NP(kdi_pass_to_kernel)

	popq	%rdi		/* cpusave */

	movq	$KDI_CPU_STATE_NONE, KRS_CPU_STATE(%rdi)

	/*
	 * Find the trap and vector off the right kernel handler.  The trap
	 * handler will expect the stack to be in trap order, with %rip being
	 * the last entry, so we'll need to restore all our regs.  On i86xpv
	 * we'll need to compensate for XPV_TRAP_POP.
	 *
	 * We're hard-coding the three cases where KMDB has installed permanent
	 * handlers, since after we KDI_RESTORE_REGS(), we don't have registers
	 * to work with; we can't use a global since other CPUs can easily pass
	 * through here at the same time.
	 *
	 * Note that we handle T_DBGENTR since userspace might have tried it.
	 */
	movq	KRS_GREGS(%rdi), %rsp
	movq	REG_OFF(KDIREG_TRAPNO)(%rsp), %rdi
	cmpq	$T_SGLSTP, %rdi
	je	1f
	cmpq	$T_BPTFLT, %rdi
	je	2f
	cmpq	$T_DBGENTR, %rdi
	je	3f

	/*
	 * Hmm, unknown handler.  Somebody forgot to update this when they
	 * added a new trap interposition... try to drop back into kmdb.
	 */
	int	$T_DBGENTR
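
	/*
	 * As a sketch, the dispatch above behaves like:
	 *
	 *	switch (regs[KDIREG_TRAPNO]) {
	 *	case T_SGLSTP:	goto dbgtrap;		(1f)
	 *	case T_BPTFLT:	goto brktrap;		(2f)
	 *	case T_DBGENTR:	goto invaltrap;		(3f)
	 *	default:	re-enter kmdb via int $T_DBGENTR;
	 *	}
	 */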
#define	CALL_TRAP_HANDLER(name) \
	KDI_RESTORE_REGS(%rsp); \
	/* Discard state, trapno, err */ \
	addq	$REG_OFF(KDIREG_RIP), %rsp; \
	XPV_TRAP_PUSH; \
	jmp	%cs:name

1:
	CALL_TRAP_HANDLER(dbgtrap)
	/*NOTREACHED*/
2:
	CALL_TRAP_HANDLER(brktrap)
	/*NOTREACHED*/
3:
	CALL_TRAP_HANDLER(invaltrap)
	/*NOTREACHED*/

	SET_SIZE(kdi_pass_to_kernel)
/*
 * A minimal version of mdboot(), to be used by the master CPU only.
 */
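
/*
 * As rough C, and reading the SysV AMD64 argument order straight off the
 * asm below, kdi_reboot amounts to:
 *
 *	(*psm_shutdownf)(AD_BOOT, A_SHUTDOWN);
 *	#if defined(__xpv)
 *	HYPERVISOR_shutdown(SHUTDOWN_reboot);
 *	#else
 *	reset();
 *	#endif
 */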
	ENTRY_NP(kdi_reboot)

	movl	$AD_BOOT, %edi
	movl	$A_SHUTDOWN, %esi
	call	*psm_shutdownf
#if defined(__xpv)
	movl	$SHUTDOWN_reboot, %edi
	call	HYPERVISOR_shutdown
#else
	call	reset
#endif
	/*NOTREACHED*/

	SET_SIZE(kdi_reboot)

#endif	/* !__lint */
#if defined(__lint)
/*ARGSUSED*/
void
kdi_cpu_debug_init(kdi_cpusave_t *save)
{
}
#else	/* __lint */
	ENTRY_NP(kdi_cpu_debug_init)
	pushq	%rbp
	movq	%rsp, %rbp

	pushq	%rbx		/* macro will clobber %rbx */
	KDI_RESTORE_DEBUGGING_STATE
	popq	%rbx

	leave
	ret

	SET_SIZE(kdi_cpu_debug_init)
#endif	/* !__lint */