/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
/*
 * Debugger entry for both master and slave CPUs
 */
#include <sys/segments.h>
#include <sys/asm_linkage.h>
#include <sys/controlregs.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machprivregs.h>
#include <sys/kdi_regs.h>
#include <sys/uadmin.h>
#include <sys/psw.h>

#ifdef _ASM

#include <kdi_assym.h>
#include <assym.h>
/* clobbers %edx, %ecx, returns addr in %eax, cpu id in %ebx */
#define	GET_CPUSAVE_ADDR \
	movl	%gs:CPU_ID, %ebx;	\
	movl	%ebx, %eax;		\
	movl	$KRS_SIZE, %ecx;	\
	mull	%ecx;			\
	movl	$kdi_cpusave, %edx;	\
	/*CSTYLED*/			\
	addl	(%edx), %eax
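
/*
 * Illustrative C equivalent of GET_CPUSAVE_ADDR (a sketch only; it takes
 * kdi_cpusave to be a variable holding a pointer to an array of per-CPU
 * save areas, each KRS_SIZE bytes long):
 *
 *	cpuid = CPU->cpu_id;				// ends up in %ebx
 *	addr  = kdi_cpusave + cpuid * KRS_SIZE;		// ends up in %eax
 */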
/*
 * Save copies of the IDT and GDT descriptors.  Note that we only save the IDT
 * and GDT if the IDT isn't ours, as we may be legitimately re-entering the
 * debugger through the trap handler.  We don't want to clobber the saved IDT
 * in the process, as we'd end up resuming the world on our IDT.
 */
#define	SAVE_IDTGDT \
	movl	%gs:CPU_IDT, %edx;	\
	cmpl	$kdi_idt, %edx;		\
	je	1f;			\
	movl	%edx, KRS_IDT(%eax);	\
	movl	%gs:CPU_GDT, %edx;	\
	movl	%edx, KRS_GDT(%eax);	\
1:
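
/*
 * Roughly, in C (a sketch; the lower-case field names are inferred from
 * the KRS_/CPU_ offsets used above):
 *
 *	if (CPU->cpu_idt != kdi_idt) {		// not already on our IDT
 *		save->krs_idt = CPU->cpu_idt;
 *		save->krs_gdt = CPU->cpu_gdt;
 *	}
 */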
/*
 * Given the address of the current CPU's cpusave area in %edi, the following
 * macro restores the debugging state to said CPU.  Restored state includes
 * the debug registers from the global %dr variables, and debugging MSRs from
 * the CPU save area.  This code would be in a separate routine, but for the
 * fact that some of the MSRs are jump-sensitive.  As such, we need to minimize
 * the number of jumps taken subsequent to the update of said MSRs.  We can
 * remove one jump (the ret) by using a macro instead of a function for the
 * debugging state restoration code.
 *
 * Takes the cpusave area in %edi as a parameter, clobbers %eax-%edx
 */
#define	KDI_RESTORE_DEBUGGING_STATE \
	leal	kdi_drreg, %ebx;		\
						\
	pushl	DR_CTL(%ebx);			\
	pushl	$7;				\
	call	kdi_dreg_set;			\
	addl	$8, %esp;			\
						\
	pushl	$KDIREG_DRSTAT_RESERVED;	\
	pushl	$6;				\
	call	kdi_dreg_set;			\
	addl	$8, %esp;			\
						\
	pushl	DRADDR_OFF(0)(%ebx);		\
	pushl	$0;				\
	call	kdi_dreg_set;			\
	addl	$8, %esp;			\
						\
	pushl	DRADDR_OFF(1)(%ebx);		\
	pushl	$1;				\
	call	kdi_dreg_set;			\
	addl	$8, %esp;			\
						\
	pushl	DRADDR_OFF(2)(%ebx);		\
	pushl	$2;				\
	call	kdi_dreg_set;			\
	addl	$8, %esp;			\
						\
	pushl	DRADDR_OFF(3)(%ebx);		\
	pushl	$3;				\
	call	kdi_dreg_set;			\
	addl	$8, %esp;			\
						\
	/*					\
	 * Write any requested MSRs.		\
	 */					\
	movl	KRS_MSR(%edi), %ebx;		\
	cmpl	$0, %ebx;			\
	je	3f;				\
1:						\
	movl	MSR_NUM(%ebx), %ecx;		\
	cmpl	$0, %ecx;			\
	je	3f;				\
						\
	movl	MSR_TYPE(%ebx), %edx;		\
	cmpl	$KDI_MSR_WRITE, %edx;		\
	jne	2f;				\
						\
	movl	MSR_VALP(%ebx), %edx;		\
	movl	0(%edx), %eax;			\
	movl	4(%edx), %edx;			\
	wrmsr;					\
2:						\
	addl	$MSR_SIZE, %ebx;		\
	jmp	1b;				\
3:						\
	/*						\
	 * We must not branch after re-enabling LBR.  If	\
	 * kdi_msr_wrexit_msr is set, it contains the number	\
	 * of the MSR that controls LBR.  kdi_msr_wrexit_valp	\
	 * contains the value that is to be written to enable	\
	 * LBR.							\
	 */						\
	movl	kdi_msr_wrexit_msr, %ecx;	\
	cmpl	$0, %ecx;			\
	je	1f;				\
						\
	movl	kdi_msr_wrexit_valp, %edx;	\
	movl	0(%edx), %eax;			\
	movl	4(%edx), %edx;			\
						\
	wrmsr;					\
						\
1:
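
/*
 * The MSR loop above is, in rough C (a sketch; the struct layout is
 * inferred from the MSR_NUM/MSR_TYPE/MSR_VALP offsets):
 *
 *	for (msr = save->krs_msr; msr->msr_num != 0; msr++) {
 *		if (msr->msr_type == KDI_MSR_WRITE)
 *			wrmsr(msr->msr_num, *msr->msr_valp);  // 64-bit value
 *	}
 *	if (kdi_msr_wrexit_msr != 0)
 *		wrmsr(kdi_msr_wrexit_msr, *kdi_msr_wrexit_valp);
 */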
#define	KDI_RESTORE_REGS() \
	/* Discard savfp and savpc */ \
	addl	$8, %esp;	\
	popl	%ss;		\
	popl	%gs;		\
	popl	%fs;		\
	popl	%es;		\
	popl	%ds;		\
	popal;			\
	/* Discard trapno and err */ \
	addl	$8, %esp
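
/*
 * After KDI_RESTORE_REGS() the stack holds only the hardware trap frame
 * (%eip, %cs, EFLAGS) -- i.e. exactly what iret expects.
 */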
/*
 * Each cpusave buffer has an area set aside for a ring buffer of breadcrumbs.
 * The following macros manage the buffer.
 */

/* Advance the ring buffer */
#define	ADVANCE_CRUMB_POINTER(cpusave, tmp1, tmp2) \
	movl	KRS_CURCRUMBIDX(cpusave), tmp1;	\
	cmpl	$[KDI_NCRUMBS - 1], tmp1;	\
	jge	1f;				\
	/* Advance the pointer and index */	\
	addl	$1, tmp1;			\
	movl	tmp1, KRS_CURCRUMBIDX(cpusave);	\
	movl	KRS_CURCRUMB(cpusave), tmp1;	\
	addl	$KRM_SIZE, tmp1;		\
	jmp	2f;				\
1:	/* Reset the pointer and index */	\
	movw	$0, KRS_CURCRUMBIDX(cpusave);	\
	leal	KRS_CRUMBS(cpusave), tmp1;	\
2:	movl	tmp1, KRS_CURCRUMB(cpusave);	\
	/* Clear the new crumb */		\
	movl	$KDI_NCRUMBS, tmp2;		\
3:	movl	$0, -4(tmp1, tmp2, 4);		\
	decl	tmp2;				\
	jnz	3b
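
/*
 * In rough C, the advance step above is (a sketch; the lower-case names
 * mirror the KRS_/KRM_ offsets):
 *
 *	if (save->krs_curcrumbidx < KDI_NCRUMBS - 1) {
 *		save->krs_curcrumbidx++;
 *		crumb = save->krs_curcrumb + 1;		// next slot
 *	} else {
 *		save->krs_curcrumbidx = 0;
 *		crumb = &save->krs_crumbs[0];		// wrap around
 *	}
 *	save->krs_curcrumb = crumb;
 *	bzero(crumb, KDI_NCRUMBS * 4);	// the clear loop, 4 bytes at a time
 */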
/* Set a value in the current breadcrumb buffer */
#define	ADD_CRUMB(cpusave, offset, value, tmp) \
	movl	KRS_CURCRUMB(cpusave), tmp;	\
	movl	value, offset(tmp)

#endif	/* _ASM */
/*
 * The main entry point for master CPUs.  It also serves as the trap handler
 * for all traps and interrupts taken during single-step.
 */

/* XXX implement me */
	ENTRY_NP(kdi_nmiint)
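	/*
	 * Placeholder (see the XXX above): load from address zero to
	 * force a fault, making an unexpected NMI at least visible.
	 */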
	clr	%ecx
	movl	(%ecx), %ecx
	SET_SIZE(kdi_nmiint)
	ENTRY_NP(kdi_cmnint)
	ALTENTRY(kdi_master_entry)

	/* Save all registers and selectors */

	pushal
	pushl	%ds
	pushl	%es
	pushl	%fs
	pushl	%gs
	pushl	%ss

	subl	$8, %esp	/* room for the saved %ebp/%eip (savfp/savpc) */
	movl	%ebp, REG_OFF(KDIREG_SAVFP)(%esp)
	movl	REG_OFF(KDIREG_EIP)(%esp), %eax
	movl	%eax, REG_OFF(KDIREG_SAVPC)(%esp)
	/*
	 * If the kernel has started using its own selectors, we should too.
	 * Update our saved selectors if they haven't been updated already.
	 */
	movw	%cs, %ax
	cmpw	$KCS_SEL, %ax
	jne	1f			/* The kernel hasn't switched yet */

	movw	$KDS_SEL, %ax
	movw	%ax, %ds
	movw	kdi_cs, %ax
	cmpw	$KCS_SEL, %ax
	je	1f			/* We already switched */

	/*
	 * The kernel switched, but we haven't.  Update our saved selectors
	 * to match the kernel's copies for use below.
	 */
	movl	$KCS_SEL, kdi_cs
	movl	$KDS_SEL, kdi_ds
	movl	$KFS_SEL, kdi_fs
	movl	$KGS_SEL, kdi_gs

1:
	/*
	 * Set the selectors to a known state.  If we come in from kmdb's IDT,
	 * we'll be on boot's %cs.  This will cause GET_CPUSAVE_ADDR to return
	 * CPU 0's cpusave, regardless of which CPU we're on, and chaos will
	 * ensue.  So, if we've got $KCS_SEL in kdi_cs, switch to it.  The
	 * other selectors are restored normally.
	 */
	movw	%cs:kdi_cs, %ax
	cmpw	$KCS_SEL, %ax
	jne	1f
	ljmp	$KCS_SEL, $1f
1:	movw	%cs:kdi_ds, %ds
	movw	kdi_ds, %es
	movw	kdi_fs, %fs
	movw	kdi_gs, %gs
	movw	kdi_ds, %ss
	/*
	 * This has to come after we set %gs to the kernel descriptor.  Since
	 * we've hijacked some IDT entries used in user-space such as the
	 * breakpoint handler, we can enter kdi_cmnint() with GDT_LWPGS used
	 * in %gs.  On the hypervisor, CLI() needs GDT_GS to access the
	 * machcpu.
	 */
	CLI(%eax)

	GET_CPUSAVE_ADDR	/* %eax = cpusave, %ebx = CPU ID */

	ADVANCE_CRUMB_POINTER(%eax, %ecx, %edx)

	ADD_CRUMB(%eax, KRM_CPU_STATE, $KDI_CPU_STATE_MASTER, %edx)

	movl	REG_OFF(KDIREG_EIP)(%esp), %ecx
	ADD_CRUMB(%eax, KRM_PC, %ecx, %edx)
	ADD_CRUMB(%eax, KRM_SP, %esp, %edx)
	movl	REG_OFF(KDIREG_TRAPNO)(%esp), %ecx
	ADD_CRUMB(%eax, KRM_TRAPNO, %ecx, %edx)

	movl	%esp, %ebp
	pushl	%eax
	/*
	 * Were we in the debugger when we took the trap (i.e. was %esp in one
	 * of the debugger's memory ranges)?
	 */
	leal	kdi_memranges, %ecx
	movl	kdi_nmemranges, %edx
1:	cmpl	MR_BASE(%ecx), %esp
	jl	2f		/* below this range -- try the next one */
	cmpl	MR_LIM(%ecx), %esp
	jg	2f		/* above this range -- try the next one */
	jmp	3f		/* matched within this range */

2:	decl	%edx
	jz	kdi_save_common_state	/* %esp not within debugger memory */
	addl	$MR_SIZE, %ecx
	jmp	1b
3:	/*
	 * %esp was within one of the debugger's memory ranges.  This should
	 * only happen when we take a trap while running in the debugger.
	 * kmdb_dpi_handle_fault will determine whether or not it was an
	 * expected trap, and will take the appropriate action.
	 */

	pushl	%ebx			/* cpuid */

	movl	REG_OFF(KDIREG_ESP)(%ebp), %ecx
	addl	$REG_OFF(KDIREG_EFLAGS - KDIREG_EAX), %ecx
	pushl	%ecx

	pushl	REG_OFF(KDIREG_EIP)(%ebp)
	pushl	REG_OFF(KDIREG_TRAPNO)(%ebp)

	call	kdi_dvec_handle_fault
	addl	$16, %esp
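
	/*
	 * With cdecl argument order (last push = first argument), the
	 * call above is effectively
	 * kdi_dvec_handle_fault(trapno, pc, sp, cpuid).
	 */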
	/*
	 * If we're here, we ran into a debugger problem, and the user
	 * elected to solve it by having the debugger debug itself.  The
	 * state we're about to save is that of the debugger when it took
	 * the fault.
	 */

	jmp	kdi_save_common_state

	SET_SIZE(kdi_master_entry)
	SET_SIZE(kdi_cmnint)
/*
 * The cross-call handler for slave CPUs.
 *
 * The debugger is single-threaded, so only one CPU, called the master, may be
 * running it at any given time.  The other CPUs, known as slaves, spin in a
 * busy loop until there's something for them to do.  This is the entry point
 * for the slaves - they'll be sent here in response to a cross-call sent by
 * the master.
 */

	.globl	kdi_slave_entry_patch;

	ENTRY_NP(kdi_slave_entry)

	/* kdi_msr_add_clrentry knows where this is */
kdi_slave_entry_patch:
	KDI_MSR_PATCH;
	/*
	 * Cross calls are implemented as function calls, so our stack
	 * currently looks like one you'd get from a zero-argument function
	 * call.  There's an %eip at %esp, and that's about it.  We want to
	 * make it look like the master CPU's stack.  By doing this, we can
	 * use the same resume code for both master and slave.  We need to
	 * make our stack look like a `struct regs' before we jump into the
	 * common save routine.
	 */

	pushl	%cs
	pushfl
	pushl	$-1	/* A phony trap error code */
	pushl	$-1	/* A phony trap number */
	pushal
	pushl	%ds
	pushl	%es
	pushl	%fs
	pushl	%gs
	pushl	%ss

	subl	$8, %esp	/* room for the saved %ebp/%eip (savfp/savpc) */
	movl	%ebp, REG_OFF(KDIREG_SAVFP)(%esp)
	movl	REG_OFF(KDIREG_EIP)(%esp), %eax
	movl	%eax, REG_OFF(KDIREG_SAVPC)(%esp)
	/*
	 * Swap our saved EFLAGS and %eip.  Each is where the other
	 * should be.
	 */
	movl	REG_OFF(KDIREG_EFLAGS)(%esp), %eax
	xchgl	REG_OFF(KDIREG_EIP)(%esp), %eax
	movl	%eax, REG_OFF(KDIREG_EFLAGS)(%esp)
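
	/*
	 * The three instructions above swap two memory words using only
	 * %eax: load EFLAGS, exchange it with the saved %eip, then store
	 * the old %eip back over EFLAGS.
	 */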
	/*
	 * Our stack now matches struct regs, and is irettable.  We don't need
	 * to do anything special for the hypervisor w.r.t. PS_IE since we
	 * iret twice anyway; the second iret back to the hypervisor
	 * will re-enable interrupts.
	 */
	CLI(%eax)

	/* Load sanitized segment selectors */
	movw	kdi_ds, %ds
	movw	kdi_ds, %es
	movw	kdi_fs, %fs
	movw	kdi_gs, %gs
	movw	kdi_ds, %ss

	GET_CPUSAVE_ADDR	/* %eax = cpusave, %ebx = CPU ID */

	ADVANCE_CRUMB_POINTER(%eax, %ecx, %edx)

	ADD_CRUMB(%eax, KRM_CPU_STATE, $KDI_CPU_STATE_SLAVE, %edx)

	movl	REG_OFF(KDIREG_EIP)(%esp), %ecx
	ADD_CRUMB(%eax, KRM_PC, %ecx, %edx)

	pushl	%eax
	jmp	kdi_save_common_state

	SET_SIZE(kdi_slave_entry)
/*
 * The state of the world:
 *
 * The stack has a complete set of saved registers and segment
 * selectors, arranged in `struct regs' order (or vice-versa), up to
 * and including EFLAGS.  It also has a pointer to our cpusave area.
 *
 * We need to save a pointer to these saved registers.  We also want
 * to adjust the saved %esp - it should point just beyond the saved
 * registers to the last frame of the thread we interrupted.  Finally,
 * we want to clear out bits 16-31 of the saved selectors, as the
 * selector pushls don't automatically clear them.
 */
	ENTRY_NP(kdi_save_common_state)

	popl	%eax			/* the cpusave area */

	movl	%esp, KRS_GREGS(%eax)	/* save ptr to current saved regs */

	addl	$REG_OFF(KDIREG_EFLAGS - KDIREG_EAX), KDIREG_OFF(KDIREG_ESP)(%esp)

	andl	$0xffff, KDIREG_OFF(KDIREG_SS)(%esp)
	andl	$0xffff, KDIREG_OFF(KDIREG_GS)(%esp)
	andl	$0xffff, KDIREG_OFF(KDIREG_FS)(%esp)
	andl	$0xffff, KDIREG_OFF(KDIREG_ES)(%esp)
	andl	$0xffff, KDIREG_OFF(KDIREG_DS)(%esp)
	pushl	%eax
	call	kdi_trap_pass
	cmpl	$1, %eax
	je	kdi_pass_to_kernel
	popl	%eax

	SAVE_IDTGDT
	/* Save off %cr0, and clear write protect */
	movl	%cr0, %ecx
	movl	%ecx, KRS_CR0(%eax)
	andl	$_BITNOT(CR0_WP), %ecx
	movl	%ecx, %cr0
	pushl	%edi
	movl	%eax, %edi

	/* Save the debug registers and disable any active watchpoints */
	pushl	$7
	call	kdi_dreg_get
	addl	$4, %esp

	movl	%eax, KRS_DRCTL(%edi)
	andl	$_BITNOT(KDIREG_DRCTL_WPALLEN_MASK), %eax

	pushl	%eax
	pushl	$7
	call	kdi_dreg_set
	addl	$8, %esp

	pushl	$6
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DRSTAT(%edi)

	pushl	$0
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(0)(%edi)

	pushl	$1
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(1)(%edi)

	pushl	$2
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(2)(%edi)

	pushl	$3
	call	kdi_dreg_get
	addl	$4, %esp
	movl	%eax, KRS_DROFF(3)(%edi)
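
	/*
	 * kdi_dreg_get/kdi_dreg_set follow the same pattern throughout
	 * this file: in rough C, val = kdi_dreg_get(regno) and
	 * kdi_dreg_set(regno, val), with the caller popping the
	 * arguments after each call.
	 */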
	movl	%edi, %eax
	popl	%edi

	/*
	 * Save any requested MSRs.
	 */
	movl	KRS_MSR(%eax), %ecx
	cmpl	$0, %ecx
	je	no_msr

	pushl	%eax		/* rdmsr clobbers %eax */
	movl	%ecx, %ebx
1:
	movl	MSR_NUM(%ebx), %ecx
	cmpl	$0, %ecx
	je	msr_done

	movl	MSR_TYPE(%ebx), %edx
	cmpl	$KDI_MSR_READ, %edx
	jne	msr_next

	rdmsr			/* addr in %ecx, value into %edx:%eax */
	movl	%eax, MSR_VAL(%ebx)
	movl	%edx, _CONST(MSR_VAL + 4)(%ebx)

msr_next:
	addl	$MSR_SIZE, %ebx
	jmp	1b

msr_done:
	popl	%eax

no_msr:
	clr	%ebp		/* stack traces should end here */

	pushl	%eax
	call	kdi_debugger_entry
	popl	%eax

	jmp	kdi_resume

	SET_SIZE(kdi_save_common_state)
/*
 * Resume the world.  The code that calls kdi_resume has already
 * decided whether or not to restore the IDT.
 */
	/* cpusave in %eax */
	ENTRY_NP(kdi_resume)

	/*
	 * Send this CPU back into the world
	 */

	movl	KRS_CR0(%eax), %edx
	movl	%edx, %cr0

	pushl	%edi
	movl	%eax, %edi

	KDI_RESTORE_DEBUGGING_STATE

	popl	%edi

	addl	$8, %esp	/* Discard savfp and savpc */

	popl	%ss
	popl	%gs
	popl	%fs
	popl	%es
	popl	%ds
	popal

	addl	$8, %esp	/* Discard TRAPNO and ERROR */
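
	/*
	 * IRET is a macro from machprivregs.h: a plain iret on bare
	 * metal, or the hypervisor return sequence when built for the
	 * hypervisor (the __xpv case).
	 */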
	IRET

	SET_SIZE(kdi_resume)
	ENTRY_NP(kdi_pass_to_kernel)

	/* pop cpusave, leaving %esp pointing to saved regs */
	popl	%eax

	movl	$KDI_CPU_STATE_NONE, KRS_CPU_STATE(%eax)

	/*
	 * Find the trap and vector off the right kernel handler.  The trap
	 * handler will expect the stack to be in trap order, with %eip being
	 * the last entry, so we'll need to restore all our regs.
	 *
	 * We're hard-coding the three cases where KMDB has installed permanent
	 * handlers, since after we restore, we don't have registers to work
	 * with; we can't use a global since other CPUs can easily pass through
	 * here at the same time.
	 *
	 * Note that we handle T_DBGENTR since userspace might have tried it.
	 */
	movl	REG_OFF(KDIREG_TRAPNO)(%esp), %eax
	cmpl	$T_SGLSTP, %eax
	je	kpass_dbgtrap
	cmpl	$T_BPTFLT, %eax
	je	kpass_brktrap
	cmpl	$T_DBGENTR, %eax
	je	kpass_invaltrap

	/*
	 * Hmm, unknown handler.  Somebody forgot to update this when they
	 * added a new trap interposition... try to drop back into kmdb.
	 */
	int	$T_DBGENTR
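
	/*
	 * Each stub below reloads %cs with the kernel code selector via
	 * a far jump before transferring to the kernel's handler.
	 */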
kpass_dbgtrap:
	KDI_RESTORE_REGS()
	ljmp	$KCS_SEL, $1f
1:	jmp	%cs:dbgtrap
	/*NOTREACHED*/

kpass_brktrap:
	KDI_RESTORE_REGS()
	ljmp	$KCS_SEL, $2f
2:	jmp	%cs:brktrap
	/*NOTREACHED*/

kpass_invaltrap:
	KDI_RESTORE_REGS()
	ljmp	$KCS_SEL, $3f
3:	jmp	%cs:invaltrap
	/*NOTREACHED*/

	SET_SIZE(kdi_pass_to_kernel)
/*
 * A minimal version of mdboot(), to be used by the master CPU only.
 */
	ENTRY_NP(kdi_reboot)

	pushl	$AD_BOOT
	pushl	$A_SHUTDOWN
	call	*psm_shutdownf
	addl	$8, %esp
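
	/*
	 * The pushes above amount to (*psm_shutdownf)(A_SHUTDOWN, AD_BOOT);
	 * if the PSM shutdown routine returns, fall through to a hard reset.
	 */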
	call	reset
	/*NOTREACHED*/

	SET_SIZE(kdi_reboot)
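
/*
 * Restore the debugging state to the given CPU; roughly
 * void kdi_cpu_debug_init(kdi_cpusave_t *save).  The single stack
 * argument is moved into %edi for KDI_RESTORE_DEBUGGING_STATE.
 * (The argument type here is inferred from usage.)
 */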
	ENTRY_NP(kdi_cpu_debug_init)
	pushl	%ebp
	movl	%esp, %ebp

	pushl	%edi
	pushl	%ebx

	movl	8(%ebp), %edi

	KDI_RESTORE_DEBUGGING_STATE

	popl	%ebx
	popl	%edi
	leave
	ret

	SET_SIZE(kdi_cpu_debug_init)