/* SPDX-License-Identifier: GPL-2.0-only */
/* NOTE: This handler assumes the SMM window goes from 0xa0000
 * to 0xaffff. In fact, at least on Intel Core CPUs (i945 chipset)
 * the SMM window is 128K big, covering 0xa0000 to 0xbffff.
 * So there is a lot of potential for growth in here. Let's stick
 * to 64k if we can though.
 */

#include <cpu/x86/lapic_def.h>
#include <cpu/x86/msr.h>

/*
 * +--------------------------------+ 0xaffff
 * | Save State Map Node 0          |
 * | Save State Map Node 1          |
 * | Save State Map Node 2          |
 * | Save State Map Node 3          |
 * | ...                            |
 * +--------------------------------+ 0xaf000
 * |                                |
 * |                                |
 * +--------------------------------+ 0xa8400
 * | SMM Entry Node 0  (+ stack)    |
 * +--------------------------------+ 0xa8000
 * | SMM Entry Node 1  (+ stack)    |
 * | SMM Entry Node 2  (+ stack)    |
 * | SMM Entry Node 3  (+ stack)    |
 * | ...                            |
 * +--------------------------------+ 0xa7400
 * |                                |
 * | SMM Handler                    |
 * |                                |
 * +--------------------------------+ 0xa0000
 */

/* SMM_HANDLER_OFFSET is the 16bit offset within the ASEG
 * at which smm_handler_start lives. At the moment the handler
 * lives right at 0xa0000, so the offset is 0.
 */

#define SMM_HANDLER_OFFSET 0x0000
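
/* For illustration (hypothetical values, not the current layout): if the
 * handler were linked 0x1000 bytes into the ASEG, SMM_HANDLER_OFFSET would be
 * 0x1000 and the jump table entries at the end of this file would become
 * "ljmp $0xa000, $0x1000", since the offset is applied within segment 0xa000.
 */
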
/* initially SMM is some sort of real mode. Let gcc know
 * how to treat the SMM handler stub
 */

.section ".handler", "a", @progbits

.code16
/*
 * SMM code to enable protected mode and jump to the
 * C-written function void smi_handler(u32 smm_revision)
 *
 * All the bad magic is not all that bad after all.
 */

#define SMM_START 0xa0000
#define SMM_END 0xb0000
#if SMM_END <= SMM_START
#error invalid SMM configuration
#endif

.global smm_handler_start
smm_handler_start:
#if CONFIG(SMM_LAPIC_REMAP_MITIGATION)
	/* Check if the LAPIC register block overlaps with SMM.
	 * This block needs to work without data accesses because they
	 * may be routed into the LAPIC register block.
	 * Code accesses, on the other hand, are never routed to LAPIC,
	 * which is what makes this work in the first place.
	 */
	mov	$LAPIC_BASE_MSR, %ecx
	rdmsr
	and	$(~0xfff), %eax	/* mask off the low MSR flag bits */
	sub	$(SMM_START), %eax
	cmp	$(SMM_END - SMM_START), %eax
	ja	untampered_lapic
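	/* Note: this is an unsigned range check. A LAPIC base below SMM_START
	 * wraps around to a large value, so "ja" still skips the crash path;
	 * only a base overlapping the SMM window fails the comparison and
	 * falls through to the crash code below.
	 */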
	/* emit "Crash" on serial */
	mov	$(CONFIG_TTYS0_BASE), %dx

	/* now crash for real */
	hlt

untampered_lapic:
#endif
	movw	$(smm_gdtptr16 - smm_handler_start + SMM_HANDLER_OFFSET), %bx
	lgdtl	%cs:(%bx)

	movl	%cr0, %eax
	andl	$0x7FFAFFD1, %eax /* PG,AM,WP,NE,TS,EM,MP = 0 */
	orl	$0x60000001, %eax /* CD, NW, PE = 1 */

	/* Enable protected mode */
	movl	%eax, %cr0
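	/* Breakdown of the CR0 masks above: 0x7FFAFFD1 clears PG(31), AM(18),
	 * WP(16), NE(5), TS(3), EM(2) and MP(1); 0x60000001 sets CD(30),
	 * NW(29) and PE(0), i.e. protected mode with paging off and the
	 * cache disabled.
	 */
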
	/* flush the cache after disabling it */
	wbinvd

	/* Use flat data segment */

	/* FIXME: Incompatible with X2APIC_SUPPORT. */
	/* Get this CPU's LAPIC ID */
	movl	$(LAPIC_DEFAULT_BASE | LAPIC_ID), %esi
	movl	(%esi), %ecx
	shr	$24, %ecx
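	/* Note on the FIXME above: this read assumes xAPIC mode, where the ID
	 * register at LAPIC_DEFAULT_BASE | LAPIC_ID (offset 0x20) holds the
	 * APIC ID in bits 31:24. In x2APIC mode that MMIO window is disabled
	 * and the ID would have to come from the x2APIC ID MSR instead.
	 */
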
	/* This is an ugly hack, and we should find a way to read the CPU index
	 * without relying on the LAPIC ID.
	 */
#if CONFIG(CPU_AMD_AGESA_FAMILY15_TN)
	/* LAPIC IDs start from 0x10; map that to the proper core index */
	subl	$0x10, %ecx
#endif

	/* calculate stack offset by multiplying the APIC ID
	 * by 1024 (0x400), and save that offset in ebp.
	 */
	shl	$10, %ecx
	movl	%ecx, %ebp

	/* We put the stack for each core right above
	 * its SMM entry point. Core 0 starts at 0xa8000,
	 * we spare 0x10 bytes for the jump to be sure.
	 */
	movl	$0xa8010, %eax	/* core 0 address */
	subl	%ecx, %eax	/* subtract offset, see above */
	movl	%eax, %ebx	/* Save bottom of stack in ebx */
#define SMM_STACK_SIZE (0x400 - 0x10)

	/* clear this core's stack area */
	movl	%ebx, %edi
	movl	$(SMM_STACK_SIZE >> 2), %ecx
	xorl	%eax, %eax
	rep	stosl

	/* set the new stack pointer to the top of the cleared area */
	addl	$SMM_STACK_SIZE, %ebx
	movl	%ebx, %esp
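	/* Worked example of the stack math above: core 0 (APIC ID 0) gets
	 * bottom = 0xa8010, top = 0xa8010 + 0x3f0 = 0xa8400; core 1 gets
	 * bottom = 0xa8010 - 0x400 = 0xa7c10, top = 0xa8000. Each core's
	 * stack therefore sits directly above its 0x400-spaced entry point,
	 * as shown in the layout diagram at the top of this file.
	 */
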
	/* Backup IA32_EFER. Preserves ebx. */
	movl	$(IA32_EFER), %ecx
	rdmsr
	movl	%eax, ia32efer_backup_eax
	movl	%edx, ia32efer_backup_edx

	/* Enable long mode. Preserves ebx. */
#include <cpu/x86/64bit/entry64.inc>

	/*
	 * The only reason to go back to protected mode is that RSM doesn't restore
	 * MSR registers and MSR IA32_EFER was modified by entering long mode.
	 * Drop to protected mode to safely operate on the IA32_EFER MSR.
	 */

	/* Disable long mode. */
#include <cpu/x86/64bit/exit32.inc>

	/* Restore IA32_EFER as RSM doesn't restore MSRs. */
	movl	$(IA32_EFER), %ecx
	movl	ia32efer_backup_eax, %eax
	movl	ia32efer_backup_edx, %edx
	wrmsr
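	/* Note: entering long mode requires setting IA32_EFER.LME, and RSM
	 * leaves MSRs untouched, so without this write-back the interrupted
	 * OS would observe a modified IA32_EFER after the SMI returns.
	 */
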
	/* To return, just do rsm. It will "clean up" protected mode */
	rsm

smm_gdtptr16:
	.word	smm_gdt_end - smm_gdt - 1
	.long	smm_gdt - smm_handler_start + 0xa0000 + SMM_HANDLER_OFFSET

smm_gdt:
	/* The first GDT entry can not be used. Keep it zero */
	.long	0x00000000, 0x00000000

	/* gdt selector 0x08, flat code segment */
	.word	0xffff, 0x0000
	.byte	0x00, 0x9b, 0xcf, 0x00 /* G=1 and 0x0f, 4GB limit */

	/* gdt selector 0x10, flat data segment */
	.word	0xffff, 0x0000
	.byte	0x00, 0x93, 0xcf, 0x00

	/* gdt selector 0x18, flat code segment (64-bit) */
	.word	0xffff, 0x0000
	.byte	0x00, 0x9b, 0xaf, 0x00
smm_gdt_end:
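	/* Note on the descriptor encoding above: 0x9b/0x93 are the access
	 * bytes (present, DPL 0, code resp. data, accessed); 0xcf and 0xaf
	 * combine the high limit nibble 0xf with the flags G=1, D/B=1
	 * (32-bit, 4GB limit) resp. G=1, L=1 (64-bit code segment).
	 */
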
.section ".jumptable", "a", @progbits

/* This is the SMM jump table. All cores use the same SMM handler
 * for simplicity. But SMM Entry needs to be different due to the
 * save state area. The jump table makes sure all CPUs jump into the
 * real handler on SMM entry.
 */

/* This code currently supports up to 4 CPU cores. If more than 4 CPU cores
 * shall be used, the table below has to be updated, as well as smm.ld
 */

/* GNU AS/LD will always generate code that assumes CS is 0xa000. In reality
 * CS will be set to SMM_BASE[19:4] though. Knowing that the smm handler is the
 * first thing in the ASEG, we do a far jump here, to set CS to 0xa000.
 */

	/* core 3 */
	ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00
	/* core 2 */
	ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00
	/* core 1 */
	ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00
	/* core 0 */
	ljmp $0xa000, $SMM_HANDLER_OFFSET
.align 1024, 0x00
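
/* Note: each ljmp above occupies its own 1024-byte slot. A core's SMI entry
 * point is architecturally at SMBASE + 0x8000, and each core's SMBASE sits
 * 0x400 below the previous one, so core 0 enters at 0xa8000, core 1 at
 * 0xa7c00, core 2 at 0xa7800 and core 3 at 0xa7400, which are exactly the
 * slots of this table; all of them far-jump to the common handler.
 */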