/*
 * NOTE(review): the three lines below were gitweb page residue (commit
 * subject, repository path, blob id) captured by the scrape — they are
 * not part of the source file:
 *   compiler.h: add __always_inline and use it in code base
 *   [coreboot.git] / src / cpu / x86 / smm / smihandler.c
 *   blob 99a62896decf0a95ee312247ebae65e7eb3a8245
 */
/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2008-2009 coresystems GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of
 * the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
17 #include <arch/io.h>
18 #include <compiler.h>
19 #include <console/console.h>
20 #include <cpu/x86/cache.h>
21 #include <cpu/x86/smm.h>
23 #if IS_ENABLED(CONFIG_SPI_FLASH_SMM)
24 #include <spi-generic.h>
25 #endif
27 static int do_driver_init = 1;
29 typedef enum { SMI_LOCKED, SMI_UNLOCKED } smi_semaphore;
31 /* SMI multiprocessing semaphore */
32 static __attribute__((aligned(4))) volatile smi_semaphore smi_handler_status
33 = SMI_UNLOCKED;
35 static int smi_obtain_lock(void)
37 u8 ret = SMI_LOCKED;
39 asm volatile (
40 "movb %2, %%al\n"
41 "xchgb %%al, %1\n"
42 "movb %%al, %0\n"
43 : "=g" (ret), "=m" (smi_handler_status)
44 : "g" (SMI_LOCKED)
45 : "eax"
48 return (ret == SMI_UNLOCKED);
51 void smi_release_lock(void)
53 asm volatile (
54 "movb %1, %%al\n"
55 "xchgb %%al, %0\n"
56 : "=m" (smi_handler_status)
57 : "g" (SMI_UNLOCKED)
58 : "eax"
62 #define LAPIC_ID 0xfee00020
63 static __always_inline unsigned long nodeid(void)
65 return (*((volatile unsigned long *)(LAPIC_ID)) >> 24);
68 void io_trap_handler(int smif)
70 /* If a handler function handled a given IO trap, it
71 * shall return a non-zero value
73 printk(BIOS_DEBUG, "SMI function trap 0x%x: ", smif);
75 if (southbridge_io_trap_handler(smif))
76 return;
78 if (mainboard_io_trap_handler(smif))
79 return;
81 printk(BIOS_DEBUG, "Unknown function\n");
84 /**
85 * @brief Set the EOS bit
87 static void smi_set_eos(void)
89 southbridge_smi_set_eos();
92 static u32 pci_orig;
94 /**
95 * @brief Backup PCI address to make sure we do not mess up the OS
97 static void smi_backup_pci_address(void)
99 pci_orig = inl(0xcf8);
103 * @brief Restore PCI address previously backed up
105 static void smi_restore_pci_address(void)
107 outl(pci_orig, 0xcf8);
110 static inline void *smm_save_state(uintptr_t base, int arch_offset, int node)
112 base += SMM_SAVE_STATE_BEGIN(arch_offset) - (node * 0x400);
113 return (void *)base;
117 * @brief Interrupt handler for SMI#
119 * @param smm_revision revision of the smm state save map
122 void smi_handler(u32 smm_revision)
124 unsigned int node;
125 smm_state_save_area_t state_save;
126 u32 smm_base = 0xa0000; /* ASEG */
128 /* Are we ok to execute the handler? */
129 if (!smi_obtain_lock()) {
130 /* For security reasons we don't release the other CPUs
131 * until the CPU with the lock is actually done
133 while (smi_handler_status == SMI_LOCKED) {
134 asm volatile (
135 ".byte 0xf3, 0x90\n" /* hint a CPU we are in
136 * spinlock (PAUSE
137 * instruction, REP NOP)
141 return;
144 smi_backup_pci_address();
146 node = nodeid();
148 console_init();
150 printk(BIOS_SPEW, "\nSMI# #%d\n", node);
152 switch (smm_revision) {
153 case 0x00030002:
154 case 0x00030007:
155 state_save.type = LEGACY;
156 state_save.legacy_state_save =
157 smm_save_state(smm_base,
158 SMM_LEGACY_ARCH_OFFSET, node);
159 break;
160 case 0x00030100:
161 state_save.type = EM64T;
162 state_save.em64t_state_save =
163 smm_save_state(smm_base,
164 SMM_EM64T_ARCH_OFFSET, node);
165 break;
166 case 0x00030101: /* SandyBridge, IvyBridge, and Haswell */
167 state_save.type = EM64T101;
168 state_save.em64t101_state_save =
169 smm_save_state(smm_base,
170 SMM_EM64T101_ARCH_OFFSET, node);
171 break;
172 case 0x00030064:
173 state_save.type = AMD64;
174 state_save.amd64_state_save =
175 smm_save_state(smm_base,
176 SMM_AMD64_ARCH_OFFSET, node);
177 break;
178 default:
179 printk(BIOS_DEBUG, "smm_revision: 0x%08x\n", smm_revision);
180 printk(BIOS_DEBUG, "SMI# not supported on your CPU\n");
181 /* Don't release lock, so no further SMI will happen,
182 * if we don't handle it anyways.
184 return;
187 /* Allow drivers to initialize variables in SMM context. */
188 if (do_driver_init) {
189 #if IS_ENABLED(CONFIG_SPI_FLASH_SMM)
190 spi_init();
191 #endif
192 do_driver_init = 0;
195 /* Call chipset specific SMI handlers. */
196 cpu_smi_handler(node, &state_save);
197 northbridge_smi_handler(node, &state_save);
198 southbridge_smi_handler(node, &state_save);
200 smi_restore_pci_address();
202 smi_release_lock();
204 /* De-assert SMI# signal to allow another SMI */
205 smi_set_eos();
208 /* Provide a default implementation for all weak handlers so that relocation
209 * entries in the modules make sense. Without default implementations the
210 * weak relocations w/o a symbol have a 0 address which is where the modules
211 * are linked at. */
212 int __weak mainboard_io_trap_handler(int smif) { return 0; }
213 void __weak cpu_smi_handler(unsigned int node,
214 smm_state_save_area_t *state_save) {}
215 void __weak northbridge_smi_handler(unsigned int node,
216 smm_state_save_area_t *state_save) {}
217 void __weak southbridge_smi_handler(unsigned int node,
218 smm_state_save_area_t *state_save) {}
219 void __weak mainboard_smi_gpi(u32 gpi_sts) {}
220 int __weak mainboard_smi_apmc(u8 data) { return 0; }
221 void __weak mainboard_smi_sleep(u8 slp_typ) {}