soc/intel/apl: Call mca_configure() on cold boots only
[coreboot.git] / src / soc / intel / apollolake / cpu.c
blobd1c5f6f40610fb7995abfa7f3dda19b731bb0858
/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2015-2017 Intel Corp.
 * Copyright (C) 2017 Siemens AG, Inc.
 * (Written by Andrey Petrov <andrey.petrov@intel.com> for Intel Corp.)
 * (Written by Alexandru Gagniuc <alexandrux.gagniuc@intel.com> for Intel Corp.)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
20 #include <arch/acpi.h>
21 #include <assert.h>
22 #include <console/console.h>
23 #include "chip.h"
24 #include <cpu/cpu.h>
25 #include <cpu/x86/cache.h>
26 #include <cpu/x86/mp.h>
27 #include <cpu/intel/microcode.h>
28 #include <cpu/intel/turbo.h>
29 #include <cpu/x86/msr.h>
30 #include <cpu/x86/mtrr.h>
31 #include <device/device.h>
32 #include <device/pci.h>
33 #include <fsp/api.h>
34 #include <fsp/memmap.h>
35 #include <intelblocks/cpulib.h>
36 #include <intelblocks/fast_spi.h>
37 #include <intelblocks/mp_init.h>
38 #include <intelblocks/msr.h>
39 #include <intelblocks/sgx.h>
40 #include <intelblocks/smm.h>
41 #include <reg_script.h>
42 #include <romstage_handoff.h>
43 #include <soc/cpu.h>
44 #include <soc/iomap.h>
45 #include <soc/pci_devs.h>
46 #include <soc/pm.h>
48 static const struct reg_script core_msr_script[] = {
49 #if !IS_ENABLED(CONFIG_SOC_INTEL_GLK)
50 /* Enable C-state and IO/MWAIT redirect */
51 REG_MSR_WRITE(MSR_PKG_CST_CONFIG_CONTROL,
52 (PKG_C_STATE_LIMIT_C2_MASK | CORE_C_STATE_LIMIT_C10_MASK
53 | IO_MWAIT_REDIRECT_MASK | CST_CFG_LOCK_MASK)),
54 /* Power Management I/O base address for I/O trapping to C-states */
55 REG_MSR_WRITE(MSR_PMG_IO_CAPTURE_BASE,
56 (ACPI_PMIO_CST_REG | (PMG_IO_BASE_CST_RNG_BLK_SIZE << 16))),
57 /* Disable support for MONITOR and MWAIT instructions */
58 REG_MSR_RMW(IA32_MISC_ENABLE, ~MONITOR_MWAIT_DIS_MASK, 0),
59 #endif
60 /* Disable C1E */
61 REG_MSR_RMW(MSR_POWER_CTL, ~POWER_CTL_C1E_MASK, 0),
63 * Enable and Lock the Advanced Encryption Standard (AES-NI)
64 * feature register
66 REG_MSR_RMW(MSR_FEATURE_CONFIG, ~FEATURE_CONFIG_RESERVED_MASK,
67 FEATURE_CONFIG_LOCK),
68 REG_SCRIPT_END
71 void soc_core_init(struct device *cpu)
73 /* Clear out pending MCEs */
74 /* TODO(adurbin): Some of these banks are core vs package
75 scope. For now every CPU clears every bank. */
76 if (IS_ENABLED(SOC_INTEL_COMMON_BLOCK_SGX) ||
77 acpi_get_sleep_type() == ACPI_S5)
78 mca_configure(NULL);
80 /* Set core MSRs */
81 reg_script_run(core_msr_script);
83 * Enable ACPI PM timer emulation, which also lets microcode know
84 * location of ACPI_BASE_ADDRESS. This also enables other features
85 * implemented in microcode.
87 enable_pm_timer_emulation();
89 /* Configure Core PRMRR for SGX. */
90 if (IS_ENABLED(CONFIG_SOC_INTEL_COMMON_BLOCK_SGX))
91 prmrr_core_configure();
93 /* Set Max Non-Turbo ratio if RAPL is disabled. */
94 if (IS_ENABLED(CONFIG_APL_SKIP_SET_POWER_LIMITS)) {
95 cpu_set_p_state_to_max_non_turbo_ratio();
96 cpu_disable_eist();
97 } else if (IS_ENABLED(CONFIG_APL_SET_MIN_CLOCK_RATIO)) {
98 cpu_set_p_state_to_min_clock_ratio();
99 cpu_disable_eist();
103 #if !IS_ENABLED(CONFIG_SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)
104 static void soc_init_core(struct device *cpu)
106 soc_core_init(cpu);
109 static struct device_operations cpu_dev_ops = {
110 .init = soc_init_core,
113 static const struct cpu_device_id cpu_table[] = {
114 { X86_VENDOR_INTEL, CPUID_APOLLOLAKE_A0 },
115 { X86_VENDOR_INTEL, CPUID_APOLLOLAKE_B0 },
116 { X86_VENDOR_INTEL, CPUID_APOLLOLAKE_E0 },
117 { X86_VENDOR_INTEL, CPUID_GLK_A0 },
118 { X86_VENDOR_INTEL, CPUID_GLK_B0 },
119 { 0, 0 },
122 static const struct cpu_driver driver __cpu_driver = {
123 .ops = &cpu_dev_ops,
124 .id_table = cpu_table,
126 #endif
129 * MP and SMM loading initialization.
131 struct smm_relocation_attrs {
132 uint32_t smbase;
133 uint32_t smrr_base;
134 uint32_t smrr_mask;
137 static struct smm_relocation_attrs relo_attrs;
140 * Do essential initialization tasks before APs can be fired up.
142 * IF (IS_ENABLED(CONFIG_SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)) -
143 * Skip Pre MP init MTRR programming, as MTRRs are mirrored from BSP,
144 * that are set prior to ramstage.
145 * Real MTRRs are programmed after resource allocation.
147 * Do FSP loading before MP Init to ensure that the FSP component stored in
148 * external stage cache in TSEG does not flush off due to SMM relocation
149 * during MP Init stage.
151 * ELSE -
152 * Enable MTRRs on the BSP. This creates the MTRR solution that the
153 * APs will use. Otherwise APs will try to apply the incomplete solution
154 * as the BSP is calculating it.
156 static void pre_mp_init(void)
158 if (IS_ENABLED(CONFIG_SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)) {
159 fsps_load(romstage_handoff_is_resume());
160 return;
162 x86_setup_mtrrs_with_detect();
163 x86_mtrr_check();
166 #if !IS_ENABLED(CONFIG_SOC_INTEL_COMMON_BLOCK_CPU_MPINIT)
167 static void read_cpu_topology(unsigned int *num_phys, unsigned int *num_virt)
169 msr_t msr;
170 msr = rdmsr(MSR_CORE_THREAD_COUNT);
171 *num_virt = (msr.lo >> 0) & 0xffff;
172 *num_phys = (msr.lo >> 16) & 0xffff;
175 /* Find CPU topology */
176 int get_cpu_count(void)
178 unsigned int num_virt_cores, num_phys_cores;
180 read_cpu_topology(&num_phys_cores, &num_virt_cores);
182 printk(BIOS_DEBUG, "Detected %u core, %u thread CPU.\n",
183 num_phys_cores, num_virt_cores);
185 return num_virt_cores;
/* Locate the microcode blob in CBFS and allow parallel AP loading. */
void get_microcode_info(const void **microcode, int *parallel)
{
	*parallel = 1;
	*microcode = intel_microcode_find();

	/* Make sure BSP is using the microcode from cbfs */
	intel_microcode_load_unlocked(*microcode);
}
196 #endif
198 static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
199 size_t *smm_save_state_size)
201 void *smm_base;
202 size_t smm_size;
203 void *handler_base;
204 size_t handler_size;
206 /* All range registers are aligned to 4KiB */
207 const uint32_t rmask = ~((1 << 12) - 1);
209 /* Initialize global tracking state. */
210 smm_region_info(&smm_base, &smm_size);
211 smm_subregion(SMM_SUBREGION_HANDLER, &handler_base, &handler_size);
213 relo_attrs.smbase = (uint32_t)smm_base;
214 relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK;
215 relo_attrs.smrr_mask = ~(smm_size - 1) & rmask;
216 relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID;
218 *perm_smbase = (uintptr_t)handler_base;
219 *perm_smsize = handler_size;
220 *smm_save_state_size = sizeof(em64t100_smm_state_save_area_t);
223 static void relocation_handler(int cpu, uintptr_t curr_smbase,
224 uintptr_t staggered_smbase)
226 msr_t smrr;
227 em64t100_smm_state_save_area_t *smm_state;
228 /* Set up SMRR. */
229 smrr.lo = relo_attrs.smrr_base;
230 smrr.hi = 0;
231 wrmsr(IA32_SMRR_PHYS_BASE, smrr);
232 smrr.lo = relo_attrs.smrr_mask;
233 smrr.hi = 0;
234 wrmsr(IA32_SMRR_PHYS_MASK, smrr);
235 smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + curr_smbase);
236 smm_state->smbase = staggered_smbase;
239 * CPU initialization recipe
241 * Note that no microcode update is passed to the init function. CSE updates
242 * the microcode on all cores before releasing them from reset. That means that
243 * the BSP and all APs will come up with the same microcode revision.
246 static void post_mp_init(void)
248 smm_southbridge_enable(PWRBTN_EN | GBL_EN);
250 if (IS_ENABLED(CONFIG_SOC_INTEL_COMMON_BLOCK_SGX))
251 mp_run_on_all_cpus(sgx_configure, NULL, 2000);
254 static const struct mp_ops mp_ops = {
255 .pre_mp_init = pre_mp_init,
256 .get_cpu_count = get_cpu_count,
257 .get_smm_info = get_smm_info,
258 .get_microcode_info = get_microcode_info,
259 .pre_mp_smm_init = smm_southbridge_clear_state,
260 .relocation_handler = relocation_handler,
261 .post_mp_init = post_mp_init,
264 void soc_init_cpus(struct bus *cpu_bus)
266 /* Clear for take-off */
267 if (mp_init_with_smm(cpu_bus, &mp_ops))
268 printk(BIOS_ERR, "MP initialization failure.\n");
271 void apollolake_init_cpus(struct device *dev)
273 if (IS_ENABLED(CONFIG_SOC_INTEL_COMMON_BLOCK_CPU_MPINIT))
274 return;
275 soc_init_cpus(dev->link_list);
277 /* Temporarily cache the memory-mapped boot media. */
278 if (IS_ENABLED(CONFIG_BOOT_DEVICE_MEMORY_MAPPED) &&
279 IS_ENABLED(CONFIG_BOOT_DEVICE_SPI_FLASH))
280 fast_spi_cache_bios_region();
void cpu_lock_sgx_memory(void)
{
	/* Intentionally empty: MCHECK already performs the required
	 * locking while microcode is loaded and IA untrusted mode is
	 * enabled. */
}
289 int soc_fill_sgx_param(struct sgx_param *sgx_param)
291 struct device *dev = SA_DEV_ROOT;
292 assert(dev != NULL);
293 config_t *conf = dev->chip_info;
295 if (!conf) {
296 printk(BIOS_ERR, "Failed to get chip_info for SGX param\n");
297 return -1;
300 sgx_param->enable = conf->sgx_enable;
301 return 0;