treewide: replace GPLv2 long form headers with SPDX header
[coreboot.git] / src / soc / intel / xeon_sp / skx / cpu.c
blob431a3bd7766fdc46154cb7aa46caad8513a39c1b
1 /* This file is part of the coreboot project. */
2 /* SPDX-License-Identifier: GPL-2.0-or-later */
4 #include <console/console.h>
5 #include <intelblocks/cpulib.h>
6 #include <cpu/cpu.h>
7 #include <cpu/x86/mtrr.h>
8 #include <cpu/x86/mp.h>
9 #include <cpu/intel/turbo.h>
10 #include <soc/msr.h>
11 #include <soc/cpu.h>
12 #include <soc/soc_util.h>
13 #include <assert.h>
14 #include "chip.h"
16 static const config_t *chip_config = NULL;
18 static void xeon_configure_mca(void)
20 msr_t msr;
21 struct cpuid_result cpuid_regs;
23 /* Check feature flag in CPUID.(EAX=1):EDX[7]==1 MCE
24 * and CPUID.(EAX=1):EDX[14]==1 MCA*/
25 cpuid_regs = cpuid(1);
26 if ((cpuid_regs.edx & (1<<7 | 1<<14)) != (1<<7 | 1<<14))
27 return;
29 msr = rdmsr(IA32_MCG_CAP);
30 if (msr.lo & IA32_MCG_CAP_CTL_P_MASK) {
31 /* Enable all error logging */
32 msr.lo = msr.hi = 0xffffffff;
33 wrmsr(IA32_MCG_CTL, msr);
36 /* TODO(adurbin): This should only be done on a cold boot. Also, some
37 of these banks are core vs package scope. For now every CPU clears
38 every bank. */
39 mca_configure();
42 static void xeon_sp_core_init(struct device *cpu)
44 msr_t msr;
46 printk(BIOS_INFO, "%s dev: %s, cpu: %d, apic_id: 0x%x\n",
47 __func__, dev_path(cpu), cpu_index(), cpu->path.apic.apic_id);
48 assert(chip_config != NULL);
50 /* set MSR_PKG_CST_CONFIG_CONTROL - scope per core*/
51 msr.hi = 0;
52 msr.lo = (PKG_CSTATE_NO_LIMIT | IO_MWAIT_REDIRECTION_ENABLE | CFG_LOCK_ENABLE);
53 wrmsr(MSR_PKG_CST_CONFIG_CONTROL, msr);
55 /* set MSR_PMG_IO_CAPTURE_BASE - scope per core */
56 msr.hi = 0;
57 msr.lo = (LVL_2_BASE_ADDRESS | CST_RANGE_MAX_C6);
58 wrmsr(MSR_PMG_IO_CAPTURE_BASE, msr);
60 /* Enable Energy Perf Bias Access, Dynamic switching and lock MSR */
61 msr = rdmsr(MSR_POWER_CTL);
62 msr.lo |= (ENERGY_PERF_BIAS_ACCESS_ENABLE | PWR_PERF_TUNING_DYN_SWITCHING_ENABLE
63 | PROCHOT_LOCK_ENABLE);
64 wrmsr(MSR_POWER_CTL, msr);
66 /* Set P-State ratio */
67 msr = rdmsr(MSR_IA32_PERF_CTRL);
68 msr.lo &= ~PSTATE_REQ_MASK;
69 msr.lo |= (chip_config->pstate_req_ratio << PSTATE_REQ_SHIFT);
70 wrmsr(MSR_IA32_PERF_CTRL, msr);
73 * Set HWP base feature, EPP reg enumeration, lock thermal and msr
74 * TODO: Set LOCK_MISC_PWR_MGMT_MSR, Unexpected Exception if you
75 * lock & issue wrmsr on every thread
76 * This is package level MSR. Need to check if it updates correctly on
77 * multi-socket platform.
79 msr = rdmsr(MSR_MISC_PWR_MGMT);
80 if (!(msr.lo & LOCK_MISC_PWR_MGMT_MSR)) { /* if already locked skip update */
81 msr.lo = (HWP_ENUM_ENABLE | HWP_EPP_ENUM_ENABLE | LOCK_MISC_PWR_MGMT_MSR |
82 LOCK_THERM_INT);
83 wrmsr(MSR_MISC_PWR_MGMT, msr);
86 /* TODO MSR_VR_MISC_CONFIG */
88 /* Set current limit lock */
89 msr = rdmsr(MSR_VR_CURRENT_CONFIG);
90 msr.lo |= CURRENT_LIMIT_LOCK;
91 wrmsr(MSR_VR_CURRENT_CONFIG, msr);
93 /* Set Turbo Ratio Limits */
94 msr.lo = chip_config->turbo_ratio_limit & 0xffffffff;
95 msr.hi = (chip_config->turbo_ratio_limit >> 32) & 0xffffffff;
96 wrmsr(MSR_TURBO_RATIO_LIMIT, msr);
98 /* Set Turbo Ratio Limit Cores */
99 msr.lo = chip_config->turbo_ratio_limit_cores & 0xffffffff;
100 msr.hi = (chip_config->turbo_ratio_limit_cores >> 32) & 0xffffffff;
101 wrmsr(MSR_TURBO_RATIO_LIMIT_CORES, msr);
103 /* set Turbo Activation ratio */
104 msr.hi = 0;
105 msr = rdmsr(MSR_TURBO_ACTIVATION_RATIO);
106 msr.lo |= MAX_NON_TURBO_RATIO;
107 wrmsr(MSR_TURBO_ACTIVATION_RATIO, msr);
109 /* Enable Fast Strings */
110 msr = rdmsr(IA32_MISC_ENABLE);
111 msr.lo |= FAST_STRINGS_ENABLE_BIT;
112 wrmsr(IA32_MISC_ENABLE, msr);
114 /* Set energy policy */
115 msr_t msr1 = rdmsr(MSR_ENERGY_PERF_BIAS_CONFIG);
116 msr.lo = (msr1.lo & EPB_ENERGY_POLICY_MASK) >> EPB_ENERGY_POLICY_SHIFT;
117 msr.hi = 0;
118 wrmsr(MSR_IA32_ENERGY_PERF_BIAS, msr);
120 /* Enable Turbo */
121 enable_turbo();
123 /* Enable speed step. */
124 if (get_turbo_state() == TURBO_ENABLED) {
125 msr = rdmsr(IA32_MISC_ENABLE);
126 msr.lo |= SPEED_STEP_ENABLE_BIT;
127 wrmsr(IA32_MISC_ENABLE, msr);
130 /* Clear out pending MCEs */
131 xeon_configure_mca();
134 static struct device_operations cpu_dev_ops = {
135 .init = xeon_sp_core_init,
138 static const struct cpu_device_id cpu_table[] = {
139 /* Skylake-SP A0/A1 CPUID 0x506f0*/
140 {X86_VENDOR_INTEL, CPUID_SKYLAKE_SP_A0_A1},
141 /* Skylake-SP B0 CPUID 0x506f1*/
142 {X86_VENDOR_INTEL, CPUID_SKYLAKE_SP_B0},
143 /* Skylake-SP 4 CPUID 0x50654*/
144 {X86_VENDOR_INTEL, CPUID_SKYLAKE_SP_4},
145 {0, 0},
148 static const struct cpu_driver driver __cpu_driver = {
149 .ops = &cpu_dev_ops,
150 .id_table = cpu_table,
153 static void set_max_turbo_freq(void)
155 msr_t msr, perf_ctl;
157 FUNC_ENTER();
158 perf_ctl.hi = 0;
160 /* Check for configurable TDP option */
161 if (get_turbo_state() == TURBO_ENABLED) {
162 msr = rdmsr(MSR_TURBO_RATIO_LIMIT);
163 perf_ctl.lo = (msr.lo & 0xff) << 8;
164 } else if (cpu_config_tdp_levels()) {
165 /* Set to nominal TDP ratio */
166 msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
167 perf_ctl.lo = (msr.lo & 0xff) << 8;
168 } else {
169 /* Platform Info bits 15:8 give max ratio */
170 msr = rdmsr(MSR_PLATFORM_INFO);
171 perf_ctl.lo = msr.lo & 0xff00;
173 wrmsr(IA32_PERF_CTL, perf_ctl);
175 printk(BIOS_DEBUG, "cpu: frequency set to %d\n",
176 ((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
177 FUNC_EXIT();
181 * Do essential initialization tasks before APs can be fired up
183 * Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
184 * creates the MTRR solution that the APs will use. Otherwise APs will try to
185 * apply the incomplete solution as the BSP is calculating it.
187 static void pre_mp_init(void)
189 printk(BIOS_DEBUG, "%s: entry\n", __func__);
191 x86_setup_fixed_mtrrs();
/* Runs on the BSP after all APs have completed their init callbacks */
static void post_mp_init(void)
{
	/* Set Max Ratio */
	set_max_turbo_freq();

	/*
	 * TODO: Now that all APs have been relocated as well as the BSP let
	 * SMIs start flowing.
	 */
}
206 * CPU initialization recipe
208 * Note that no microcode update is passed to the init function. CSE updates
209 * the microcode on all cores before releasing them from reset. That means that
210 * the BSP and all APs will come up with the same microcode revision.
212 static const struct mp_ops mp_ops = {
213 .pre_mp_init = pre_mp_init,
214 .get_cpu_count = get_platform_thread_count,
215 //.get_smm_info = get_smm_info, /* TODO */
216 .get_smm_info = NULL,
217 //.pre_mp_smm_init = southcluster_smm_clear_state, /* TODO */
218 .pre_mp_smm_init = NULL,
219 //.relocation_handler = relocation_handler, /* TODO */
220 .relocation_handler = NULL,
221 .post_mp_init = post_mp_init,
225 void xeon_sp_init_cpus(struct device *dev)
227 FUNC_ENTER();
230 * This gets used in cpu device callback. Other than cpu 0,
231 * rest of the CPU devices do not have
232 * chip_info updated. Global chip_config is used as workaround
234 chip_config = dev->chip_info;
236 config_reset_cpl3_csrs();
238 /* calls src/cpu/x86/mp_init.c */
239 if (mp_init_with_smm(dev->link_list, &mp_ops) < 0)
240 printk(BIOS_ERR, "MP initialization failure.\n");
242 /* update numa domain for all cpu devices */
243 xeonsp_init_cpu_config();
245 FUNC_EXIT();