/*
 * soc/intel/xeon_sp/cpx: add CPUID for CPX-SP A1 processor
 * [coreboot.git] src/soc/intel/xeon_sp/cpx/cpu.c
 */
1 /* SPDX-License-Identifier: GPL-2.0-only */
3 #include <acpi/acpigen.h>
4 #include <acpi/acpi.h>
5 #include <assert.h>
6 #include <console/console.h>
7 #include <cpu/cpu.h>
8 #include <cpu/intel/microcode.h>
9 #include <cpu/intel/turbo.h>
10 #include <cpu/x86/lapic.h>
11 #include <cpu/x86/mp.h>
12 #include <cpu/x86/mtrr.h>
13 #include <intelblocks/cpulib.h>
14 #include <intelblocks/mp_init.h>
15 #include <soc/cpu.h>
16 #include <soc/msr.h>
17 #include <soc/soc_util.h>
18 #include "chip.h"
/* Microcode update blob located in CBFS; cached in cpx_init_cpus() so the
   MP-init callbacks can hand the same patch to every AP. */
static const void *microcode_patch;

/* Devicetree chip config; captured in cpx_init_cpus() because only the
   CPU 0 device has chip_info populated (see comment there). */
static const config_t *chip_config = NULL;
24 static void xeon_configure_mca(void)
26 msr_t msr;
27 struct cpuid_result cpuid_regs;
30 * Check feature flag in CPUID.(EAX=1):EDX[7]==1 MCE
31 * and CPUID.(EAX=1):EDX[14]==1 MCA
33 cpuid_regs = cpuid(1);
34 if ((cpuid_regs.edx & (1 << 7 | 1 << 14)) != (1 << 7 | 1 << 14))
35 return;
37 msr = rdmsr(IA32_MCG_CAP);
38 if (msr.lo & IA32_MCG_CAP_CTL_P_MASK) {
39 /* Enable all error logging */
40 msr.lo = msr.hi = 0xffffffff;
41 wrmsr(IA32_MCG_CTL, msr);
44 mca_configure();
/*
 * MP-init callback: report the microcode blob the APs should apply and
 * that they are allowed to load it in parallel.
 */
void get_microcode_info(const void **microcode, int *parallel)
{
	*parallel = 1;
	*microcode = intel_mp_current_microcode();
}
/* Return the microcode patch cached by cpx_init_cpus() (may be NULL if
   none was found in CBFS). */
const void *intel_mp_current_microcode(void)
{
	return microcode_patch;
}
/*
 * Per-CPU init callback (cpu_dev_ops.init), run once on every logical CPU:
 * LAPIC setup, power-management MSR programming, turbo/speed-step enable,
 * and machine-check configuration. MSR write order is deliberate.
 */
static void each_cpu_init(struct device *cpu)
{
	msr_t msr;

	printk(BIOS_SPEW, "%s dev: %s, cpu: %d, apic_id: 0x%x\n",
		__func__, dev_path(cpu), cpu_index(), cpu->path.apic.apic_id);
	setup_lapic();

	/*
	 * Set HWP base feature, EPP reg enumeration, lock thermal and msr
	 * This is package level MSR. Need to check if it updates correctly on
	 * multi-socket platform.
	 */
	msr = rdmsr(MSR_MISC_PWR_MGMT);
	if (!(msr.lo & LOCK_MISC_PWR_MGMT_MSR)) { /* if already locked skip update */
		msr.lo = (HWP_ENUM_ENABLE | HWP_EPP_ENUM_ENABLE | LOCK_MISC_PWR_MGMT_MSR |
			LOCK_THERM_INT);
		wrmsr(MSR_MISC_PWR_MGMT, msr);
	}

	/* Enable Fast Strings */
	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= FAST_STRINGS_ENABLE_BIT;
	wrmsr(IA32_MISC_ENABLE, msr);
	/* Enable Turbo */
	enable_turbo();

	/* Enable speed step (only meaningful once turbo reports enabled). */
	if (get_turbo_state() == TURBO_ENABLED) {
		msr = rdmsr(IA32_MISC_ENABLE);
		msr.lo |= SPEED_STEP_ENABLE_BIT;
		wrmsr(IA32_MISC_ENABLE, msr);
	}

	/* Clear out pending MCEs */
	xeon_configure_mca();
}
/* Device operations bound to each matched CPU device. */
static struct device_operations cpu_dev_ops = {
	.init = each_cpu_init,
};
/* CPUIDs handled by this driver: Cooper Lake-SP A0 and A1 steppings.
   The {0, 0} entry terminates the table. */
static const struct cpu_device_id cpu_table[] = {
	{X86_VENDOR_INTEL, CPUID_COOPERLAKE_SP_A0},
	{X86_VENDOR_INTEL, CPUID_COOPERLAKE_SP_A1},
	{0, 0},
};
/* Register the CPU driver so cpu_dev_ops is attached to matching CPUIDs. */
static const struct cpu_driver driver __cpu_driver = {
	.ops = &cpu_dev_ops,
	.id_table = cpu_table,
};
112 static void set_max_turbo_freq(void)
114 msr_t msr, perf_ctl;
116 FUNC_ENTER();
117 perf_ctl.hi = 0;
119 /* Check for configurable TDP option */
120 if (get_turbo_state() == TURBO_ENABLED) {
121 msr = rdmsr(MSR_TURBO_RATIO_LIMIT);
122 perf_ctl.lo = (msr.lo & 0xff) << 8;
123 } else if (cpu_config_tdp_levels()) {
124 /* Set to nominal TDP ratio */
125 msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
126 perf_ctl.lo = (msr.lo & 0xff) << 8;
127 } else {
128 /* Platform Info bits 15:8 give max ratio */
129 msr = rdmsr(MSR_PLATFORM_INFO);
130 perf_ctl.lo = msr.lo & 0xff00;
132 wrmsr(IA32_PERF_CTL, perf_ctl);
134 printk(BIOS_DEBUG, "cpu: frequency set to %d\n",
135 ((perf_ctl.lo >> 8) & 0xff) * CPU_BCLK);
136 FUNC_EXIT();
/*
 * Do essential initialization tasks before APs can be fired up:
 * detect and program BSP MTRRs, then sanity-check the result.
 */
static void pre_mp_init(void)
{
	x86_setup_mtrrs_with_detect();
	x86_mtrr_check();
}
148 static int get_thread_count(void)
150 unsigned int num_phys = 0, num_virts = 0;
152 cpu_read_topology(&num_phys, &num_virts);
153 printk(BIOS_SPEW, "Detected %u cores and %u threads\n", num_phys, num_virts);
155 * Currently we do not know a way to figure out how many CPUs we have total
156 * on multi-socketed. So we pretend all sockets are populated with CPUs with
157 * same thread/core fusing.
158 * TODO: properly figure out number of active sockets OR refactor MPinit code
159 * to remove requirements of having to know total number of CPUs in advance.
161 return num_virts * CONFIG_MAX_SOCKET;
/* MP-init callback run after all APs are up. */
static void post_mp_init(void)
{
	/* Set Max Ratio */
	set_max_turbo_freq();

	/*
	 * TODO: Now that all APs have been relocated as well as the BSP let SMIs
	 * start flowing.
	 */
	/* NOTE(review): deliberately compiled out until the TODO above is done. */
	if (0) global_smi_enable();
}
/* Callback set consumed by mp_init_with_smm() in cpx_init_cpus(). */
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_thread_count,
	.get_microcode_info = get_microcode_info,
	.post_mp_init = post_mp_init,
};
183 void cpx_init_cpus(struct device *dev)
185 microcode_patch = intel_microcode_find();
187 if (!microcode_patch)
188 printk(BIOS_ERR, "microcode not found in CBFS!\n");
190 intel_microcode_load_unlocked(microcode_patch);
192 if (mp_init_with_smm(dev->link_list, &mp_ops) < 0)
193 printk(BIOS_ERR, "MP initialization failure.\n");
196 * chip_config is used in cpu device callback. Other than cpu 0,
197 * rest of the CPU devices do not have chip_info updated.
199 chip_config = dev->chip_info;
201 /* update numa domain for all cpu devices */
202 xeonsp_init_cpu_config();
205 msr_t read_msr_ppin(void)
207 msr_t ppin = {0};
208 msr_t msr;
210 /* If MSR_PLATFORM_INFO PPIN_CAP is 0, PPIN capability is not supported */
211 msr = rdmsr(MSR_PLATFORM_INFO);
212 if ((msr.lo & MSR_PPIN_CAP) == 0) {
213 printk(BIOS_ERR, "MSR_PPIN_CAP is 0, PPIN is not supported\n");
214 return ppin;
217 /* Access to MSR_PPIN is permitted only if MSR_PPIN_CTL LOCK is 0 and ENABLE is 1 */
218 msr = rdmsr(MSR_PPIN_CTL);
219 if (msr.lo & MSR_PPIN_CTL_LOCK) {
220 printk(BIOS_ERR, "MSR_PPIN_CTL_LOCK is 1, PPIN access is not allowed\n");
221 return ppin;
224 if ((msr.lo & MSR_PPIN_CTL_ENABLE) == 0) {
225 /* Set MSR_PPIN_CTL ENABLE to 1 */
226 msr.lo |= MSR_PPIN_CTL_ENABLE;
227 wrmsr(MSR_PPIN_CTL, msr);
229 ppin = rdmsr(MSR_PPIN);
230 /* Set enable to 0 after reading MSR_PPIN */
231 msr.lo &= ~MSR_PPIN_CTL_ENABLE;
232 wrmsr(MSR_PPIN_CTL, msr);
233 return ppin;