/*
 * This file is part of the coreboot project.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <bootstate.h>
#include <console/console.h>
#include <device/device.h>
#include <device/pci.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/mp.h>
#include <cpu/intel/common/common.h>
#include <cpu/intel/microcode.h>
#include <cpu/intel/speedstep.h>
#include <cpu/intel/turbo.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/name.h>
#include <cpu/x86/smm.h>
#include <cpu/intel/smm_reloc.h>
#include <intelblocks/cpulib.h>
#include <intelblocks/fast_spi.h>
#include <intelblocks/mp_init.h>
#include <intelblocks/sgx.h>
#include <soc/pci_devs.h>
#include <soc/ramstage.h>
#include <soc/systemagent.h>
/* Convert time in seconds to POWER_LIMIT_1_TIME MSR value */
static const u8 power_limit_time_sec_to_msr[] = {
/* Convert POWER_LIMIT_1_TIME MSR value to seconds */
static const u8 power_limit_time_msr_to_sec[] = {
/*
 * Configure processor power limits if possible.
 * This must be done AFTER setting BIOS_RESET_CPL.
 */
void set_power_limits(u8 power_limit_1_time)
{
	msr_t msr = rdmsr(MSR_PLATFORM_INFO);
	msr_t limit;
	unsigned int power_unit;
	unsigned int tdp, min_power, max_power, max_time, tdp_pl2, tdp_pl1;
	u8 power_limit_1_val;
	config_t *conf = config_of_soc();
	if (power_limit_1_time >= ARRAY_SIZE(power_limit_time_sec_to_msr))
		power_limit_1_time = ARRAY_SIZE(power_limit_time_sec_to_msr) - 1;

	if (!(msr.lo & PLATFORM_INFO_SET_TDP))
		return;
	msr = rdmsr(MSR_PKG_POWER_SKU_UNIT);
	power_unit = 1 << (msr.lo & 0xf);

	/* Get power defaults for this SKU */
	msr = rdmsr(MSR_PKG_POWER_SKU);
	tdp = msr.lo & 0x7fff;
	min_power = (msr.lo >> 16) & 0x7fff;
	max_power = msr.hi & 0x7fff;
	max_time = (msr.hi >> 16) & 0x7f;

	printk(BIOS_DEBUG, "CPU TDP: %u Watts\n", tdp / power_unit);
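
	/*
	 * Worked example with assumed register values: if
	 * MSR_PKG_POWER_SKU_UNIT[3:0] reads 3, then power_unit = 1 << 3 = 8,
	 * i.e. limits are encoded in 1/8 W steps. A raw TDP reading of 120
	 * therefore means 120 / 8 = 15 W, and the default PL2 computed below
	 * becomes (120 * 125) / 100 = 150, i.e. 18.75 W.
	 */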
	if (power_limit_time_msr_to_sec[max_time] > power_limit_1_time)
		power_limit_1_time = power_limit_time_msr_to_sec[max_time];

	if (min_power > 0 && tdp < min_power)
		tdp = min_power;

	if (max_power > 0 && tdp > max_power)
		tdp = max_power;

	power_limit_1_val = power_limit_time_sec_to_msr[power_limit_1_time];
	/* Set long term power limit to TDP */
	limit.lo = 0;
	tdp_pl1 = ((conf->tdp_pl1_override == 0) ?
		tdp : (conf->tdp_pl1_override * power_unit));
	limit.lo |= (tdp_pl1 & PKG_POWER_LIMIT_MASK);

	/* Set PL1 Pkg Power clamp bit */
	limit.lo |= PKG_POWER_LIMIT_CLAMP;

	limit.lo |= PKG_POWER_LIMIT_EN;
	limit.lo |= (power_limit_1_val & PKG_POWER_LIMIT_TIME_MASK) <<
		PKG_POWER_LIMIT_TIME_SHIFT;
	/* Set short term power limit to 1.25 * TDP if no config given */
	limit.hi = 0;
	tdp_pl2 = (conf->tdp_pl2_override == 0) ?
		(tdp * 125) / 100 : (conf->tdp_pl2_override * power_unit);
	printk(BIOS_DEBUG, "CPU PL2 = %u Watts\n", tdp_pl2 / power_unit);
	limit.hi |= tdp_pl2 & PKG_POWER_LIMIT_MASK;
	limit.hi |= PKG_POWER_LIMIT_CLAMP;
	limit.hi |= PKG_POWER_LIMIT_EN;
	/* Power limit 2 time is only programmable on server SKU */
	wrmsr(MSR_PKG_POWER_LIMIT, limit);

	/* Set PL2 power limit values in MCHBAR and disable PL1 */
	MCHBAR32(MCH_PKG_POWER_LIMIT_LO) = limit.lo & ~PKG_POWER_LIMIT_EN;
	MCHBAR32(MCH_PKG_POWER_LIMIT_HI) = limit.hi;
	if (conf->tdp_psyspl2) {
		limit = rdmsr(MSR_PLATFORM_POWER_LIMIT);
		printk(BIOS_DEBUG, "CPU PsysPL2 = %u Watts\n",
		       conf->tdp_psyspl2);
		limit.hi |= (conf->tdp_psyspl2 * power_unit) &
			PKG_POWER_LIMIT_MASK;
		limit.hi |= PKG_POWER_LIMIT_CLAMP;
		limit.hi |= PKG_POWER_LIMIT_EN;

		wrmsr(MSR_PLATFORM_POWER_LIMIT, limit);
	}
	if (conf->tdp_psyspl3) {
		limit = rdmsr(MSR_PL3_CONTROL);
		printk(BIOS_DEBUG, "CPU PsysPL3 = %u Watts\n",
		       conf->tdp_psyspl3);
		limit.lo |= (conf->tdp_psyspl3 * power_unit) &
			PKG_POWER_LIMIT_MASK;
		limit.lo |= PKG_POWER_LIMIT_EN;
		/* Set PsysPl3 time window */
		limit.lo |= (conf->tdp_psyspl3_time &
			PKG_POWER_LIMIT_TIME_MASK) <<
			PKG_POWER_LIMIT_TIME_SHIFT;
		/* Set PsysPl3 duty cycle */
		limit.lo |= (conf->tdp_psyspl3_dutycycle &
			PKG_POWER_LIMIT_DUTYCYCLE_MASK) <<
			PKG_POWER_LIMIT_DUTYCYCLE_SHIFT;
		wrmsr(MSR_PL3_CONTROL, limit);
	}
	if (conf->tdp_pl4) {
		limit = rdmsr(MSR_VR_CURRENT_CONFIG);
		printk(BIOS_DEBUG, "CPU PL4 = %u Watts\n",
		       conf->tdp_pl4);
		limit.lo |= (conf->tdp_pl4 * power_unit) &
			PKG_POWER_LIMIT_MASK;
		wrmsr(MSR_VR_CURRENT_CONFIG, limit);
	}
	/* Set DDR RAPL power limit by copying from MMIO to MSR */
	msr.lo = MCHBAR32(MCH_DDR_POWER_LIMIT_LO);
	msr.hi = MCHBAR32(MCH_DDR_POWER_LIMIT_HI);
	wrmsr(MSR_DDR_RAPL_LIMIT, msr);

	/* Use nominal TDP values for CPUs with configurable TDP */
	if (cpu_config_tdp_levels()) {
		limit.hi = 0;
		limit.lo = cpu_get_tdp_nominal_ratio();
		wrmsr(MSR_TURBO_ACTIVATION_RATIO, limit);
	}
}
static void configure_thermal_target(void)
{
	config_t *conf = config_of_soc();
	msr_t msr;

	/* Set TCC activation offset if supported */
	msr = rdmsr(MSR_PLATFORM_INFO);
	if ((msr.lo & (1 << 30)) && conf->tcc_offset) {
		msr = rdmsr(MSR_TEMPERATURE_TARGET);
		msr.lo &= ~(0xf << 24); /* Bits 27:24 */
		msr.lo |= (conf->tcc_offset & 0xf) << 24;
		wrmsr(MSR_TEMPERATURE_TARGET, msr);
	}
	msr = rdmsr(MSR_TEMPERATURE_TARGET);
	msr.lo &= ~0x7f; /* Bits 6:0 */
	msr.lo |= 0xe6; /* Set a 100 ms thermal time window */
	wrmsr(MSR_TEMPERATURE_TARGET, msr);
}
static void configure_isst(void)
{
	config_t *conf = config_of_soc();
	msr_t msr;

	if (conf->speed_shift_enable) {
		/*
		 * Kernel driver checks CPUID.06h:EAX[Bit 7] to determine if
		 * HWP is supported or not. coreboot needs to configure MSR
		 * 0x1AA, which is then reflected in the CPUID register.
		 */
		msr = rdmsr(MSR_MISC_PWR_MGMT);
		msr.lo |= MISC_PWR_MGMT_ISST_EN; /* Enable Speed Shift */
		msr.lo |= MISC_PWR_MGMT_ISST_EN_INT; /* Enable Interrupt */
		msr.lo |= MISC_PWR_MGMT_ISST_EN_EPP; /* Enable EPP */
		wrmsr(MSR_MISC_PWR_MGMT, msr);
	} else {
		msr = rdmsr(MSR_MISC_PWR_MGMT);
		msr.lo &= ~MISC_PWR_MGMT_ISST_EN; /* Disable Speed Shift */
		msr.lo &= ~MISC_PWR_MGMT_ISST_EN_INT; /* Disable Interrupt */
		msr.lo &= ~MISC_PWR_MGMT_ISST_EN_EPP; /* Disable EPP */
		wrmsr(MSR_MISC_PWR_MGMT, msr);
	}
}
static void configure_misc(void)
{
	config_t *conf = config_of_soc();
	msr_t msr;

	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= (1 << 0);	/* Fast String enable */
	msr.lo |= (1 << 3);	/* TM1/TM2/EMTTM enable */
	wrmsr(IA32_MISC_ENABLE, msr);

	/* Set EIST status */
	cpu_set_eist(conf->eist_enable);
	/* Disable Thermal interrupts */
	msr.lo = 0;
	msr.hi = 0;
	wrmsr(IA32_THERM_INTERRUPT, msr);

	/* Enable package critical interrupt only */
	msr.lo = 1 << 4;
	msr.hi = 0;
	wrmsr(IA32_PACKAGE_THERM_INTERRUPT, msr);
	msr = rdmsr(MSR_POWER_CTL);
	msr.lo |= (1 << 0);	/* Enable bi-directional PROCHOT as an input */
	msr.lo |= (1 << 18);	/* Enable Energy/Performance Bias control */
	msr.lo &= ~POWER_CTL_C1E_MASK;	/* Disable C1E */
	msr.lo |= (1 << 23);	/* Lock it */
	wrmsr(MSR_POWER_CTL, msr);
}
static void enable_lapic_tpr(void)
{
	msr_t msr;

	msr = rdmsr(MSR_PIC_MSG_CONTROL);
	msr.lo &= ~(1 << 10);	/* Enable APIC TPR updates */
	wrmsr(MSR_PIC_MSG_CONTROL, msr);
}
static void configure_dca_cap(void)
{
	uint32_t feature_flag;
	msr_t msr;

	/* Check feature flag in CPUID.(EAX=1):ECX[18]==1 */
	feature_flag = cpu_get_feature_flags_ecx();
	if (feature_flag & CPUID_DCA) {
		msr = rdmsr(IA32_PLATFORM_DCA_CAP);
		wrmsr(IA32_PLATFORM_DCA_CAP, msr);
	}
}
static void set_energy_perf_bias(u8 policy)
{
	msr_t msr;
	unsigned int ecx;

	/* Determine if energy efficient policy is supported. */
	ecx = cpuid_ecx(0x6);
	if (!(ecx & (1 << 3)))
		return;

	/* Energy Policy is bits 3:0 */
	msr = rdmsr(IA32_ENERGY_PERF_BIAS);
	msr.lo &= ~0xf;
	msr.lo |= policy & 0xf;
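	/*
	 * IA32_ENERGY_PERF_BIAS[3:0] is an architectural hint ranging from 0
	 * (maximum performance) to 15 (maximum energy savings);
	 * ENERGY_POLICY_NORMAL, which soc_core_init() passes in below,
	 * selects a balanced point in between.
	 */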
	wrmsr(IA32_ENERGY_PERF_BIAS, msr);

	printk(BIOS_DEBUG, "cpu: energy policy set to %u\n", policy);
}
static void configure_c_states(void)
{
	msr_t msr;

	/* C-state Interrupt Response Latency Control 0 - package C3 latency */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_0_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_0, msr);
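
	/*
	 * Note on the IRTL writes in this function: IRTL_VALID sets the
	 * valid bit, IRTL_1024_NS selects a 1024 ns time unit, and each
	 * C_STATE_LATENCY_CONTROL_n_LIMIT value supplies the worst-case
	 * interrupt response time for that package C-state in those units
	 * (SoC-specific tuning values).
	 */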
	/* C-state Interrupt Response Latency Control 1 - package C6/C7 short */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_1_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_1, msr);

	/* C-state Interrupt Response Latency Control 2 - package C6/C7 long */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_2_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_2, msr);

	/* C-state Interrupt Response Latency Control 3 - package C8 */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_3_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_3, msr);

	/* C-state Interrupt Response Latency Control 4 - package C9 */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_4_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_4, msr);

	/* C-state Interrupt Response Latency Control 5 - package C10 */
	msr.hi = 0;
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_5_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_5, msr);
}
/*
 * The emulated ACPI timer allows disabling of the ACPI timer
 * (PM1_TMR) with no impact on the system.
 */
static void enable_pm_timer_emulation(void)
{
	msr_t msr;

	/* ACPI PM timer emulation */
	/*
	 * The derived frequency is calculated as follows:
	 *    (CTC_FREQ * msr[63:32]) >> 32 = target frequency.
	 * Back solve the multiplier so the 3.579545 MHz ACPI timer
	 * frequency is used.
	 */
	msr.hi = (3579545ULL << 32) / CTC_FREQ;
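	/*
	 * Sanity check of the back-solve above: substituting msr.hi into the
	 * hardware formula gives (CTC_FREQ * ((3579545 << 32) / CTC_FREQ)) >> 32,
	 * which is 3579545 Hz up to integer truncation, so the emulated PM1
	 * timer ticks at the expected 3.579545 MHz regardless of CTC_FREQ.
	 */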
	/* Set PM1 timer IO port and enable */
	msr.lo = (EMULATE_DELAY_VALUE << EMULATE_DELAY_OFFSET_VALUE) |
		EMULATE_PM_TMR_EN | (ACPI_BASE_ADDRESS + PM1_TMR);
	wrmsr(MSR_EMULATE_PM_TIMER, msr);
}
/*
 * Lock AES-NI (MSR_FEATURE_CONFIG) to prevent unintended disabling,
 * as suggested in Intel document 325384-070US.
 */
static void cpu_lock_aesni(void)
{
	msr_t msr;

	/* Only run once per core as specified in the MSR datasheet */
	if (intel_ht_sibling())
		return;

	msr = rdmsr(MSR_FEATURE_CONFIG);
	if ((msr.lo & 1) == 0) {
		msr.lo |= 1;	/* Set the lock bit */
		wrmsr(MSR_FEATURE_CONFIG, msr);
	}
}
/* All CPUs including BSP will run the following function. */
void soc_core_init(struct device *cpu)
{
	/* Clear out pending MCEs */
	/* TODO(adurbin): This should only be done on a cold boot. Also, some
	 * of these banks are core vs package scope. For now every CPU clears
	 * every bank. */

	/* Enable the local CPU APICs */
	enable_lapic_tpr();
	/* Configure C-state interrupt response time */
	configure_c_states();

	/* Configure Enhanced SpeedStep and Thermal Sensors */
	configure_misc();

	/* Configure Intel Speed Shift */
	configure_isst();

	/* Lock AES-NI MSR */
	cpu_lock_aesni();

	/* Enable ACPI Timer Emulation via MSR 0x121 */
	enable_pm_timer_emulation();

	/* Enable Direct Cache Access */
	configure_dca_cap();

	/* Set energy policy */
	set_energy_perf_bias(ENERGY_POLICY_NORMAL);

	/* Configure Core PRMRR for SGX. */
	if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE))
		prmrr_core_configure();
}
static void per_cpu_smm_trigger(void)
{
	/* Relocate the SMM handler. */
	smm_relocate();
}

static void vmx_configure(void *unused)
{
	set_feature_ctrl_vmx();
}

static void fc_lock_configure(void *unused)
{
	set_feature_ctrl_lock();
}
static void post_mp_init(void)
{
	int ret = 0;

	/*
	 * Now that all APs have been relocated as well as the BSP, let SMIs
	 * start flowing.
	 */
	smm_southbridge_enable(GBL_EN);

	/* Lock down the SMRAM space. */
	if (CONFIG(HAVE_SMI_HANDLER))
		smm_lock();

	ret |= mp_run_on_all_cpus(vmx_configure, NULL);

	if (CONFIG(SOC_INTEL_COMMON_BLOCK_SGX_ENABLE))
		ret |= mp_run_on_all_cpus(sgx_configure, NULL);

	ret |= mp_run_on_all_cpus(fc_lock_configure, NULL);

	if (ret)
		printk(BIOS_CRIT, "CRITICAL ERROR: MP post init failed\n");
}
static const struct mp_ops mp_ops = {
	/*
	 * Skip pre-MP-init MTRR programming, as MTRRs are mirrored from the
	 * BSP and were set up prior to ramstage.
	 * The real MTRR programming is done after resource allocation.
	 */
	.pre_mp_init = soc_fsp_load,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = smm_info,
	.get_microcode_info = get_microcode_info,
	.pre_mp_smm_init = smm_initialize,
	.per_cpu_smm_trigger = per_cpu_smm_trigger,
	.relocation_handler = smm_relocation_handler,
	.post_mp_init = post_mp_init,
};
void soc_init_cpus(struct bus *cpu_bus)
{
	if (mp_init_with_smm(cpu_bus, &mp_ops))
		printk(BIOS_ERR, "MP initialization failure.\n");

	/* Thermal throttle activation offset */
	configure_thermal_target();
}
int soc_skip_ucode_update(u32 current_patch_id, u32 new_patch_id)
{
	msr_t msr1;
	msr_t msr2;

	/*
	 * If PRMRR/SGX is supported, the FIT microcode load will set MSR
	 * 0x08b with a patch revision id one less than the id in the
	 * microcode binary. PRMRR support is indicated in the MSR
	 * MTRRCAP[12]. If SGX is not enabled, check and avoid reloading the
	 * same microcode during CPU initialization. If SGX is enabled, as
	 * part of the SGX BIOS initialization steps, the same microcode
	 * needs to be reloaded after the core PRMRR MSRs are programmed.
	 */
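	/*
	 * Illustrative example (revision numbers assumed): if the microcode
	 * binary carries revision 0xB4, the FIT load leaves 0xB3 in MSR 0x8B
	 * on a PRMRR-capable part. Before MSR_PRMRR_PHYS_BASE is programmed
	 * the checks below return 1, skipping the redundant reload; once
	 * prmrr_core_configure() has set the base MSR they return 0 so the
	 * same patch is applied again as the SGX init flow requires.
	 */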
	msr1 = rdmsr(MTRR_CAP_MSR);
	msr2 = rdmsr(MSR_PRMRR_PHYS_BASE);
	if (msr2.lo && (current_patch_id == new_patch_id - 1))
		return 0;

	return (msr1.lo & PRMRR_SUPPORTED) &&
		(current_patch_id == new_patch_id - 1);
}