2 * This file is part of the coreboot project.
4 * Copyright (C) 2007-2009 coresystems GmbH
5 * Copyright (C) 2011 The ChromiumOS Authors. All rights reserved.
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; version 2 of
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
23 #include <console/console.h>
24 #include <device/device.h>
26 #include <arch/acpi.h>
28 #include <cpu/x86/mtrr.h>
29 #include <cpu/x86/msr.h>
30 #include <cpu/x86/lapic.h>
31 #include <cpu/intel/microcode.h>
32 #include <cpu/intel/speedstep.h>
33 #include <cpu/intel/turbo.h>
34 #include <cpu/x86/cache.h>
35 #include <cpu/x86/name.h>
36 #include <cpu/x86/smm.h>
38 #include <pc80/mc146818rtc.h>
39 #include <northbridge/intel/haswell/haswell.h>
40 #include <southbridge/intel/lynxpoint/pch.h>
/* Intel suggested latency times in units of 1024ns. */
#define C_STATE_LATENCY_CONTROL_0_LIMIT 0x42
#define C_STATE_LATENCY_CONTROL_1_LIMIT 0x73
#define C_STATE_LATENCY_CONTROL_2_LIMIT 0x91
#define C_STATE_LATENCY_CONTROL_3_LIMIT 0xe4
#define C_STATE_LATENCY_CONTROL_4_LIMIT 0x145
#define C_STATE_LATENCY_CONTROL_5_LIMIT 0x1ef

/* Convert a latency-control limit to microseconds:
 * (limit * 2^(base*5)) ns / 1000. With base selecting the IRTL time unit. */
#define C_STATE_LATENCY_MICRO_SECONDS(limit, base) \
	(((1 << ((base)*5)) * (limit)) / 1000)
/* Expand to the microsecond latency for latency-control register <reg>.
 * NOTE(review): the second macro argument (the time-unit base) is missing
 * from this excerpt — confirm against the complete file. */
#define C_STATE_LATENCY_FROM_LAT_REG(reg) \
	C_STATE_LATENCY_MICRO_SECONDS(C_STATE_LATENCY_CONTROL_ ##reg## _LIMIT, \
/* List of supported C-states in this processor. Only the ULT parts support C8,
 * and deeper package states (see haswell_is_ult()).
 * NOTE(review): enum opener and entries 0-3 are elided from this excerpt. */
	C_STATE_C6_SHORT_LAT,	/* 4 */
	C_STATE_C6_LONG_LAT,	/* 5 */
	C_STATE_C7_SHORT_LAT,	/* 6 */
	C_STATE_C7_LONG_LAT,	/* 7 */
	C_STATE_C7S_SHORT_LAT,	/* 8 */
	C_STATE_C7S_LONG_LAT,	/* 9 */
/* Designated-initializer fragment describing an ACPI FFixedHW (MWAIT)
 * resource for the given MWAIT hint (state, sub_state). The hint is encoded
 * as (state << 4) | sub_state per the Intel MWAIT convention. */
#define MWAIT_RES(state, sub_state) \
	.addrl = (((state) << 4) | (sub_state)), \
	.space_id = ACPI_ADDRESS_SPACE_FIXED, \
	.bit_width = ACPI_FFIXEDHW_VENDOR_INTEL, \
	.bit_offset = ACPI_FFIXEDHW_CLASS_MWAIT, \
	.access_size = ACPI_FFIXEDHW_FLAG_HW_COORD, \
/* ACPI _CST entries indexed by the C-state enum above. Each entry pairs an
 * MWAIT resource with an interrupt-response latency derived from the
 * C_STATE_LATENCY_CONTROL_* limits. C6/C7/C7S short/long variants share
 * latency registers 1 and 2 respectively.
 * NOTE(review): several array-designator openers, closing braces, and the
 * entries for C1/C1E/C3 and the ULT C8/C9/C10 designators are elided from
 * this excerpt. */
static acpi_cstate_t cstate_map[NUM_C_STATES] = {
		.resource = MWAIT_RES(0,0),
		.resource = MWAIT_RES(0,1),
		.latency = C_STATE_LATENCY_FROM_LAT_REG(0),
		.resource = MWAIT_RES(1, 0),
	[C_STATE_C6_SHORT_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(1),
		.resource = MWAIT_RES(2, 0),
	[C_STATE_C6_LONG_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(2),
		.resource = MWAIT_RES(2, 1),
	[C_STATE_C7_SHORT_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(1),
		.resource = MWAIT_RES(3, 0),
	[C_STATE_C7_LONG_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(2),
		.resource = MWAIT_RES(3, 1),
	[C_STATE_C7S_SHORT_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(1),
		.resource = MWAIT_RES(3, 2),
	[C_STATE_C7S_LONG_LAT] = {
		.latency = C_STATE_LATENCY_FROM_LAT_REG(2),
		.resource = MWAIT_RES(3, 3),
		/* Deeper package states (C8/C9/C10, ULT only) below. */
		.latency = C_STATE_LATENCY_FROM_LAT_REG(3),
		.resource = MWAIT_RES(4, 0),
		.latency = C_STATE_LATENCY_FROM_LAT_REG(4),
		.resource = MWAIT_RES(5, 0),
		.latency = C_STATE_LATENCY_FROM_LAT_REG(5),
		.resource = MWAIT_RES(6, 0),
/* Convert time in seconds to POWER_LIMIT_1_TIME MSR value.
 * NOTE(review): table contents are elided from this excerpt; indexed by
 * seconds in set_power_limits(). */
static const u8 power_limit_time_sec_to_msr[] = {
/* Convert POWER_LIMIT_1_TIME MSR value to seconds.
 * NOTE(review): table contents are elided from this excerpt; indexed by the
 * 7-bit max_time field of MSR_PKG_POWER_SKU. */
static const u8 power_limit_time_msr_to_sec[] = {
/* Return the CPUID leaf 1 signature with the stepping nibble masked off,
 * i.e. the extended-family/extended-model/family/model fields only. */
int haswell_family_model(void)
{
	const unsigned int sig = cpuid_eax(1);

	return sig & 0x0fff0ff0;
}
/* Return the stepping field (bits 3:0) of the CPUID leaf 1 signature. */
int haswell_stepping(void)
{
	const unsigned int sig = cpuid_eax(1);

	return sig & 0xf;
}
/* Dynamically determine if the part is ULT. */
int haswell_is_ult(void)
	/* NOTE(review): the local/static declaration of `ult` and the return
	 * statement are elided from this excerpt. Also, `!!` is redundant on
	 * the result of `==`, which is already 0 or 1. */
	ult = !!(haswell_family_model() == HASWELL_FAMILY_ULT);
/* The core 100MHz BCLK is disabled in deeper c-states. One needs to calibrate
 * the 100MHz BCLK against the 24MHz BCLK to restore the clocks properly
 * when a core is woken up. */
/* Poll the PCODE mailbox until the RUN_BUSY bit clears, giving up after
 * roughly 1000 iterations of delay_step.
 * NOTE(review): loop setup (wait_count initialization, `do {`), the delay
 * call, and both return statements are elided from this excerpt. */
static int pcode_ready(void)
	const int delay_step = 10;
		/* Transaction complete once RUN_BUSY deasserts. */
		if (!(MCHBAR32(BIOS_MAILBOX_INTERFACE) & MAILBOX_RUN_BUSY))
		wait_count += delay_step;
	} while (wait_count < 1000);
/* Ask PCODE to calibrate the 100MHz BCLK against the always-on 24MHz clock
 * via the BIOS mailbox, then read back the calibrated value (ULT only;
 * called from bsp_init_before_ap_bringup()).
 * NOTE(review): early-return/closing-brace lines after each error printk and
 * the err_code declaration are elided from this excerpt. */
static void calibrate_24mhz_bclk(void)
	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on wait ready.\n");
	/* A non-zero value initiates the PCODE calibration. */
	MCHBAR32(BIOS_MAILBOX_DATA) = ~0;
	MCHBAR32(BIOS_MAILBOX_INTERFACE) =
		MAILBOX_RUN_BUSY | MAILBOX_BIOS_CMD_FSM_MEASURE_INTVL;
	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on completion.\n");
	/* Low byte of the interface register carries the PCODE status code. */
	err_code = MCHBAR32(BIOS_MAILBOX_INTERFACE) & 0xff;
	printk(BIOS_DEBUG, "PCODE: 24MHz BLCK calibration response: %d\n",
	/* Read the calibrated value. */
	MCHBAR32(BIOS_MAILBOX_INTERFACE) =
		MAILBOX_RUN_BUSY | MAILBOX_BIOS_CMD_READ_CALIBRATION;
	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on read.\n");
	printk(BIOS_DEBUG, "PCODE: 24MHz BLCK calibration value: 0x%08x\n",
		MCHBAR32(BIOS_MAILBOX_DATA));
/* Issue a read command to the PCODE BIOS mailbox and return the data
 * register contents once the transaction completes.
 * NOTE(review): the error-path return statements after each printk are
 * elided from this excerpt — on a mailbox timeout the returned data value
 * may not be meaningful; confirm against the complete file. */
static u32 pcode_mailbox_read(u32 command)
	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on wait ready.\n");
	/* Send command and start transaction */
	MCHBAR32(BIOS_MAILBOX_INTERFACE) = command | MAILBOX_RUN_BUSY;
	if (pcode_ready() < 0) {
		printk(BIOS_ERR, "PCODE: mailbox timeout on completion.\n");
	/* Read mailbox */
	return MCHBAR32(BIOS_MAILBOX_DATA);
/* Program the voltage-regulator configuration MSRs: PSI thresholds and PSI4
 * enable in VR_CURRENT_CONFIG, slope/ramp/decay behavior in VR_MISC_CONFIG,
 * and (ULT only) minimum-voltage control in VR_MISC_CONFIG2.
 * NOTE(review): the `msr_t msr;` declaration and the VR_MISC_CONFIG2
 * read-modify lines for bits 15:8 are elided from this excerpt. */
static void initialize_vr_config(void)
	printk(BIOS_DEBUG, "Initializing VR config.\n");

	/* Configure VR_CURRENT_CONFIG. */
	msr = rdmsr(MSR_VR_CURRENT_CONFIG);
	/* Preserve bits 63 and 62. Bit 62 is PSI4 enable, but it is only valid
	 * on ULT parts (set conditionally below). */
	msr.hi &= 0xc0000000;
	msr.hi |= (0x01 << (52 - 32)); /* PSI3 threshold -  1A. */
	msr.hi |= (0x05 << (42 - 32)); /* PSI2 threshold -  5A. */
	msr.hi |= (0x0f << (32 - 32)); /* PSI1 threshold - 15A. */
	if (haswell_is_ult())
		msr.hi |= (1 << (62 - 32)); /* Enable PSI4 */
	/* Leave the max instantaneous current limit (12:0) to default. */
	wrmsr(MSR_VR_CURRENT_CONFIG, msr);

	/* Configure VR_MISC_CONFIG MSR. */
	msr = rdmsr(MSR_VR_MISC_CONFIG);
	/* Set the IOUT_SLOPE scalar applied to dIout in U10.1.9 format. */
	msr.hi &= ~(0x3ff << (40 - 32));
	msr.hi |= (0x200 << (40 - 32)); /* 1.0 */
	/* Set IOUT_OFFSET to 0. */
	/* Set exit ramp rate to fast. */
	msr.hi |= (1 << (50 - 32));
	/* Set entry ramp rate to slow. */
	msr.hi &= ~(1 << (51 - 32));
	/* Enable decay mode on C-state entry. */
	msr.hi |= (1 << (52 - 32));
	/* Set the slow ramp rate to be fast ramp rate / 4 */
	msr.hi &= ~(0x3 << (53 - 32));
	msr.hi |= (0x01 << (53 - 32));
	/* Set MIN_VID (31:24) to allow CPU to have full control. */
	msr.lo &= ~0xff000000;
	wrmsr(MSR_VR_MISC_CONFIG, msr);

	/* Configure VR_MISC_CONFIG2 MSR. */
	if (haswell_is_ult()) {
		msr = rdmsr(MSR_VR_MISC_CONFIG2);
		/* Allow CPU to control minimum voltage completely (15:8) and
		 * set the fast ramp voltage to 1110mV (0x6f in 10mV steps). */
		wrmsr(MSR_VR_MISC_CONFIG2, msr);
/* Read the PCH power-sharing levels from PCODE and program them into the
 * RCBA PMSYNC_CONFIG/PMSYNC_CONFIG2 registers, 5 bits per byte lane.
 * NOTE(review): the `int i;` declaration, the `pch_power >>= 6;` /
 * `pch_power_ext >>= 6;` shift lines inside the loops, and the closing
 * braces are elided from this excerpt. */
static void configure_pch_power_sharing(void)
	u32 pch_power, pch_power_ext, pmsync, pmsync2;

	/* Read PCH Power levels from PCODE */
	pch_power = pcode_mailbox_read(MAILBOX_BIOS_CMD_READ_PCH_POWER);
	pch_power_ext = pcode_mailbox_read(MAILBOX_BIOS_CMD_READ_PCH_POWER_EXT);

	printk(BIOS_INFO, "PCH Power: PCODE Levels 0x%08x 0x%08x\n",
	       pch_power, pch_power_ext);

	pmsync = RCBA32(PMSYNC_CONFIG);
	pmsync2 = RCBA32(PMSYNC_CONFIG2);

	/* Program PMSYNC_TPR_CONFIG PCH power limit values
	 * pmsync[0:4]   = mailbox[0:5]
	 * pmsync[8:12]  = mailbox[6:11]
	 * pmsync[16:20] = mailbox[12:17]
	 */
	for (i = 0; i < 3; i++) {
		u32 level = pch_power & 0x3f;
		pmsync &= ~(0x1f << (i * 8));
		pmsync |= (level & 0x1f) << (i * 8);
	RCBA32(PMSYNC_CONFIG) = pmsync;

	/* Program PMSYNC_TPR_CONFIG2 Extended PCH power limit values
	 * pmsync2[0:4]   = mailbox[23:18]
	 * pmsync2[8:12]  = mailbox_ext[6:11]
	 * pmsync2[16:20] = mailbox_ext[12:17]
	 * pmsync2[24:28] = mailbox_ext[18:22]
	 */
	pmsync2 |= pch_power & 0x1f;
	for (i = 1; i < 4; i++) {
		u32 level = pch_power_ext & 0x3f;
		pmsync2 &= ~(0x1f << (i * 8));
		pmsync2 |= (level & 0x1f) << (i * 8);
	RCBA32(PMSYNC_CONFIG2) = pmsync2;
/* Return the number of configurable-TDP levels this part supports
 * (0 means cTDP is not available).
 * NOTE(review): the `msr_t platform_info;` declaration is elided from this
 * excerpt. */
int cpu_config_tdp_levels(void)
	/* Bits 34:33 indicate how many levels supported */
	platform_info = rdmsr(MSR_PLATFORM_INFO);
	return (platform_info.hi >> 1) & 3;
413 * Configure processor power limits if possible
414 * This must be done AFTER set of BIOS_RESET_CPL
416 void set_power_limits(u8 power_limit_1_time
)
418 msr_t msr
= rdmsr(MSR_PLATFORM_INFO
);
421 unsigned tdp
, min_power
, max_power
, max_time
;
422 u8 power_limit_1_val
;
424 if (power_limit_1_time
> ARRAY_SIZE(power_limit_time_sec_to_msr
))
425 power_limit_1_time
= 28;
427 if (!(msr
.lo
& PLATFORM_INFO_SET_TDP
))
431 msr
= rdmsr(MSR_PKG_POWER_SKU_UNIT
);
432 power_unit
= 2 << ((msr
.lo
& 0xf) - 1);
434 /* Get power defaults for this SKU */
435 msr
= rdmsr(MSR_PKG_POWER_SKU
);
436 tdp
= msr
.lo
& 0x7fff;
437 min_power
= (msr
.lo
>> 16) & 0x7fff;
438 max_power
= msr
.hi
& 0x7fff;
439 max_time
= (msr
.hi
>> 16) & 0x7f;
441 printk(BIOS_DEBUG
, "CPU TDP: %u Watts\n", tdp
/ power_unit
);
443 if (power_limit_time_msr_to_sec
[max_time
] > power_limit_1_time
)
444 power_limit_1_time
= power_limit_time_msr_to_sec
[max_time
];
446 if (min_power
> 0 && tdp
< min_power
)
449 if (max_power
> 0 && tdp
> max_power
)
452 power_limit_1_val
= power_limit_time_sec_to_msr
[power_limit_1_time
];
454 /* Set long term power limit to TDP */
456 limit
.lo
|= tdp
& PKG_POWER_LIMIT_MASK
;
457 limit
.lo
|= PKG_POWER_LIMIT_EN
;
458 limit
.lo
|= (power_limit_1_val
& PKG_POWER_LIMIT_TIME_MASK
) <<
459 PKG_POWER_LIMIT_TIME_SHIFT
;
461 /* Set short term power limit to 1.25 * TDP */
463 limit
.hi
|= ((tdp
* 125) / 100) & PKG_POWER_LIMIT_MASK
;
464 limit
.hi
|= PKG_POWER_LIMIT_EN
;
465 /* Power limit 2 time is only programmable on server SKU */
467 wrmsr(MSR_PKG_POWER_LIMIT
, limit
);
469 /* Set power limit values in MCHBAR as well */
470 MCHBAR32(MCH_PKG_POWER_LIMIT_LO
) = limit
.lo
;
471 MCHBAR32(MCH_PKG_POWER_LIMIT_HI
) = limit
.hi
;
473 /* Set DDR RAPL power limit by copying from MMIO to MSR */
474 msr
.lo
= MCHBAR32(MCH_DDR_POWER_LIMIT_LO
);
475 msr
.hi
= MCHBAR32(MCH_DDR_POWER_LIMIT_HI
);
476 wrmsr(MSR_DDR_RAPL_LIMIT
, msr
);
478 /* Use nominal TDP values for CPUs with configurable TDP */
479 if (cpu_config_tdp_levels()) {
480 msr
= rdmsr(MSR_CONFIG_TDP_NOMINAL
);
482 limit
.lo
= msr
.lo
& 0xff;
483 wrmsr(MSR_TURBO_ACTIVATION_RATIO
, limit
);
/* Configure package/core C-state behavior: demotion/undemotion, IO MWAIT
 * redirection, HW_ALL P-state coordination, POWER_CTL features, and the
 * interrupt-response-time-limit (IRTL) registers for each package C-state.
 * NOTE(review): the `msr_t msr;` declaration, `msr.hi = 0;` lines before
 * each IRTL write, some mask lines, and closing braces are elided from this
 * excerpt. */
static void configure_c_states(void)
	msr = rdmsr(MSR_PMG_CST_CONFIG_CONTROL);
	msr.lo |= (1 << 30);	// Package c-state Undemotion Enable
	msr.lo |= (1 << 29);	// Package c-state Demotion Enable
	msr.lo |= (1 << 28);	// C1 Auto Undemotion Enable
	msr.lo |= (1 << 27);	// C3 Auto Undemotion Enable
	msr.lo |= (1 << 26);	// C1 Auto Demotion Enable
	msr.lo |= (1 << 25);	// C3 Auto Demotion Enable
	msr.lo &= ~(1 << 10);	// Disable IO MWAIT redirection
	/* The deepest package c-state defaults to factory-configured value. */
	wrmsr(MSR_PMG_CST_CONFIG_CONTROL, msr);

	msr = rdmsr(MSR_PMG_IO_CAPTURE_BASE);
	msr.lo |= (get_pmbase() + 0x14);	// LVL_2 base address
	/* The deepest package c-state defaults to factory-configured value. */
	wrmsr(MSR_PMG_IO_CAPTURE_BASE, msr);

	msr = rdmsr(MSR_MISC_PWR_MGMT);
	msr.lo &= ~(1 << 0);	// Enable P-state HW_ALL coordination
	wrmsr(MSR_MISC_PWR_MGMT, msr);

	msr = rdmsr(MSR_POWER_CTL);
	msr.lo |= (1 << 18);	// Enable Energy Perf Bias MSR 0x1b0
	msr.lo |= (1 << 1);	// C1E Enable
	msr.lo |= (1 << 0);	// Bi-directional PROCHOT#
	wrmsr(MSR_POWER_CTL, msr);

	/* C-state Interrupt Response Latency Control 0 - package C3 latency */
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_0_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_0, msr);

	/* C-state Interrupt Response Latency Control 1 */
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_1_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_1, msr);

	/* C-state Interrupt Response Latency Control 2 - package C6/C7 short */
	msr.lo = IRTL_VALID | IRTL_1024_NS | C_STATE_LATENCY_CONTROL_2_LIMIT;
	wrmsr(MSR_C_STATE_LATENCY_CONTROL_2, msr);

	/* Haswell ULT only supports the 3-5 latency response registers. */
	if (haswell_is_ult()) {
		/* C-state Interrupt Response Latency Control 3 - package C8 */
		msr.lo = IRTL_VALID | IRTL_1024_NS |
			C_STATE_LATENCY_CONTROL_3_LIMIT;
		wrmsr(MSR_C_STATE_LATENCY_CONTROL_3, msr);

		/* C-state Interrupt Response Latency Control 4 - package C9 */
		msr.lo = IRTL_VALID | IRTL_1024_NS |
			C_STATE_LATENCY_CONTROL_4_LIMIT;
		wrmsr(MSR_C_STATE_LATENCY_CONTROL_4, msr);

		/* C-state Interrupt Response Latency Control 5 - package C10 */
		msr.lo = IRTL_VALID | IRTL_1024_NS |
			C_STATE_LATENCY_CONTROL_5_LIMIT;
		wrmsr(MSR_C_STATE_LATENCY_CONTROL_5, msr);
/* Apply the board-configured TCC activation offset (devicetree
 * `tcc_offset`) to MSR_TEMPERATURE_TARGET, if the part supports a
 * programmable offset (MSR_PLATFORM_INFO bit 30).
 * NOTE(review): the `device_t lapic;` and `msr_t msr;` declarations, the
 * early return when no lapic/chip_info is found, and closing braces are
 * elided from this excerpt. */
static void configure_thermal_target(void)
	struct cpu_intel_haswell_config *conf;

	/* Find pointer to CPU configuration */
	lapic = dev_find_lapic(SPEEDSTEP_APIC_MAGIC);
	if (!lapic || !lapic->chip_info)
	conf = lapic->chip_info;

	/* Set TCC activation offset if supported */
	msr = rdmsr(MSR_PLATFORM_INFO);
	if ((msr.lo & (1 << 30)) && conf->tcc_offset) {
		msr = rdmsr(MSR_TEMPERATURE_TARGET);
		msr.lo &= ~(0xf << 24); /* Bits 27:24 */
		msr.lo |= (conf->tcc_offset & 0xf) << 24;
		wrmsr(MSR_TEMPERATURE_TARGET, msr);
/* Enable fast strings, thermal monitoring, and Enhanced SpeedStep in
 * IA32_MISC_ENABLE, and mask thermal interrupts except the package
 * critical-temperature interrupt.
 * NOTE(review): the `msr_t msr;` declaration and the msr.lo/msr.hi value
 * assignments for the two thermal-interrupt writes are elided from this
 * excerpt. */
static void configure_misc(void)
	msr = rdmsr(IA32_MISC_ENABLE);
	msr.lo |= (1 << 0);	/* Fast String enable */
	msr.lo |= (1 << 3);	/* TM1/TM2/EMTTM enable */
	msr.lo |= (1 << 16);	/* Enhanced SpeedStep Enable */
	wrmsr(IA32_MISC_ENABLE, msr);

	/* Disable Thermal interrupts */
	wrmsr(IA32_THERM_INTERRUPT, msr);

	/* Enable package critical interrupt only */
	wrmsr(IA32_PACKAGE_THERM_INTERRUPT, msr);
/* Clear the TPR-update disable bit in MSR_PIC_MSG_CONTROL so local APIC
 * Task Priority Register updates are propagated.
 * NOTE(review): the `msr_t msr;` declaration and braces are elided from
 * this excerpt. */
static void enable_lapic_tpr(void)
	msr = rdmsr(MSR_PIC_MSG_CONTROL);
	msr.lo &= ~(1 << 10);	/* Enable APIC TPR updates */
	wrmsr(MSR_PIC_MSG_CONTROL, msr);
/* If the CPU advertises Direct Cache Access (CPUID.1:ECX[18]), enable it in
 * IA32_PLATFORM_DCA_CAP.
 * NOTE(review): the `msr_t msr;` declaration and the bit-set line between
 * the rdmsr and wrmsr are elided from this excerpt. */
static void configure_dca_cap(void)
	struct cpuid_result cpuid_regs;

	/* Check feature flag in CPUID.(EAX=1):ECX[18]==1 */
	cpuid_regs = cpuid(1);
	if (cpuid_regs.ecx & (1 << 18)) {
		msr = rdmsr(IA32_PLATFORM_DCA_CAP);
		wrmsr(IA32_PLATFORM_DCA_CAP, msr);
/* Set the CPU to its maximum non-turbo ratio via IA32_PERF_CTL — the
 * nominal cTDP ratio when configurable TDP is supported, otherwise the max
 * ratio from MSR_PLATFORM_INFO bits 15:8.
 * NOTE(review): the `msr_t msr, perf_ctl;`/`perf_ctl.hi = 0;` lines, the
 * `} else {` between the two branches, and closing braces are elided from
 * this excerpt. */
static void set_max_ratio(void)
	/* Check for configurable TDP option */
	if (cpu_config_tdp_levels()) {
		/* Set to nominal TDP ratio */
		msr = rdmsr(MSR_CONFIG_TDP_NOMINAL);
		perf_ctl.lo = (msr.lo & 0xff) << 8;
		/* Platform Info bits 15:8 give max ratio */
		msr = rdmsr(MSR_PLATFORM_INFO);
		perf_ctl.lo = msr.lo & 0xff00;
	wrmsr(IA32_PERF_CTL, perf_ctl);

	printk(BIOS_DEBUG, "haswell: frequency set to %d\n",
	       ((perf_ctl.lo >> 8) & 0xff) * HASWELL_BCLK);
/* Program the low nibble of IA32_ENERGY_PERFORMANCE_BIAS with the given
 * policy (0 = performance ... 15 = power save), if CPUID.6:ECX[3] reports
 * the MSR is supported.
 * NOTE(review): the `u32 ecx;`/`msr_t msr;` declarations, the early return,
 * and the `msr.lo &= ~0xf;` clear before the OR appear to be elided from
 * this excerpt — without the clear, a lower-valued policy could not
 * overwrite a higher one; confirm against upstream. */
static void set_energy_perf_bias(u8 policy)
	/* Determine if energy efficient policy is supported. */
	ecx = cpuid_ecx(0x6);
	if (!(ecx & (1 << 3)))

	/* Energy Policy is bits 3:0 */
	msr = rdmsr(IA32_ENERGY_PERFORMANCE_BIAS);
	msr.lo |= policy & 0xf;
	wrmsr(IA32_ENERGY_PERFORMANCE_BIAS, msr);

	printk(BIOS_DEBUG, "haswell: energy policy set to %u\n",
/* Clear all machine-check status banks. The bank count comes from the low
 * byte of IA32_MCG_CAP (0x179); each bank's status MSR is at
 * IA32_MC0_STATUS + 4*i.
 * NOTE(review): the `msr_t msr;`/`int i, num_banks;` declarations and the
 * `msr.lo = msr.hi = 0;` zeroing before the wrmsr loop are elided from this
 * excerpt. */
static void configure_mca(void)
	const unsigned int mcg_cap_msr = 0x179;

	msr = rdmsr(mcg_cap_msr);
	num_banks = msr.lo & 0xff;

	/* TODO(adurbin): This should only be done on a cold boot. Also, some
	 * of these banks are core vs package scope. For now every CPU clears
	 * every bank. */
	for (i = 0; i < num_banks; i++)
		wrmsr(IA32_MC0_STATUS + (i * 4), msr);
/* One-time BSP setup that must precede AP bring-up: print the brand string,
 * enable the local APIC, register the BSP's device path, program MTRRs
 * (mirrored later by the APs), initialize the VR config, and (ULT only)
 * calibrate the 24MHz BCLK and set up PCH power sharing.
 * NOTE(review): the enable_lapic() call, cpu_info() lookup assigning
 * `info`, and the final intel_cpu_driver init call are elided from this
 * excerpt. */
static void bsp_init_before_ap_bringup(struct bus *cpu_bus)
	struct device_path cpu_path;
	struct cpu_info *info;
	char processor_name[49];

	/* Print processor name */
	fill_processor_name(processor_name);
	printk(BIOS_INFO, "CPU: %s.\n", processor_name);

	/* Ensure the local apic is enabled */

	/* Set the device path of the boot cpu. */
	cpu_path.type = DEVICE_PATH_APIC;
	cpu_path.apic.apic_id = lapicid();

	/* Find the device structure for the boot cpu. */
	info->cpu = alloc_find_dev(cpu_bus, &cpu_path);

	if (info->index != 0)
		printk(BIOS_CRIT, "BSP index(%d) != 0!\n", info->index);

	/* Setup MTRRs based on physical address size. */
	x86_setup_fixed_mtrrs();
	x86_setup_var_mtrrs(cpuid_eax(0x80000008) & 0xff, 2);

	initialize_vr_config();

	if (haswell_is_ult()) {
		calibrate_24mhz_bclk();
		configure_pch_power_sharing();

	/* Call through the cpu driver's initialization. */
716 /* Call through the cpu driver's initialization. */
720 /* All CPUs including BSP will run the following function. */
721 static void haswell_init(device_t cpu
)
723 /* Clear out pending MCEs */
726 /* Enable the local cpu apics */
730 /* Configure C States */
731 configure_c_states();
733 /* Configure Enhanced SpeedStep and Thermal Sensors */
736 /* Thermal throttle activation offset */
737 configure_thermal_target();
739 /* Enable Direct Cache Access */
742 /* Set energy policy */
743 set_energy_perf_bias(ENERGY_POLICY_NORMAL
);
/* Top-level MP bring-up entry: initialize the BSP, find the microcode
 * patch, back up the default SMM area, start the APs, relocate SMM, reload
 * microcode post-relocation, and restore the SMM region.
 * NOTE(review): the `int num_aps; int max_cpus;` and `void *smm_save_area;`
 * declarations, early returns on setup failure, and closing braces are
 * elided from this excerpt. */
void bsp_init_and_start_aps(struct bus *cpu_bus)
	const void *microcode_patch;

	/* Perform any necessary BSP initialization before APs are brought up.
	 * This call also allows the BSP to prepare for any secondary effects
	 * from calling cpu_initialize() such as smm_init(). */
	bsp_init_before_ap_bringup(cpu_bus);

	microcode_patch = intel_microcode_find();

	/* Save default SMM area before relocation occurs. */
	smm_save_area = backup_default_smm_area();

	/* This needs to be called after the mtrr setup so the BSP mtrrs
	 * can be mirrored by the APs. */
	if (setup_ap_init(cpu_bus, &max_cpus, microcode_patch)) {
		printk(BIOS_CRIT, "AP setup initialization failed. "
		       "No APs will be brought up.\n");

	num_aps = max_cpus - 1;
	if (start_aps(cpu_bus, num_aps)) {
		printk(BIOS_CRIT, "AP startup failed. Trying to continue.\n");

	if (smm_initialize()) {
		printk(BIOS_CRIT, "SMM Initialization failed...\n");

	/* After SMM relocation a 2nd microcode load is required. */
	intel_microcode_load_unlocked(microcode_patch);

	/* Restore the default SMM region. */
	restore_default_smm_area(smm_save_area);
/* Device operations for this CPU: haswell_init() runs on every core. */
static struct device_operations cpu_dev_ops = {
	.init = haswell_init,

/* CPUID signatures this driver binds to (family/model/stepping). */
static struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_INTEL, 0x306c1 }, /* Intel Haswell 4+2 A0 */
	{ X86_VENDOR_INTEL, 0x306c2 }, /* Intel Haswell 4+2 B0 */
	{ X86_VENDOR_INTEL, 0x40650 }, /* Intel Haswell ULT B0 */
	{ X86_VENDOR_INTEL, 0x40651 }, /* Intel Haswell ULT B1 */

/* Driver registration: ties the ops, ID table, and ACPI C-state map
 * together. NOTE(review): the `.ops = &cpu_dev_ops,` member and the
 * terminating braces appear to be elided from this excerpt. */
static const struct cpu_driver driver __cpu_driver = {
	.id_table = cpu_table,
	.cstates = cstate_map,