/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2013 Google Inc.
 * Copyright (C) 2015 Intel Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <console/console.h>
#include <cpu/cpu.h>
#include <cpu/intel/microcode.h>
#include <cpu/intel/turbo.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <fsp/memmap.h>
#include <reg_script.h>
#include <soc/iosf.h>
#include <soc/msr.h>
#include <soc/pattrs.h>
#include <soc/ramstage.h>
#include <soc/smm.h>
/* Forward declarations for the MP flight plan callbacks defined below. */
static void smm_relocate(void *unused);
static void enable_smis(void *unused);
static void pre_smm_relocation(void *unused);
40 static struct mp_flight_record mp_steps
[] = {
41 MP_FR_BLOCK_APS(pre_smm_relocation
, NULL
, pre_smm_relocation
, NULL
),
42 MP_FR_BLOCK_APS(smm_relocate
, NULL
, smm_relocate
, NULL
),
43 MP_FR_BLOCK_APS(mp_initialize_cpu
, NULL
, mp_initialize_cpu
, NULL
),
44 /* Wait for APs to finish initialization before proceeding. */
45 MP_FR_BLOCK_APS(NULL
, NULL
, enable_smis
, NULL
),
/* The APIC id space is sparse. Each id is separated by 2. */
static int adjust_apic_id(int index, int apic_id)
{
	/* apic_id argument is ignored; the id is derived from the index. */
	return 2 * index;
}
54 /* Package level MSRs */
55 const struct reg_script package_msr_script
[] = {
56 /* Set Package TDP to ~7W */
57 REG_MSR_WRITE(MSR_PKG_POWER_LIMIT
, 0x3880fa),
58 REG_MSR_RMW(MSR_PP1_POWER_LIMIT
, ~(0x7f << 17), 0),
59 REG_MSR_WRITE(MSR_PKG_TURBO_CFG1
, 0x702),
60 REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG1
, 0x200b),
61 REG_MSR_WRITE(MSR_CPU_TURBO_WKLD_CFG2
, 0),
62 REG_MSR_WRITE(MSR_CPU_THERM_CFG1
, 0x00000305),
63 REG_MSR_WRITE(MSR_CPU_THERM_CFG2
, 0x0405500d),
64 REG_MSR_WRITE(MSR_CPU_THERM_SENS_CFG
, 0x27),
69 const struct reg_script core_msr_script
[] = {
70 /* Dynamic L2 shrink enable and threshold, clear SINGLE_PCTL bit 11 */
71 REG_MSR_RMW(MSR_PMG_CST_CONFIG_CONTROL
, ~0x3f080f, 0xe0008),
72 REG_MSR_RMW(MSR_POWER_MISC
,
73 ~(ENABLE_ULFM_AUTOCM_MASK
| ENABLE_INDP_AUTOCM_MASK
), 0),
75 REG_MSR_RMW(MSR_POWER_CTL
, ~0x2, 0),
76 REG_MSR_OR(MSR_POWER_MISC
, 0x44),
80 void soc_init_cpus(device_t dev
)
82 struct bus
*cpu_bus
= dev
->link_list
;
83 const struct pattrs
*pattrs
= pattrs_get();
84 struct mp_params mp_params
;
85 void *default_smm_area
;
88 printk(BIOS_SPEW
, "%s/%s ( %s )\n",
89 __FILE__
, __func__
, dev_name(dev
));
91 /* Set up MTRRs based on physical address size. */
92 x86_setup_fixed_mtrrs();
93 x86_setup_var_mtrrs(pattrs
->address_bits
, 2);
96 mp_params
.num_cpus
= pattrs
->num_cpus
,
97 mp_params
.parallel_microcode_load
= 1,
98 mp_params
.adjust_apic_id
= adjust_apic_id
;
99 mp_params
.flight_plan
= &mp_steps
[0];
100 mp_params
.num_records
= ARRAY_SIZE(mp_steps
);
101 mp_params
.microcode_pointer
= pattrs
->microcode_patch
;
103 default_smm_area
= backup_default_smm_area();
106 * Configure the BUNIT to allow dirty cache line evictions in non-SMM
107 * mode for the lines that were dirtied while in SMM mode. Otherwise
108 * the writes would be silently dropped.
110 bsmrwac
= iosf_bunit_read(BUNIT_SMRWAC
) | SAI_IA_UNTRUSTED
;
111 iosf_bunit_write(BUNIT_SMRWAC
, bsmrwac
);
113 /* Set package MSRs */
114 reg_script_run(package_msr_script
);
116 /* Enable Turbo Mode on BSP and siblings of the BSP's building block. */
119 if (mp_init(cpu_bus
, &mp_params
))
120 printk(BIOS_ERR
, "MP initialization failure.\n");
122 restore_default_smm_area(default_smm_area
);
125 static void soc_core_init(device_t cpu
)
127 printk(BIOS_SPEW
, "%s/%s ( %s )\n",
128 __FILE__
, __func__
, dev_name(cpu
));
129 printk(BIOS_DEBUG
, "Init Braswell core.\n");
132 * The turbo disable bit is actually scoped at building
133 * block level -- not package. For non-bsp cores that are within a
134 * building block enable turbo. The cores within the BSP's building
135 * block will just see it already enabled and move on.
141 reg_script_run(core_msr_script
);
143 /* Set this core to max frequency ratio */
147 static struct device_operations cpu_dev_ops
= {
148 .init
= soc_core_init
,
151 static struct cpu_device_id cpu_table
[] = {
152 { X86_VENDOR_INTEL
, 0x406C3 },
153 { X86_VENDOR_INTEL
, 0x406C2 },
157 static const struct cpu_driver driver __cpu_driver
= {
159 .id_table
= cpu_table
,
/*
 * SMM loading and initialization.
 */

/*
 * SMM layout computed once in smm_load_handlers() and consumed by the
 * per-CPU relocation handler cpu_smm_do_relocation().
 */
struct smm_relocation_attrs {
	uint32_t smbase;	/* Base of the relocated SMM region. */
	uint32_t smrr_base;	/* SMRR_PHYS_BASE value (base | cache type). */
	uint32_t smrr_mask;	/* SMRR_PHYS_MASK value (mask | valid bit). */
};

static struct smm_relocation_attrs relo_attrs;
175 static void adjust_apic_id_map(struct smm_loader_params
*smm_params
)
178 struct smm_runtime
*runtime
= smm_params
->runtime
;
180 for (i
= 0; i
< CONFIG_MAX_CPUS
; i
++)
181 runtime
->apic_id_to_cpu
[i
] = mp_get_apic_id(i
);
184 static void asmlinkage
cpu_smm_do_relocation(void *arg
)
187 em64t100_smm_state_save_area_t
*smm_state
;
188 const struct smm_module_params
*p
;
189 const struct smm_runtime
*runtime
;
193 runtime
= p
->runtime
;
196 if (cpu
>= CONFIG_MAX_CPUS
) {
198 "Invalid CPU number assigned in SMM stub: %d\n", cpu
);
203 smrr
.lo
= relo_attrs
.smrr_base
;
205 wrmsr(SMRR_PHYS_BASE
, smrr
);
206 smrr
.lo
= relo_attrs
.smrr_mask
;
208 wrmsr(SMRR_PHYS_MASK
, smrr
);
211 * The relocated handler runs with all CPUs concurrently. Therefore
212 * stagger the entry points adjusting SMBASE downwards by save state
215 smm_state
= (void *)(SMM_EM64T100_SAVE_STATE_OFFSET
+ runtime
->smbase
);
216 smm_state
->smbase
= relo_attrs
.smbase
- cpu
* runtime
->save_state_size
;
217 printk(BIOS_DEBUG
, "New SMBASE 0x%08x\n", smm_state
->smbase
);
220 static int install_relocation_handler(int num_cpus
)
222 const int save_state_size
= sizeof(em64t100_smm_state_save_area_t
);
224 struct smm_loader_params smm_params
= {
225 .per_cpu_stack_size
= save_state_size
,
226 .num_concurrent_stacks
= num_cpus
,
227 .per_cpu_save_state_size
= save_state_size
,
228 .num_concurrent_save_states
= 1,
229 .handler
= (smm_handler_t
)&cpu_smm_do_relocation
,
232 if (smm_setup_relocation_handler(&smm_params
))
235 adjust_apic_id_map(&smm_params
);
240 static int install_permanent_handler(int num_cpus
)
243 * There are num_cpus concurrent stacks and num_cpus concurrent save
244 * state areas. Lastly, set the stack size to the save state size.
246 int save_state_size
= sizeof(em64t100_smm_state_save_area_t
);
247 struct smm_loader_params smm_params
= {
248 .per_cpu_stack_size
= save_state_size
,
249 .num_concurrent_stacks
= num_cpus
,
250 .per_cpu_save_state_size
= save_state_size
,
251 .num_concurrent_save_states
= num_cpus
,
257 printk(BIOS_DEBUG
, "Installing SMM handler to 0x%08x\n",
260 smm_region(&smm_base
, &smm_size
);
261 tseg_size
= smm_size
- CONFIG_SMM_RESERVED_SIZE
;
262 if (smm_load_module((void *)relo_attrs
.smbase
, tseg_size
, &smm_params
))
265 adjust_apic_id_map(&smm_params
);
270 static int smm_load_handlers(void)
272 /* All range registers are aligned to 4KiB */
273 const uint32_t rmask
= ~((1 << 12) - 1);
274 const struct pattrs
*pattrs
= pattrs_get();
278 /* Initialize global tracking state. */
279 smm_region(&smm_base
, &smm_size
);
280 relo_attrs
.smbase
= (uint32_t)smm_base
;
281 relo_attrs
.smrr_base
= relo_attrs
.smbase
| MTRR_TYPE_WRBACK
;
282 relo_attrs
.smrr_mask
= ~(smm_size
- 1) & rmask
;
283 relo_attrs
.smrr_mask
|= MTRR_PHYS_MASK_VALID
;
285 /* Install handlers. */
286 if (install_relocation_handler(pattrs
->num_cpus
) < 0) {
287 printk(BIOS_ERR
, "Unable to install SMM relocation handler.\n");
291 if (install_permanent_handler(pattrs
->num_cpus
) < 0) {
292 printk(BIOS_ERR
, "Unable to install SMM permanent handler.\n");
296 /* Ensure the SMM handlers hit DRAM before performing first SMI. */
302 static void pre_smm_relocation(void *unused
)
304 const struct pattrs
*pattrs
= pattrs_get();
307 /* Need to make sure that all cores have microcode loaded. */
308 msr_value
= rdmsr(MSR_IA32_BIOS_SIGN_ID
);
309 if (msr_value
.hi
== 0)
310 intel_microcode_load_unlocked(pattrs
->microcode_patch
);
313 static void smm_relocate(void *unused
)
315 const struct pattrs
*pattrs
= pattrs_get();
317 /* Load relocation and permanent handler. */
319 if (smm_load_handlers() < 0) {
320 printk(BIOS_ERR
, "Error loading SMM handlers.\n");
323 southcluster_smm_clear_state();
326 /* Relocate SMM space. */
327 smm_initiate_relocation();
329 /* Load microcode after SMM relocation. */
330 intel_microcode_load_unlocked(pattrs
->microcode_patch
);
/* Final flight plan step: turn on SMIs once every CPU finished init. */
static void enable_smis(void *unused)
{
	southcluster_smm_enable_smi();
}