/* SPDX-License-Identifier: GPL-2.0-only */

#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ops.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/smm.h>
#include <cpu/intel/em64t101_save_state.h>
#include <cpu/intel/smm_reloc.h>
#include <console/console.h>
#include <smp/node.h>
#include <soc/msr.h>
#include <soc/pci_devs.h>
#include <soc/systemagent.h>
#include <string.h>
#include <types.h>

static void update_save_state(int cpu, uintptr_t curr_smbase,
			      uintptr_t staggered_smbase,
			      struct smm_relocation_params *relo_params)
{
	u32 smbase;
	u32 iedbase;

	/*
	 * The relocated handler runs with all CPUs concurrently. Therefore
	 * stagger the entry points adjusting SMBASE downwards by save state
	 * size * CPU num.
	 */
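	/*
	 * Illustration (assumption, based on the common SMM loader's
	 * staggering scheme): CPU N receives an SMBASE of roughly
	 * perm_smbase - N * save-state size, so each CPU's handler entry
	 * and save state land in a private, non-overlapping slot.
	 */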
	smbase = staggered_smbase;
	iedbase = relo_params->ied_base;

	printk(BIOS_DEBUG, "New SMBASE=0x%08x IEDBASE=0x%08x\n",
	       smbase, iedbase);

	/*
	 * All threads need to set IEDBASE and SMBASE to the relocated
	 * handler region. However, the save state location depends on the
	 * smm_save_state_in_msrs field in the relocation parameters. If
	 * smm_save_state_in_msrs is non-zero then the CPUs are relocating
	 * the SMM handler in parallel, and each CPU's save state area is
	 * located in their respective MSR space. If smm_save_state_in_msrs
	 * is zero then the SMM relocation is happening serially, so the
	 * save state is at the same default location for all CPUs.
	 */
	if (relo_params->smm_save_state_in_msrs) {
		msr_t smbase_msr;
		msr_t iedbase_msr;

		smbase_msr.lo = smbase;
		smbase_msr.hi = 0;

		/*
		 * According to the BWG, the IEDBASE MSR is in bits 63:32.
		 * It's not clear why it differs from the SMBASE MSR.
		 */
		iedbase_msr.lo = 0;
		iedbase_msr.hi = iedbase;

		wrmsr(SMBASE_MSR, smbase_msr);
		wrmsr(IEDBASE_MSR, iedbase_msr);
	} else {
		em64t101_smm_state_save_area_t *save_state;

		save_state = (void *)(curr_smbase + SMM_DEFAULT_SIZE -
				      sizeof(*save_state));

		save_state->smbase = smbase;
		save_state->iedbase = iedbase;
	}
}

/* Returns 1 if SMM MSR save state was set. */
static int bsp_setup_msr_save_state(struct smm_relocation_params *relo_params)
{
	msr_t smm_mca_cap;

	smm_mca_cap = rdmsr(SMM_MCA_CAP_MSR);
	if (smm_mca_cap.hi & SMM_CPU_SVRSTR_MASK) {
		msr_t smm_feature_control;

		smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
		smm_feature_control.hi = 0;
		smm_feature_control.lo |= SMM_CPU_SAVE_EN;
		wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
		relo_params->smm_save_state_in_msrs = 1;
	}
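
	/*
	 * When the feature is enabled here on the BSP's first pass, the
	 * relocation handler returns early and the BSP re-enters SMM a
	 * second time to perform its own relocation; see
	 * smm_relocation_handler() below.
	 */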
	return relo_params->smm_save_state_in_msrs;
}

/*
 * The relocation work is actually performed in SMM context, but the code
 * resides in the ramstage module. This occurs by trampolining from the
 * default SMRAM entry point to here.
 */
void smm_relocation_handler(int cpu, uintptr_t curr_smbase,
			    uintptr_t staggered_smbase)
{
	msr_t mtrr_cap;
	struct smm_relocation_params *relo_params = &smm_reloc_params;

	printk(BIOS_DEBUG, "In relocation handler: CPU %d\n", cpu);

	/*
	 * Determine if the processor supports saving state in MSRs. If so,
	 * enable it before the non-BSPs run so that SMM relocation can occur
	 * in parallel in the non-BSP CPUs.
	 */
	if (cpu == 0) {
		/*
		 * If smm_save_state_in_msrs is 1 then that means this is the
		 * 2nd time through the relocation handler for the BSP.
		 * Parallel SMM handler relocation is taking place. However,
		 * it is desired to access other CPUs' save state in the real
		 * SMM handler. Therefore, disable the SMM save state in MSRs
		 * feature.
		 */
		if (relo_params->smm_save_state_in_msrs) {
			msr_t smm_feature_control;

			smm_feature_control = rdmsr(SMM_FEATURE_CONTROL_MSR);
			smm_feature_control.lo &= ~SMM_CPU_SAVE_EN;
			wrmsr(SMM_FEATURE_CONTROL_MSR, smm_feature_control);
		} else if (bsp_setup_msr_save_state(relo_params))
			/*
			 * Just return from the relocation handler if MSR save
			 * state is enabled. In that case the BSP will come
			 * back into the relocation handler to set up the new
			 * SMBASE as well as disable SMM save state in MSRs.
			 */
			return;
	}

	/* Make appropriate changes to the save state map. */
	update_save_state(cpu, curr_smbase, staggered_smbase, relo_params);

	/* Write PRMRR and SMRR MSRs based on indicated support. */
	mtrr_cap = rdmsr(MTRR_CAP_MSR);
	if (mtrr_cap.lo & SMRR_SUPPORTED)
		write_smrr(relo_params);

	if (mtrr_cap.lo & PRMRR_SUPPORTED) {
		write_prmrr(relo_params);
		/*
		 * UNCORE_PRMRR MSRs are package-level. Therefore, only
		 * configure these MSRs on the BSP.
		 */
		if (cpu == 0)
			write_uncore_prmrr(relo_params);
	}
}

static void fill_in_relocation_params(struct smm_relocation_params *params)
{
	uintptr_t tseg_base;
	size_t tseg_size;
	u32 prmrr_base;
	u32 prmrr_size;
	int phys_bits;
	/* All range registers are aligned to 4KiB. */
	const u32 rmask = ~((1 << 12) - 1);

	/*
	 * Some of the range registers are dependent on the number of physical
	 * address bits supported.
	 */
	phys_bits = cpuid_eax(0x80000008) & 0xff;

	/*
	 * The range bounded by the TSEGMB and BGSM registers encompasses the
	 * SMRAM range as well as the IED range. However, the SMRAM available
	 * to the handler is 4MiB since the IEDRAM lives at TSEGMB + 4MiB.
	 */
	smm_region(&tseg_base, &tseg_size);

	/* SMRR has 32 bits of valid address aligned to 4KiB. */
	params->smrr_base.lo = (tseg_base & rmask) | MTRR_TYPE_WRBACK;
	params->smrr_base.hi = 0;
	params->smrr_mask.lo = (~(tseg_size - 1) & rmask) | MTRR_PHYS_MASK_VALID;
	params->smrr_mask.hi = 0;

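	/*
	 * Worked example (hypothetical values): an 8MiB TSEG at 0x78000000
	 * yields smrr_base.lo = 0x78000006 (WRBACK type in bits 7:0) and
	 * smrr_mask.lo = 0xff800800 (8MiB mask with the valid bit set).
	 */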
	smm_subregion(SMM_SUBREGION_CHIPSET, &params->ied_base, &params->ied_size);

	/* The PRMRR and UNCORE_PRMRR are at IEDBASE + 2MiB. */
	prmrr_base = (params->ied_base + (2 << 20)) & rmask;
	prmrr_size = params->ied_size - (2 << 20);

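	/*
	 * Hypothetical example: with a 4MiB chipset (IED) subregion, the
	 * PRMRR covers the remaining 4MiB - 2MiB = 2MiB above
	 * IEDBASE + 2MiB.
	 */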
	/*
	 * PRMRR has 46 bits of valid address aligned to 4KiB. It's dependent
	 * on the number of physical address bits supported.
	 */
	params->prmrr_base.lo = prmrr_base | MTRR_TYPE_WRBACK;
	params->prmrr_base.hi = 0;
	params->prmrr_mask.lo = (~(prmrr_size - 1) & rmask)
		| MTRR_PHYS_MASK_VALID;
	params->prmrr_mask.hi = (1 << (phys_bits - 32)) - 1;

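	/*
	 * For example, with phys_bits = 39 the mask's high dword becomes
	 * (1 << 7) - 1 = 0x7f, covering address bits 38:32.
	 */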
	/* UNCORE_PRMRR has 39 bits of valid address aligned to 4KiB. */
	params->uncore_prmrr_base.lo = prmrr_base;
	params->uncore_prmrr_base.hi = 0;
	params->uncore_prmrr_mask.lo = (~(prmrr_size - 1) & rmask) |
				       MTRR_PHYS_MASK_VALID;
	params->uncore_prmrr_mask.hi = (1 << (39 - 32)) - 1;
}

static void setup_ied_area(struct smm_relocation_params *params)
{
	char *ied_base;

	struct ied_header ied = {
		.signature = "INTEL RSVD",
		.size = params->ied_size,
		.reserved = {0},
	};

	ied_base = (void *)params->ied_base;

	/* Place IED header at IEDBASE. */
	memcpy(ied_base, &ied, sizeof(ied));

	/* Zero out 32KiB at IEDBASE + 1MiB. */
	memset(ied_base + (1 << 20), 0, (32 << 10));
}

void smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
	      size_t *smm_save_state_size)
{
	printk(BIOS_DEBUG, "Setting up SMI for CPU\n");

	fill_in_relocation_params(&smm_reloc_params);

	smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);

	setup_ied_area(&smm_reloc_params);

	*smm_save_state_size = sizeof(em64t101_smm_state_save_area_t);
}

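/*
 * The save state size reported above is what the common SMM loader uses to
 * stagger the per-CPU SMBASE values (the staggered_smbase argument that
 * later arrives in smm_relocation_handler()).
 */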
void smm_initialize(void)
{
	/* Clear the SMM state in the southbridge. */
	smm_southbridge_clear_state();

	/*
	 * Run the relocation handler on the BSP to check and set up
	 * parallel SMM relocation.
	 */
	smm_initiate_relocation();

	if (smm_reloc_params.smm_save_state_in_msrs)
		printk(BIOS_DEBUG, "Doing parallel SMM relocation.\n");
}

/*
 * The default SMM entry can happen in parallel or serially. If the
 * default SMM entry is done in parallel, the BSP has already set up
 * the save state in each CPU's MSRs. At least one save state size
 * is required for the initial SMM entry for the BSP to determine if
 * parallel SMM relocation is even feasible.
 */
void smm_relocate(void)
{
	/*
	 * If smm_save_state_in_msrs is non-zero then parallel SMM relocation
	 * shall take place. Run the relocation handler a second time on the
	 * BSP to do the final move. For APs, a relocation handler always
	 * needs to be run.
	 */
	if (smm_reloc_params.smm_save_state_in_msrs)
		smm_initiate_relocation_parallel();
	else if (!boot_cpu())
		smm_initiate_relocation();
}

void smm_lock(void)
{
	struct device *sa_dev = pcidev_path_on_root(SA_DEVFN_ROOT);

	/*
	 * LOCK the SMM memory window and enable normal SMM.
	 * After running this function, only a full reset can
	 * make the SMM registers writable again.
	 */
	printk(BIOS_DEBUG, "Locking SMM.\n");

	pci_write_config8(sa_dev, SMRAM, D_LCK | G_SMRAME | C_BASE_SEG);
}