/* SPDX-License-Identifier: GPL-2.0-only */

#include <amdblocks/cpu.h>
#include <cpu/x86/mp.h>
#include <cpu/x86/mtrr.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/smm.h>
#include <cpu/amd/msr.h>
#include <cpu/amd/amd64_save_state.h>
#include <cpu/x86/lapic.h>
#include <device/device.h>
#include <device/pci_ops.h>
#include <soc/pci_devs.h>
#include <soc/reset.h>
#include <soc/iomap.h>
#include <console/console.h>
#include <cpu/amd/microcode.h>

/*
 * MP and SMM loading initialization.
 */
struct smm_relocation_params {
	msr_t tseg_base;
	msr_t tseg_mask;
};

static struct smm_relocation_params smm_reloc_params;

/*
 * Do essential initialization tasks before APs can be fired up -
 *
 * 1. Prevent race condition in MTRR solution. Enable MTRRs on the BSP. This
 * creates the MTRR solution that the APs will use. Otherwise APs will try to
 * apply the incomplete solution as the BSP is calculating it.
 */
static void pre_mp_init(void)
{
	x86_setup_mtrrs_with_detect_no_above_4gb();
}
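
/*
 * Compute the TSEG base/mask pair that relocation_handler() later writes to
 * SMM_ADDR_MSR/SMM_MASK_MSR. Both values are aligned down to 128 KiB, and the
 * high dword of the mask covers the physical address bits above 4 GiB.
 */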
static void fill_in_relocation_params(struct smm_relocation_params *params)
{
	uintptr_t tseg_base;
	size_t tseg_size;

	smm_region(&tseg_base, &tseg_size);

	params->tseg_base.lo = ALIGN_DOWN(tseg_base, 128 * KiB);
	params->tseg_base.hi = 0;
	params->tseg_mask.lo = ALIGN_DOWN(~(tseg_size - 1), 128 * KiB);
	params->tseg_mask.hi = ((1 << (cpu_phys_address_size() - 32)) - 1);

	params->tseg_mask.lo |= SMM_TSEG_WB;
}
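
/* Report the permanent SMM handler region and the AMD64 save state size to the SMM loader. */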
static void get_smm_info(uintptr_t *perm_smbase, size_t *perm_smsize,
			 size_t *smm_save_state_size)
{
	printk(BIOS_DEBUG, "Setting up SMI for CPU\n");

	fill_in_relocation_params(&smm_reloc_params);

	smm_subregion(SMM_SUBREGION_HANDLER, perm_smbase, perm_smsize);
	*smm_save_state_size = sizeof(amd64_smm_state_save_area_t);
}
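
/*
 * Runs on each CPU during SMM relocation: program the TSEG base/mask MSRs and
 * point this CPU's SMBASE at its staggered location.
 */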
static void relocation_handler(int cpu, uintptr_t curr_smbase,
			       uintptr_t staggered_smbase)
{
	struct smm_relocation_params *relo_params = &smm_reloc_params;
	amd64_smm_state_save_area_t *smm_state;

	wrmsr(SMM_ADDR_MSR, relo_params->tseg_base);
	wrmsr(SMM_MASK_MSR, relo_params->tseg_mask);

	smm_state = (void *)(SMM_AMD64_SAVE_STATE_OFFSET + curr_smbase);
	smm_state->smbase = staggered_smbase;
}
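
/* Send the SMM info APM command so the now-installed SMI handler can gather its runtime state. */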
static void post_mp_init(void)
{
	apm_control(APM_CNT_SMMINFO);
}
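
/* Callbacks consumed by mp_init_with_smm() below. */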
static const struct mp_ops mp_ops = {
	.pre_mp_init = pre_mp_init,
	.get_cpu_count = get_cpu_count,
	.get_smm_info = get_smm_info,
	.relocation_handler = relocation_handler,
	.post_mp_init = post_mp_init,
};

void mp_init_cpus(struct bus *cpu_bus)
{
	/* Clear for take-off */
	if (mp_init_with_smm(cpu_bus, &mp_ops) < 0)
		printk(BIOS_ERR, "MP initialization failure.\n");

	/* pre_mp_init made the flash not cacheable. Reset to WP for performance. */
	mtrr_use_temp_range(FLASH_BASE_ADDR, CONFIG_ROM_SIZE, MTRR_TYPE_WRPROT);

	set_warm_reset_flag();
}

static void model_17_init(struct device *dev)
{
	set_cstate_io_addr();

	amd_update_microcode_from_cbfs();
}

static struct device_operations cpu_dev_ops = {
	.init = model_17_init,
};
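
/* CPUIDs of the Raven and Picasso steppings this driver binds to. */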
static struct cpu_device_id cpu_table[] = {
	{ X86_VENDOR_AMD, RAVEN1_B0_CPUID },
	{ X86_VENDOR_AMD, PICASSO_B0_CPUID },
	{ X86_VENDOR_AMD, PICASSO_B1_CPUID },
	{ X86_VENDOR_AMD, RAVEN2_A0_CPUID },
	{ X86_VENDOR_AMD, RAVEN2_A1_CPUID },
	{ 0, 0 },
};

static const struct cpu_driver model_17 __cpu_driver = {
	.ops      = &cpu_dev_ops,
	.id_table = cpu_table,
};