/*
 * soc/intel/alderlake memory initialization (meminit.c).
 * Includes provision to override Rcomp settings from the mainboard
 * devicetree configuration.
 */
/* SPDX-License-Identifier: GPL-2.0-or-later */

#include <console/console.h>
#include <fsp/util.h>
#include <soc/meminit.h>
#include <string.h>
/* Physical channel width, in bits, for LPDDR4x/LPDDR5x parts. */
#define LPX_PHYSICAL_CH_WIDTH	16
#define LPX_CHANNELS		CHANNEL_COUNT(LPX_PHYSICAL_CH_WIDTH)

/* Physical channel width, in bits, for DDR4 parts. */
#define DDR4_PHYSICAL_CH_WIDTH	64
#define DDR4_CHANNELS		CHANNEL_COUNT(DDR4_PHYSICAL_CH_WIDTH)

/* Physical channel width, in bits, for DDR5 parts. */
#define DDR5_PHYSICAL_CH_WIDTH	32
#define DDR5_CHANNELS		CHANNEL_COUNT(DDR5_PHYSICAL_CH_WIDTH)
17 static void set_rcomp_config(FSP_M_CONFIG *mem_cfg, const struct mb_cfg *mb_cfg)
19 if (mb_cfg->rcomp.resistor != 0)
20 mem_cfg->RcompResistor = mb_cfg->rcomp.resistor;
22 for (size_t i = 0; i < ARRAY_SIZE(mem_cfg->RcompTarget); i++) {
23 if (mb_cfg->rcomp.targets[i] != 0)
24 mem_cfg->RcompTarget[i] = mb_cfg->rcomp.targets[i];
28 static void meminit_lp4x(FSP_M_CONFIG *mem_cfg)
30 mem_cfg->DqPinsInterleaved = 0;
33 static void meminit_lp5x(FSP_M_CONFIG *mem_cfg, const struct mem_lp5x_config *lp5x_config)
35 mem_cfg->DqPinsInterleaved = 0;
36 mem_cfg->Lp5CccConfig = lp5x_config->ccc_config;
39 static void meminit_ddr(FSP_M_CONFIG *mem_cfg, const struct mem_ddr_config *ddr_config)
41 mem_cfg->DqPinsInterleaved = ddr_config->dq_pins_interleaved;
44 static const struct soc_mem_cfg soc_mem_cfg[] = {
45 [MEM_TYPE_DDR4] = {
46 .num_phys_channels = DDR4_CHANNELS,
47 .phys_to_mrc_map = {
48 [0] = 0,
49 [1] = 4,
51 .md_phy_masks = {
53 * Only physical channel 0 is populated in case of half-populated
54 * configuration.
56 .half_channel = BIT(0),
57 /* In mixed topologies, channel 1 is always memory-down. */
58 .mixed_topo = BIT(1),
61 [MEM_TYPE_DDR5] = {
62 .num_phys_channels = DDR5_CHANNELS,
63 .phys_to_mrc_map = {
64 [0] = 0,
65 [1] = 1,
66 [2] = 4,
67 [3] = 5,
69 .md_phy_masks = {
71 * Physical channels 0 and 1 are populated in case of
72 * half-populated configurations.
74 .half_channel = BIT(0) | BIT(1),
75 /* In mixed topologies, channels 2 and 3 are always memory-down. */
76 .mixed_topo = BIT(2) | BIT(3),
79 [MEM_TYPE_LP4X] = {
80 .num_phys_channels = LPX_CHANNELS,
81 .phys_to_mrc_map = {
82 [0] = 0,
83 [1] = 1,
84 [2] = 2,
85 [3] = 3,
86 [4] = 4,
87 [5] = 5,
88 [6] = 6,
89 [7] = 7,
91 .md_phy_masks = {
93 * Physical channels 0, 1, 2 and 3 are populated in case of
94 * half-populated configurations.
96 .half_channel = BIT(0) | BIT(1) | BIT(2) | BIT(3),
97 /* LP4x does not support mixed topologies. */
100 [MEM_TYPE_LP5X] = {
101 .num_phys_channels = LPX_CHANNELS,
102 .phys_to_mrc_map = {
103 [0] = 0,
104 [1] = 1,
105 [2] = 2,
106 [3] = 3,
107 [4] = 4,
108 [5] = 5,
109 [6] = 6,
110 [7] = 7,
112 .md_phy_masks = {
114 * Physical channels 0, 1, 2 and 3 are populated in case of
115 * half-populated configurations.
117 .half_channel = BIT(0) | BIT(1) | BIT(2) | BIT(3),
118 /* LP5x does not support mixed topologies. */
123 static void mem_init_spd_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data)
125 uint32_t *spd_upds[MRC_CHANNELS][CONFIG_DIMMS_PER_CHANNEL] = {
126 [0] = { &mem_cfg->MemorySpdPtr00, &mem_cfg->MemorySpdPtr01, },
127 [1] = { &mem_cfg->MemorySpdPtr02, &mem_cfg->MemorySpdPtr03, },
128 [2] = { &mem_cfg->MemorySpdPtr04, &mem_cfg->MemorySpdPtr05, },
129 [3] = { &mem_cfg->MemorySpdPtr06, &mem_cfg->MemorySpdPtr07, },
130 [4] = { &mem_cfg->MemorySpdPtr08, &mem_cfg->MemorySpdPtr09, },
131 [5] = { &mem_cfg->MemorySpdPtr10, &mem_cfg->MemorySpdPtr11, },
132 [6] = { &mem_cfg->MemorySpdPtr12, &mem_cfg->MemorySpdPtr13, },
133 [7] = { &mem_cfg->MemorySpdPtr14, &mem_cfg->MemorySpdPtr15, },
135 uint8_t *disable_dimm_upds[MRC_CHANNELS] = {
136 &mem_cfg->DisableDimmMc0Ch0,
137 &mem_cfg->DisableDimmMc0Ch1,
138 &mem_cfg->DisableDimmMc0Ch2,
139 &mem_cfg->DisableDimmMc0Ch3,
140 &mem_cfg->DisableDimmMc1Ch0,
141 &mem_cfg->DisableDimmMc1Ch1,
142 &mem_cfg->DisableDimmMc1Ch2,
143 &mem_cfg->DisableDimmMc1Ch3,
145 size_t ch, dimm;
147 mem_cfg->MemorySpdDataLen = data->spd_len;
149 for (ch = 0; ch < MRC_CHANNELS; ch++) {
150 uint8_t *disable_dimm_ptr = disable_dimm_upds[ch];
151 *disable_dimm_ptr = 0;
153 for (dimm = 0; dimm < CONFIG_DIMMS_PER_CHANNEL; dimm++) {
154 uint32_t *spd_ptr = spd_upds[ch][dimm];
156 *spd_ptr = data->spd[ch][dimm];
157 if (!*spd_ptr)
158 *disable_dimm_ptr |= BIT(dimm);
163 static void mem_init_dq_dqs_upds(void *upds[MRC_CHANNELS], const void *map, size_t upd_size,
164 const struct mem_channel_data *data, bool auto_detect)
166 size_t i;
168 for (i = 0; i < MRC_CHANNELS; i++, map += upd_size) {
169 if (auto_detect ||
170 !channel_is_populated(i, MRC_CHANNELS, data->ch_population_flags))
171 memset(upds[i], 0, upd_size);
172 else
173 memcpy(upds[i], map, upd_size);
177 static void mem_init_dq_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data,
178 const struct mb_cfg *mb_cfg, bool auto_detect)
180 void *dq_upds[MRC_CHANNELS] = {
181 &mem_cfg->DqMapCpu2DramCh0,
182 &mem_cfg->DqMapCpu2DramCh1,
183 &mem_cfg->DqMapCpu2DramCh2,
184 &mem_cfg->DqMapCpu2DramCh3,
185 &mem_cfg->DqMapCpu2DramCh4,
186 &mem_cfg->DqMapCpu2DramCh5,
187 &mem_cfg->DqMapCpu2DramCh6,
188 &mem_cfg->DqMapCpu2DramCh7,
191 const size_t upd_size = sizeof(mem_cfg->DqMapCpu2DramCh0);
193 _Static_assert(upd_size == CONFIG_MRC_CHANNEL_WIDTH, "Incorrect DQ UPD size!");
195 mem_init_dq_dqs_upds(dq_upds, mb_cfg->dq_map, upd_size, data, auto_detect);
198 static void mem_init_dqs_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data,
199 const struct mb_cfg *mb_cfg, bool auto_detect)
201 void *dqs_upds[MRC_CHANNELS] = {
202 &mem_cfg->DqsMapCpu2DramCh0,
203 &mem_cfg->DqsMapCpu2DramCh1,
204 &mem_cfg->DqsMapCpu2DramCh2,
205 &mem_cfg->DqsMapCpu2DramCh3,
206 &mem_cfg->DqsMapCpu2DramCh4,
207 &mem_cfg->DqsMapCpu2DramCh5,
208 &mem_cfg->DqsMapCpu2DramCh6,
209 &mem_cfg->DqsMapCpu2DramCh7,
212 const size_t upd_size = sizeof(mem_cfg->DqsMapCpu2DramCh0);
214 _Static_assert(upd_size == CONFIG_MRC_CHANNEL_WIDTH / 8, "Incorrect DQS UPD size!");
216 mem_init_dq_dqs_upds(dqs_upds, mb_cfg->dqs_map, upd_size, data, auto_detect);
219 void memcfg_init(FSP_M_CONFIG *mem_cfg, const struct mb_cfg *mb_cfg,
220 const struct mem_spd *spd_info, bool half_populated)
222 struct mem_channel_data data;
223 bool dq_dqs_auto_detect = false;
225 mem_cfg->ECT = mb_cfg->ect;
226 mem_cfg->UserBd = mb_cfg->UserBd;
227 set_rcomp_config(mem_cfg, mb_cfg);
229 switch (mb_cfg->type) {
230 case MEM_TYPE_DDR4:
231 case MEM_TYPE_DDR5:
232 meminit_ddr(mem_cfg, &mb_cfg->ddr_config);
233 dq_dqs_auto_detect = true;
234 break;
235 case MEM_TYPE_LP4X:
236 meminit_lp4x(mem_cfg);
237 break;
238 case MEM_TYPE_LP5X:
239 meminit_lp5x(mem_cfg, &mb_cfg->lp5x_config);
240 break;
241 default:
242 die("Unsupported memory type(%d)\n", mb_cfg->type);
245 mem_populate_channel_data(&soc_mem_cfg[mb_cfg->type], spd_info, half_populated, &data);
246 mem_init_spd_upds(mem_cfg, &data);
247 mem_init_dq_upds(mem_cfg, &data, mb_cfg, dq_dqs_auto_detect);
248 mem_init_dqs_upds(mem_cfg, &data, mb_cfg, dq_dqs_auto_detect);