soc/intel/tigerlake: Replace Reserved9 usage with DisableDimmCh# UPD.
[coreboot.git] / src / soc / intel / tigerlake / meminit.c
/*
 * This file is part of the coreboot project.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include <assert.h>
#include <console/console.h>
#include <fsp/util.h>
#include <soc/meminit.h>
#include <spd_bin.h>
#include <string.h>

/* If memory is half-populated, then the upper half of the channels need to be left empty. */
#define LPDDR4X_CHANNEL_UNPOPULATED(ch, half_populated) \
	((half_populated) && ((ch) >= (LPDDR4X_CHANNELS / 2)))
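
/*
 * Example (assuming LPDDR4X_CHANNELS is 8, as implied by the per-channel UPDs
 * handled below): with half_populated set, channels 0-3 stay populated and
 * channels 4-7 are programmed as empty, i.e.
 * LPDDR4X_CHANNEL_UNPOPULATED(3, true) == 0 and
 * LPDDR4X_CHANNEL_UNPOPULATED(4, true) == 1.
 */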

enum dimm_enable_options {
	ENABLE_BOTH_DIMMS = 0,
	DISABLE_DIMM0 = 1,
	DISABLE_DIMM1 = 2,
	DISABLE_BOTH_DIMMS = 3
};
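
/*
 * Note: the value returned by get_dimm_cfg() is written straight into the FSP
 * DisableDimmCh# UPDs below; the encoding is assumed to be a per-DIMM disable
 * bitmask (bit 0 = DIMM0, bit 1 = DIMM1), matching the enum values above.
 */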

static uint8_t get_dimm_cfg(uintptr_t dimm0, uintptr_t dimm1)
{
	if (dimm0 && dimm1)
		return ENABLE_BOTH_DIMMS;
	if (!dimm0 && !dimm1)
		return DISABLE_BOTH_DIMMS;
	if (!dimm1)
		return DISABLE_DIMM1;
	if (!dimm0)
		die("Disabling of only dimm0 is not supported!\n");

	return DISABLE_BOTH_DIMMS;
}
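
/*
 * In the memory-down flow below, init_spd_upds_dimm0() always passes dimm1 == 0,
 * so populated channels end up with DISABLE_DIMM1 and empty channels with
 * DISABLE_BOTH_DIMMS.
 */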

static void init_spd_upds(FSP_M_CONFIG *mem_cfg, int channel, uintptr_t spd_dimm0,
			  uintptr_t spd_dimm1)
{
	uint8_t dimm_cfg = get_dimm_cfg(spd_dimm0, spd_dimm1);

	switch (channel) {
	case 0:
		mem_cfg->DisableDimmCh0 = dimm_cfg;
		mem_cfg->MemorySpdPtr00 = spd_dimm0;
		mem_cfg->MemorySpdPtr01 = spd_dimm1;
		break;
	case 1:
		mem_cfg->DisableDimmCh1 = dimm_cfg;
		mem_cfg->MemorySpdPtr02 = spd_dimm0;
		mem_cfg->MemorySpdPtr03 = spd_dimm1;
		break;
	case 2:
		mem_cfg->DisableDimmCh2 = dimm_cfg;
		mem_cfg->MemorySpdPtr04 = spd_dimm0;
		mem_cfg->MemorySpdPtr05 = spd_dimm1;
		break;
	case 3:
		mem_cfg->DisableDimmCh3 = dimm_cfg;
		mem_cfg->MemorySpdPtr06 = spd_dimm0;
		mem_cfg->MemorySpdPtr07 = spd_dimm1;
		break;
	case 4:
		mem_cfg->DisableDimmCh4 = dimm_cfg;
		mem_cfg->MemorySpdPtr08 = spd_dimm0;
		mem_cfg->MemorySpdPtr09 = spd_dimm1;
		break;
	case 5:
		mem_cfg->DisableDimmCh5 = dimm_cfg;
		mem_cfg->MemorySpdPtr10 = spd_dimm0;
		mem_cfg->MemorySpdPtr11 = spd_dimm1;
		break;
	case 6:
		mem_cfg->DisableDimmCh6 = dimm_cfg;
		mem_cfg->MemorySpdPtr12 = spd_dimm0;
		mem_cfg->MemorySpdPtr13 = spd_dimm1;
		break;
	case 7:
		mem_cfg->DisableDimmCh7 = dimm_cfg;
		mem_cfg->MemorySpdPtr14 = spd_dimm0;
		mem_cfg->MemorySpdPtr15 = spd_dimm1;
		break;
	default:
		die("Invalid channel: %d\n", channel);
	}
}

static inline void init_spd_upds_empty(FSP_M_CONFIG *mem_cfg, int channel)
{
	init_spd_upds(mem_cfg, channel, 0, 0);
}

static inline void init_spd_upds_dimm0(FSP_M_CONFIG *mem_cfg, int channel, uintptr_t spd_dimm0)
{
	init_spd_upds(mem_cfg, channel, spd_dimm0, 0);
}

static void init_dq_upds(FSP_M_CONFIG *mem_cfg, int byte_pair, const uint8_t *dq_byte0,
			 const uint8_t *dq_byte1)
{
	uint8_t *dq_upd;

	switch (byte_pair) {
	case 0:
		dq_upd = mem_cfg->DqMapCpu2DramCh0;
		break;
	case 1:
		dq_upd = mem_cfg->DqMapCpu2DramCh1;
		break;
	case 2:
		dq_upd = mem_cfg->DqMapCpu2DramCh2;
		break;
	case 3:
		dq_upd = mem_cfg->DqMapCpu2DramCh3;
		break;
	case 4:
		dq_upd = mem_cfg->DqMapCpu2DramCh4;
		break;
	case 5:
		dq_upd = mem_cfg->DqMapCpu2DramCh5;
		break;
	case 6:
		dq_upd = mem_cfg->DqMapCpu2DramCh6;
		break;
	case 7:
		dq_upd = mem_cfg->DqMapCpu2DramCh7;
		break;
	default:
		die("Invalid byte_pair: %d\n", byte_pair);
	}

	if (dq_byte0 && dq_byte1) {
		memcpy(dq_upd, dq_byte0, BITS_PER_BYTE);
		memcpy(dq_upd + BITS_PER_BYTE, dq_byte1, BITS_PER_BYTE);
	} else {
		memset(dq_upd, 0, BITS_PER_BYTE * 2);
	}
}

static inline void init_dq_upds_empty(FSP_M_CONFIG *mem_cfg, int byte_pair)
{
	init_dq_upds(mem_cfg, byte_pair, NULL, NULL);
}

static void init_dqs_upds(FSP_M_CONFIG *mem_cfg, int byte_pair, uint8_t dqs_byte0,
			  uint8_t dqs_byte1)
{
	uint8_t *dqs_upd;

	switch (byte_pair) {
	case 0:
		dqs_upd = mem_cfg->DqsMapCpu2DramCh0;
		break;
	case 1:
		dqs_upd = mem_cfg->DqsMapCpu2DramCh1;
		break;
	case 2:
		dqs_upd = mem_cfg->DqsMapCpu2DramCh2;
		break;
	case 3:
		dqs_upd = mem_cfg->DqsMapCpu2DramCh3;
		break;
	case 4:
		dqs_upd = mem_cfg->DqsMapCpu2DramCh4;
		break;
	case 5:
		dqs_upd = mem_cfg->DqsMapCpu2DramCh5;
		break;
	case 6:
		dqs_upd = mem_cfg->DqsMapCpu2DramCh6;
		break;
	case 7:
		dqs_upd = mem_cfg->DqsMapCpu2DramCh7;
		break;
	default:
		die("Invalid byte_pair: %d\n", byte_pair);
	}

	dqs_upd[0] = dqs_byte0;
	dqs_upd[1] = dqs_byte1;
}

static inline void init_dqs_upds_empty(FSP_M_CONFIG *mem_cfg, int byte_pair)
{
	init_dqs_upds(mem_cfg, byte_pair, 0, 0);
}

static void read_spd_from_cbfs(uint8_t index, uintptr_t *data, size_t *len)
{
	struct region_device spd_rdev;

	printk(BIOS_DEBUG, "SPD INDEX = %u\n", index);
	if (get_spd_cbfs_rdev(&spd_rdev, index) < 0)
		die("spd.bin not found or incorrect index\n");

	/* Memory leak is ok since we have memory mapped boot media */
	assert(CONFIG(BOOT_DEVICE_MEMORY_MAPPED));

	*len = region_device_sz(&spd_rdev);
	*data = (uintptr_t)rdev_mmap_full(&spd_rdev);
}

static void read_md_spd(const struct spd_info *info, uintptr_t *data, size_t *len)
{
	if (info->md_spd_loc == SPD_MEMPTR) {
		*data = info->data_ptr;
		*len = info->data_len;
	} else if (info->md_spd_loc == SPD_CBFS) {
		read_spd_from_cbfs(info->cbfs_index, data, len);
	} else {
		die("Not a valid location(%d) for Memory-down SPD!\n", info->md_spd_loc);
	}

	print_spd_info((unsigned char *)(*data));
}
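
/*
 * Initialize the LPDDR4x memory-down UPDs: per-channel SPD pointers and
 * DisableDimmCh# values, DQ/DQS CPU-to-DRAM maps, and the common settings
 * (no DQ pin interleaving, board-selected ECT, MrcSafeConfig). On
 * half-populated boards the upper half of the channels is left empty.
 */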
void meminit_lpddr4x(FSP_M_CONFIG *mem_cfg, const struct lpddr4x_cfg *board_cfg,
		     const struct spd_info *info, bool half_populated)
{
	size_t spd_len;
	uintptr_t spd_data;
	int i;

	if (info->topology != MEMORY_DOWN)
		die("LPDDR4x only supports memory-down topology.\n");

	/* LPDDR4x does not allow interleaved memory */
	mem_cfg->DqPinsInterleaved = 0;
	mem_cfg->ECT = board_cfg->ect;
	mem_cfg->MrcSafeConfig = 0x1;

	read_md_spd(info, &spd_data, &spd_len);
	mem_cfg->MemorySpdDataLen = spd_len;

	for (i = 0; i < LPDDR4X_CHANNELS; i++) {
		if (LPDDR4X_CHANNEL_UNPOPULATED(i, half_populated))
			init_spd_upds_empty(mem_cfg, i);
		else
			init_spd_upds_dimm0(mem_cfg, i, spd_data);
	}

	/*
	 * LPDDR4x memory interface has 2 DQs per channel. Each DQ consists of 8 bits (1
	 * byte). However, FSP UPDs for DQ Map expect a DQ pair (i.e. mapping for 2 bytes) in
	 * each UPD.
	 *
	 * Thus, init_dq_upds() needs to be called for the DQ pair of each channel:
	 * DqMapCpu2DramCh0 --> dq_map[CHAN=0][0-1]
	 * DqMapCpu2DramCh1 --> dq_map[CHAN=1][0-1]
	 * DqMapCpu2DramCh2 --> dq_map[CHAN=2][0-1]
	 * DqMapCpu2DramCh3 --> dq_map[CHAN=3][0-1]
	 * DqMapCpu2DramCh4 --> dq_map[CHAN=4][0-1]
	 * DqMapCpu2DramCh5 --> dq_map[CHAN=5][0-1]
	 * DqMapCpu2DramCh6 --> dq_map[CHAN=6][0-1]
	 * DqMapCpu2DramCh7 --> dq_map[CHAN=7][0-1]
	 */
	for (i = 0; i < LPDDR4X_CHANNELS; i++) {
		if (LPDDR4X_CHANNEL_UNPOPULATED(i, half_populated))
			init_dq_upds_empty(mem_cfg, i);
		else
			init_dq_upds(mem_cfg, i, board_cfg->dq_map[i][0],
				     board_cfg->dq_map[i][1]);
	}

	/*
	 * LPDDR4x memory interface has 2 DQS pairs per channel. FSP UPDs for DQS Map expect a
	 * pair in each UPD.
	 *
	 * Thus, init_dqs_upds() needs to be called for the DQS pair of each channel:
	 * DqsMapCpu2DramCh0 --> dqs_map[CHAN=0][0-1]
	 * DqsMapCpu2DramCh1 --> dqs_map[CHAN=1][0-1]
	 * DqsMapCpu2DramCh2 --> dqs_map[CHAN=2][0-1]
	 * DqsMapCpu2DramCh3 --> dqs_map[CHAN=3][0-1]
	 * DqsMapCpu2DramCh4 --> dqs_map[CHAN=4][0-1]
	 * DqsMapCpu2DramCh5 --> dqs_map[CHAN=5][0-1]
	 * DqsMapCpu2DramCh6 --> dqs_map[CHAN=6][0-1]
	 * DqsMapCpu2DramCh7 --> dqs_map[CHAN=7][0-1]
	 */
	for (i = 0; i < LPDDR4X_CHANNELS; i++) {
		if (LPDDR4X_CHANNEL_UNPOPULATED(i, half_populated))
			init_dqs_upds_empty(mem_cfg, i);
		else
			init_dqs_upds(mem_cfg, i, board_cfg->dqs_map[i][0],
				      board_cfg->dqs_map[i][1]);
	}
}
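
/*
 * Hypothetical usage sketch (illustrative, not part of this file): a mainboard's
 * FSP-M callback would typically build a struct spd_info and a board lpddr4x_cfg
 * and hand them to meminit_lpddr4x(). Field names are inferred from their use
 * above; variant_memory_params()/variant_memory_sku() are placeholder helpers.
 *
 *	void mainboard_memory_init_params(FSPM_UPD *memupd)
 *	{
 *		const struct lpddr4x_cfg *board_cfg = variant_memory_params();
 *		const struct spd_info spd_info = {
 *			.topology = MEMORY_DOWN,
 *			.md_spd_loc = SPD_CBFS,
 *			.cbfs_index = variant_memory_sku(),
 *		};
 *		bool half_populated = false;
 *
 *		meminit_lpddr4x(&memupd->FspmConfig, board_cfg, &spd_info, half_populated);
 *	}
 */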