soc/intel/common: Add Alder Lake device IDs
[coreboot.git] / src / soc / intel / common / block / systemagent / systemagent.c
blob19d4522d95a65d86efa94e329111cd407deaaf3d
1 /* SPDX-License-Identifier: GPL-2.0-only */
3 #include <cbmem.h>
4 #include <console/console.h>
5 #include <cpu/cpu.h>
6 #include <device/device.h>
7 #include <device/pci.h>
8 #include <device/pci_ids.h>
9 #include <intelblocks/acpi.h>
10 #include <intelblocks/cfg.h>
11 #include <intelblocks/systemagent.h>
12 #include <smbios.h>
13 #include <soc/iomap.h>
14 #include <soc/nvs.h>
15 #include <soc/pci_devs.h>
16 #include <soc/systemagent.h>
17 #include <types.h>
18 #include "systemagent_def.h"
20 /* SoC override function */
21 __weak void soc_systemagent_init(struct device *dev)
23 /* no-op */
26 __weak void soc_add_fixed_mmio_resources(struct device *dev,
27 int *resource_cnt)
29 /* no-op */
32 __weak int soc_get_uncore_prmmr_base_and_mask(uint64_t *base,
33 uint64_t *mask)
35 /* return failure for this dummy API */
36 return -1;
39 __weak unsigned long sa_write_acpi_tables(const struct device *dev,
40 unsigned long current,
41 struct acpi_rsdp *rsdp)
43 return current;
46 __weak uint32_t soc_systemagent_max_chan_capacity_mib(u8 capid0_a_ddrsz)
48 return 32768; /* 32 GiB per channel */
51 static uint8_t sa_get_ecc_type(const uint32_t capid0_a)
53 return capid0_a & CAPID_ECCDIS ? MEMORY_ARRAY_ECC_NONE : MEMORY_ARRAY_ECC_SINGLE_BIT;
56 static size_t sa_slots_per_channel(const uint32_t capid0_a)
58 return !(capid0_a & CAPID_DDPCD) + 1;
61 static size_t sa_number_of_channels(const uint32_t capid0_a)
63 return !(capid0_a & CAPID_PDCD) + 1;
66 static void sa_soc_systemagent_init(struct device *dev)
68 soc_systemagent_init(dev);
70 struct memory_info *m = cbmem_find(CBMEM_ID_MEMINFO);
71 if (m == NULL)
72 return;
74 const uint32_t capid0_a = pci_read_config32(dev, CAPID0_A);
76 m->ecc_type = sa_get_ecc_type(capid0_a);
77 m->max_capacity_mib = soc_systemagent_max_chan_capacity_mib(CAPID_DDRSZ(capid0_a)) *
78 sa_number_of_channels(capid0_a);
79 m->number_of_devices = sa_slots_per_channel(capid0_a) *
80 sa_number_of_channels(capid0_a);
84 * Add all known fixed MMIO ranges that hang off the host bridge/memory
85 * controller device.
87 void sa_add_fixed_mmio_resources(struct device *dev, int *resource_cnt,
88 const struct sa_mmio_descriptor *sa_fixed_resources, size_t count)
90 int i;
91 int index = *resource_cnt;
93 for (i = 0; i < count; i++) {
94 uintptr_t base;
95 size_t size;
97 size = sa_fixed_resources[i].size;
98 base = sa_fixed_resources[i].base;
100 mmio_resource(dev, index++, base / KiB, size / KiB);
103 *resource_cnt = index;
107 * DRAM memory mapped register
109 * TOUUD: This 64 bit register defines the Top of Upper Usable DRAM
110 * TOLUD: This 32 bit register defines the Top of Low Usable DRAM
111 * BGSM: This register contains the base address of stolen DRAM memory for GTT
112 * TSEG: This register contains the base address of TSEG DRAM memory
114 static const struct sa_mem_map_descriptor sa_memory_map[MAX_MAP_ENTRIES] = {
115 { TOUUD, true, "TOUUD" },
116 { TOLUD, false, "TOLUD" },
117 { BGSM, false, "BGSM" },
118 { TSEG, false, "TSEG" },
121 /* Read DRAM memory map register value through PCI configuration space */
122 static void sa_read_map_entry(struct device *dev,
123 const struct sa_mem_map_descriptor *entry, uint64_t *result)
125 uint64_t value = 0;
127 if (entry->is_64_bit) {
128 value = pci_read_config32(dev, entry->reg + 4);
129 value <<= 32;
132 value |= pci_read_config32(dev, entry->reg);
133 /* All registers are on a 1MiB granularity. */
134 value = ALIGN_DOWN(value, 1 * MiB);
136 *result = value;
139 /* Fill MMIO resource above 4GB into GNVS */
140 void sa_fill_gnvs(struct global_nvs *gnvs)
142 struct device *sa_dev = pcidev_path_on_root(SA_DEVFN_ROOT);
144 sa_read_map_entry(sa_dev, &sa_memory_map[SA_TOUUD_REG], &gnvs->a4gb);
145 gnvs->a4gs = POWER_OF_2(cpu_phys_address_size()) - gnvs->a4gb;
146 printk(BIOS_DEBUG, "PCI space above 4GB MMIO is at 0x%llx, len = 0x%llx\n",
147 gnvs->a4gb, gnvs->a4gs);
150 static void sa_get_mem_map(struct device *dev, uint64_t *values)
152 int i;
153 for (i = 0; i < MAX_MAP_ENTRIES; i++)
154 sa_read_map_entry(dev, &sa_memory_map[i], &values[i]);
158 * These are the host memory ranges that should be added:
159 * - 0 -> 0xa0000: cacheable
160 * - 0xc0000 -> top_of_ram : cacheable
161 * - top_of_ram -> TOLUD: not cacheable with standard MTRRs and reserved
162 * - 4GiB -> TOUUD: cacheable
164 * The default SMRAM space is reserved so that the range doesn't
165 * have to be saved during S3 Resume. Once marked reserved the OS
166 * cannot use the memory. This is a bit of an odd place to reserve
167 * the region, but the CPU devices don't have dev_ops->read_resources()
168 * called on them.
170 * The range 0xa0000 -> 0xc0000 does not have any resources
171 * associated with it to handle legacy VGA memory. If this range
172 * is not omitted the mtrr code will setup the area as cacheable
173 * causing VGA access to not work.
175 * Don't need to mark the entire top_of_ram till TOLUD range (used
176 * for stolen memory like GFX and ME, PTT, DPR, PRMRR, TSEG etc) as
177 * cacheable for OS usage as coreboot already done with mpinit w/ smm
178 * relocation early.
180 * It should be noted that cacheable entry types need to be added in
181 * order. The reason is that the current MTRR code assumes this and
182 * falls over itself if it isn't.
184 * The resource index starts low and should not meet or exceed
185 * PCI_BASE_ADDRESS_0.
187 static void sa_add_dram_resources(struct device *dev, int *resource_count)
189 uintptr_t base_k, touud_k;
190 size_t size_k;
191 uint64_t sa_map_values[MAX_MAP_ENTRIES];
192 uintptr_t top_of_ram;
193 int index = *resource_count;
195 top_of_ram = (uintptr_t)cbmem_top();
197 /* 0 - > 0xa0000 */
198 base_k = 0;
199 size_k = (0xa0000 / KiB) - base_k;
200 ram_resource(dev, index++, base_k, size_k);
202 /* 0xc0000 -> top_of_ram */
203 base_k = 0xc0000 / KiB;
204 size_k = (top_of_ram / KiB) - base_k;
205 ram_resource(dev, index++, base_k, size_k);
207 sa_get_mem_map(dev, &sa_map_values[0]);
209 /* top_of_ram -> TOLUD */
210 base_k = top_of_ram;
211 size_k = sa_map_values[SA_TOLUD_REG] - base_k;
212 mmio_resource(dev, index++, base_k / KiB, size_k / KiB);
214 /* 4GiB -> TOUUD */
215 base_k = 4 * (GiB / KiB); /* 4GiB */
216 touud_k = sa_map_values[SA_TOUUD_REG] / KiB;
217 size_k = touud_k - base_k;
218 if (touud_k > base_k)
219 ram_resource(dev, index++, base_k, size_k);
222 * Reserve everything between A segment and 1MB:
224 * 0xa0000 - 0xbffff: legacy VGA
225 * 0xc0000 - 0xfffff: RAM
227 mmio_resource(dev, index++, 0xa0000 / KiB, (0xc0000 - 0xa0000) / KiB);
228 reserved_ram_resource(dev, index++, 0xc0000 / KiB,
229 (1*MiB - 0xc0000) / KiB);
231 *resource_count = index;
/*
 * Bit 31 of an IMR base register is the enable flag.
 *
 * Use an unsigned literal for the mask: `1 << 31` shifts into the sign
 * bit of a signed int, which is undefined behavior in C.
 */
static bool is_imr_enabled(uint32_t imr_base_reg)
{
	return !!(imr_base_reg & (1U << 31));
}
static void imr_resource(struct device *dev, int idx, uint32_t base,
			 uint32_t mask)
{
	/* Bits 28:0 encode the base address bits 38:10, hence the KiB unit. */
	const uint32_t base_k = base & 0x0fffffff;
	/* Bits 28:0 encode the AND mask used for comparison, in KiB. */
	const uint32_t size_k = (~mask & 0x0fffffff) + 1;

	/*
	 * IMRs sit in lower DRAM. Mark them cacheable, otherwise we run
	 * out of MTRRs. Memory reserved by IMRs is not usable for host
	 * so mark it reserved.
	 */
	reserved_ram_resource(dev, idx, base_k, size_k);
}
256 * Add IMR ranges that hang off the host bridge/memory
257 * controller device in case CONFIG(SA_ENABLE_IMR) is selected by SoC.
259 static void sa_add_imr_resources(struct device *dev, int *resource_cnt)
261 size_t i, imr_offset;
262 uint32_t base, mask;
263 int index = *resource_cnt;
265 for (i = 0; i < MCH_NUM_IMRS; i++) {
266 imr_offset = i * MCH_IMR_PITCH;
267 base = MCHBAR32(imr_offset + MCH_IMR0_BASE);
268 mask = MCHBAR32(imr_offset + MCH_IMR0_MASK);
270 if (is_imr_enabled(base))
271 imr_resource(dev, index++, base, mask);
274 *resource_cnt = index;
277 static void systemagent_read_resources(struct device *dev)
279 int index = 0;
281 /* Read standard PCI resources. */
282 pci_dev_read_resources(dev);
284 /* Add all fixed MMIO resources. */
285 soc_add_fixed_mmio_resources(dev, &index);
286 /* Calculate and add DRAM resources. */
287 sa_add_dram_resources(dev, &index);
288 if (CONFIG(SA_ENABLE_IMR))
289 /* Add the isolated memory ranges (IMRs). */
290 sa_add_imr_resources(dev, &index);
292 /* Reserve the window used for extended BIOS decoding. */
293 if (CONFIG(FAST_SPI_SUPPORTS_EXT_BIOS_WINDOW))
294 mmio_resource(dev, index++, CONFIG_EXT_BIOS_WIN_BASE / KiB,
295 CONFIG_EXT_BIOS_WIN_SIZE / KiB);
298 void enable_power_aware_intr(void)
300 uint8_t pair;
302 /* Enable Power Aware Interrupt Routing */
303 pair = MCHBAR8(MCH_PAIR);
304 pair &= ~0x7; /* Clear 2:0 */
305 pair |= 0x4; /* Fixed Priority */
306 MCHBAR8(MCH_PAIR) = pair;
309 static struct device_operations systemagent_ops = {
310 .read_resources = systemagent_read_resources,
311 .set_resources = pci_dev_set_resources,
312 .enable_resources = pci_dev_enable_resources,
313 .init = sa_soc_systemagent_init,
314 .ops_pci = &pci_dev_ops_pci,
315 #if CONFIG(HAVE_ACPI_TABLES)
316 .write_acpi_tables = sa_write_acpi_tables,
317 #endif
320 static const unsigned short systemagent_ids[] = {
321 PCI_DEVICE_ID_INTEL_GLK_NB,
322 PCI_DEVICE_ID_INTEL_APL_NB,
323 PCI_DEVICE_ID_INTEL_CNL_ID_U,
324 PCI_DEVICE_ID_INTEL_CNL_ID_Y,
325 PCI_DEVICE_ID_INTEL_SKL_ID_U,
326 PCI_DEVICE_ID_INTEL_SKL_ID_Y,
327 PCI_DEVICE_ID_INTEL_SKL_ID_ULX,
328 PCI_DEVICE_ID_INTEL_SKL_ID_H_4,
329 PCI_DEVICE_ID_INTEL_SKL_ID_H_2,
330 PCI_DEVICE_ID_INTEL_SKL_ID_S_2,
331 PCI_DEVICE_ID_INTEL_SKL_ID_S_4,
332 PCI_DEVICE_ID_INTEL_WHL_ID_W_2,
333 PCI_DEVICE_ID_INTEL_WHL_ID_W_4,
334 PCI_DEVICE_ID_INTEL_KBL_ID_S,
335 PCI_DEVICE_ID_INTEL_SKL_ID_H_EM,
336 PCI_DEVICE_ID_INTEL_KBL_ID_U,
337 PCI_DEVICE_ID_INTEL_KBL_ID_Y,
338 PCI_DEVICE_ID_INTEL_KBL_ID_H,
339 PCI_DEVICE_ID_INTEL_KBL_U_R,
340 PCI_DEVICE_ID_INTEL_KBL_ID_DT,
341 PCI_DEVICE_ID_INTEL_KBL_ID_DT_2,
342 PCI_DEVICE_ID_INTEL_CFL_ID_U,
343 PCI_DEVICE_ID_INTEL_CFL_ID_U_2,
344 PCI_DEVICE_ID_INTEL_CFL_ID_H,
345 PCI_DEVICE_ID_INTEL_CFL_ID_H_4,
346 PCI_DEVICE_ID_INTEL_CFL_ID_H_8,
347 PCI_DEVICE_ID_INTEL_CFL_ID_S,
348 PCI_DEVICE_ID_INTEL_CFL_ID_S_DT_2,
349 PCI_DEVICE_ID_INTEL_CFL_ID_S_DT_4,
350 PCI_DEVICE_ID_INTEL_CFL_ID_S_DT_8,
351 PCI_DEVICE_ID_INTEL_CFL_ID_S_WS_4,
352 PCI_DEVICE_ID_INTEL_CFL_ID_S_WS_6,
353 PCI_DEVICE_ID_INTEL_CFL_ID_S_WS_8,
354 PCI_DEVICE_ID_INTEL_CFL_ID_S_S_4,
355 PCI_DEVICE_ID_INTEL_CFL_ID_S_S_6,
356 PCI_DEVICE_ID_INTEL_CFL_ID_S_S_8,
357 PCI_DEVICE_ID_INTEL_ICL_ID_U,
358 PCI_DEVICE_ID_INTEL_ICL_ID_U_2_2,
359 PCI_DEVICE_ID_INTEL_ICL_ID_Y,
360 PCI_DEVICE_ID_INTEL_ICL_ID_Y_2,
361 PCI_DEVICE_ID_INTEL_CML_ULT,
362 PCI_DEVICE_ID_INTEL_CML_ULT_2_2,
363 PCI_DEVICE_ID_INTEL_CML_ULT_6_2,
364 PCI_DEVICE_ID_INTEL_CML_ULX,
365 PCI_DEVICE_ID_INTEL_CML_S,
366 PCI_DEVICE_ID_INTEL_CML_S_G0G1_P0P1_6_2,
367 PCI_DEVICE_ID_INTEL_CML_S_P0P1_8_2,
368 PCI_DEVICE_ID_INTEL_CML_S_P0P1_10_2,
369 PCI_DEVICE_ID_INTEL_CML_S_G0G1_4,
370 PCI_DEVICE_ID_INTEL_CML_S_G0G1_2,
371 PCI_DEVICE_ID_INTEL_CML_H,
372 PCI_DEVICE_ID_INTEL_CML_H_4_2,
373 PCI_DEVICE_ID_INTEL_CML_H_8_2,
374 PCI_DEVICE_ID_INTEL_TGL_ID_U_2_2,
375 PCI_DEVICE_ID_INTEL_TGL_ID_U_4_2,
376 PCI_DEVICE_ID_INTEL_TGL_ID_Y_2_2,
377 PCI_DEVICE_ID_INTEL_TGL_ID_Y_4_2,
378 PCI_DEVICE_ID_INTEL_JSL_EHL,
379 PCI_DEVICE_ID_INTEL_EHL_ID_1,
380 PCI_DEVICE_ID_INTEL_EHL_ID_2,
381 PCI_DEVICE_ID_INTEL_EHL_ID_3,
382 PCI_DEVICE_ID_INTEL_EHL_ID_4,
383 PCI_DEVICE_ID_INTEL_EHL_ID_5,
384 PCI_DEVICE_ID_INTEL_EHL_ID_6,
385 PCI_DEVICE_ID_INTEL_EHL_ID_7,
386 PCI_DEVICE_ID_INTEL_EHL_ID_8,
387 PCI_DEVICE_ID_INTEL_EHL_ID_9,
388 PCI_DEVICE_ID_INTEL_EHL_ID_10,
389 PCI_DEVICE_ID_INTEL_EHL_ID_11,
390 PCI_DEVICE_ID_INTEL_EHL_ID_12,
391 PCI_DEVICE_ID_INTEL_JSL_ID_1,
392 PCI_DEVICE_ID_INTEL_JSL_ID_2,
393 PCI_DEVICE_ID_INTEL_JSL_ID_3,
394 PCI_DEVICE_ID_INTEL_JSL_ID_4,
395 PCI_DEVICE_ID_INTEL_JSL_ID_5,
396 PCI_DEVICE_ID_INTEL_ADL_S_ID_1,
397 PCI_DEVICE_ID_INTEL_ADL_S_ID_2,
398 PCI_DEVICE_ID_INTEL_ADL_S_ID_3,
399 PCI_DEVICE_ID_INTEL_ADL_S_ID_4,
400 PCI_DEVICE_ID_INTEL_ADL_S_ID_5,
401 PCI_DEVICE_ID_INTEL_ADL_S_ID_6,
402 PCI_DEVICE_ID_INTEL_ADL_S_ID_7,
403 PCI_DEVICE_ID_INTEL_ADL_S_ID_8,
404 PCI_DEVICE_ID_INTEL_ADL_S_ID_9,
405 PCI_DEVICE_ID_INTEL_ADL_S_ID_10,
406 PCI_DEVICE_ID_INTEL_ADL_S_ID_11,
407 PCI_DEVICE_ID_INTEL_ADL_S_ID_12,
408 PCI_DEVICE_ID_INTEL_ADL_S_ID_13,
409 PCI_DEVICE_ID_INTEL_ADL_S_ID_14,
410 PCI_DEVICE_ID_INTEL_ADL_S_ID_15,
411 PCI_DEVICE_ID_INTEL_ADL_P_ID_1,
412 PCI_DEVICE_ID_INTEL_ADL_P_ID_2,
413 PCI_DEVICE_ID_INTEL_ADL_P_ID_3,
414 PCI_DEVICE_ID_INTEL_ADL_P_ID_4,
415 PCI_DEVICE_ID_INTEL_ADL_P_ID_5,
416 PCI_DEVICE_ID_INTEL_ADL_P_ID_6,
417 PCI_DEVICE_ID_INTEL_ADL_P_ID_7,
418 PCI_DEVICE_ID_INTEL_ADL_P_ID_8,
419 PCI_DEVICE_ID_INTEL_ADL_P_ID_9,
420 PCI_DEVICE_ID_INTEL_ADL_M_ID_1,
424 static const struct pci_driver systemagent_driver __pci_driver = {
425 .ops = &systemagent_ops,
426 .vendor = PCI_VENDOR_ID_INTEL,
427 .devices = systemagent_ids