tree: drop last paragraph of GPL copyright header
[coreboot.git] / src/soc/intel/skylake/systemagent.c
/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2007-2009 coresystems GmbH
 * Copyright (C) 2014 Google Inc.
 * Copyright (C) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <console/console.h>
#include <arch/acpi.h>
#include <arch/io.h>
#include <stdint.h>
#include <delay.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <stdlib.h>
#include <string.h>
#include <cbmem.h>
#include <romstage_handoff.h>
#include <vendorcode/google/chromeos/chromeos.h>
#include <soc/cpu.h>
#include <soc/iomap.h>
#include <soc/pci_devs.h>
#include <soc/ramstage.h>
#include <soc/systemagent.h>
u8 systemagent_revision(void)
{
	return pci_read_config8(SA_DEV_ROOT, PCI_REVISION_ID);
}
static int get_pcie_bar(device_t dev, unsigned int index, u32 *base, u32 *len)
{
	u32 pciexbar_reg;

	*base = 0;
	*len = 0;

	pciexbar_reg = pci_read_config32(dev, index);

	if (!(pciexbar_reg & (1 << 0)))
		return 0;

	switch ((pciexbar_reg >> 1) & 3) {
	case 0: /* 256MB */
		*base = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|
					(1 << 28));
		*len = 256 * 1024 * 1024;
		return 1;
	case 1: /* 128MB */
		*base = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|
					(1 << 28)|(1 << 27));
		*len = 128 * 1024 * 1024;
		return 1;
	case 2: /* 64MB */
		*base = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|
					(1 << 28)|(1 << 27)|(1 << 26));
		*len = 64 * 1024 * 1024;
		return 1;
	}

	return 0;
}
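
/*
 * Worked example for the decode above (illustrative values only, not read
 * from hardware): a PCIEXBAR value of 0xe0000001 has the enable bit set and
 * a length field of 0, so get_pcie_bar() reports base 0xe0000000 and a
 * 256MiB length (0x10000000). A reserved length field (3) falls through to
 * the final return 0, i.e. the window is not reported at all.
 */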
static int get_bar(device_t dev, unsigned int index, u32 *base, u32 *len)
{
	u32 bar;

	bar = pci_read_config32(dev, index);

	/* If not enabled don't report it. */
	if (!(bar & 0x1))
		return 0;

	/* Knock down the enable bit. */
	*base = bar & ~1;

	return 1;
}
/*
 * There are special BARs that actually are programmed in the MCHBAR. These
 * are Intel-specific features, but they do consume resources that need to
 * be accounted for.
 */
static int get_bar_in_mchbar(device_t dev, unsigned int index, u32 *base,
			     u32 *len)
{
	u32 bar;

	bar = MCHBAR32(index);

	/* If not enabled don't report it. */
	if (!(bar & 0x1))
		return 0;

	/* Knock down the enable bit. */
	*base = bar & ~1;

	return 1;
}
struct fixed_mmio_descriptor {
	unsigned int index;
	u32 size;
	int (*get_resource)(device_t dev, unsigned int index,
			    u32 *base, u32 *size);
	const char *description;
};
struct fixed_mmio_descriptor mc_fixed_resources[] = {
	{ PCIEXBAR, 0, get_pcie_bar, "PCIEXBAR" },
	{ MCHBAR, MCH_BASE_SIZE, get_bar, "MCHBAR" },
	{ DMIBAR, DMI_BASE_SIZE, get_bar, "DMIBAR" },
	{ EPBAR, EP_BASE_SIZE, get_bar, "EPBAR" },
	{ GDXCBAR, GDXC_BASE_SIZE, get_bar_in_mchbar, "GDXCBAR" },
	{ EDRAMBAR, EDRAM_BASE_SIZE, get_bar_in_mchbar, "EDRAMBAR" },
};
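
/*
 * Note on the table above: the size column seeds the length handed to the
 * resource allocator. PCIEXBAR is listed with size 0 because get_pcie_bar()
 * derives the window length from the register's length field, whereas
 * get_bar()/get_bar_in_mchbar() leave the length from the table untouched.
 */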
/*
 * Add all known fixed MMIO ranges that hang off the host bridge/memory
 * controller device.
 */
static void mc_add_fixed_mmio_resources(device_t dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mc_fixed_resources); i++) {
		u32 base;
		u32 size;
		struct resource *resource;
		unsigned int index;

		size = mc_fixed_resources[i].size;
		index = mc_fixed_resources[i].index;
		if (!mc_fixed_resources[i].get_resource(dev, index,
							&base, &size))
			continue;

		resource = new_resource(dev, mc_fixed_resources[i].index);
		resource->flags = IORESOURCE_MEM | IORESOURCE_FIXED |
				  IORESOURCE_STORED | IORESOURCE_RESERVE |
				  IORESOURCE_ASSIGNED;
		resource->base = base;
		resource->size = size;
		printk(BIOS_DEBUG, "%s: Adding %s @ %x 0x%08lx-0x%08lx.\n",
		       __func__, mc_fixed_resources[i].description, index,
		       (unsigned long)base, (unsigned long)(base + size - 1));
	}
}
struct map_entry {
	int reg;
	int is_64_bit;
	int is_limit;
	const char *description;
};
static void read_map_entry(device_t dev, struct map_entry *entry,
			   uint64_t *result)
{
	uint64_t value;
	uint64_t mask;

	/* All registers are on a 1MiB granularity. */
	mask = ((1ULL << 20) - 1);
	mask = ~mask;

	value = 0;

	if (entry->is_64_bit) {
		value = pci_read_config32(dev, entry->reg + 4);
		value <<= 32;
	}

	value |= pci_read_config32(dev, entry->reg);
	value &= mask;

	if (entry->is_limit)
		value |= ~mask;

	*result = value;
}
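
/*
 * Worked example for the masking above (illustrative values only): with the
 * 1MiB mask, a base register reading 0xafa00000 is reported as 0xafa00000,
 * while a limit register with the same upper bits is reported as 0xafafffff,
 * i.e. the last byte covered by the region rather than its 1MiB-aligned
 * start.
 */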
#define MAP_ENTRY(reg_, is_64_, is_limit_, desc_) \
	{ \
		.reg = reg_,		\
		.is_64_bit = is_64_,	\
		.is_limit = is_limit_,	\
		.description = desc_,	\
	}

#define MAP_ENTRY_BASE_64(reg_, desc_) \
	MAP_ENTRY(reg_, 1, 0, desc_)
#define MAP_ENTRY_LIMIT_64(reg_, desc_) \
	MAP_ENTRY(reg_, 1, 1, desc_)
#define MAP_ENTRY_BASE_32(reg_, desc_) \
	MAP_ENTRY(reg_, 0, 0, desc_)
enum {
	TOM_REG,
	TOUUD_REG,
	MESEG_BASE_REG,
	MESEG_LIMIT_REG,
	REMAP_BASE_REG,
	REMAP_LIMIT_REG,
	TOLUD_REG,
	BGSM_REG,
	BDSM_REG,
	TSEG_REG,
	/* Must be last. */
	NUM_MAP_ENTRIES
};
static struct map_entry memory_map[NUM_MAP_ENTRIES] = {
	[TOM_REG] = MAP_ENTRY_BASE_64(TOM, "TOM"),
	[TOUUD_REG] = MAP_ENTRY_BASE_64(TOUUD, "TOUUD"),
	[MESEG_BASE_REG] = MAP_ENTRY_BASE_64(MESEG_BASE, "MESEG_BASE"),
	[MESEG_LIMIT_REG] = MAP_ENTRY_LIMIT_64(MESEG_LIMIT, "MESEG_LIMIT"),
	[REMAP_BASE_REG] = MAP_ENTRY_BASE_64(REMAPBASE, "REMAP_BASE"),
	[REMAP_LIMIT_REG] = MAP_ENTRY_LIMIT_64(REMAPLIMIT, "REMAP_LIMIT"),
	[TOLUD_REG] = MAP_ENTRY_BASE_32(TOLUD, "TOLUD"),
	[BDSM_REG] = MAP_ENTRY_BASE_32(BDSM, "BDSM"),
	[BGSM_REG] = MAP_ENTRY_BASE_32(BGSM, "BGSM"),
	[TSEG_REG] = MAP_ENTRY_BASE_32(TSEG, "TSEGMB"),
};
static void mc_read_map_entries(device_t dev, uint64_t *values)
{
	int i;
	for (i = 0; i < NUM_MAP_ENTRIES; i++)
		read_map_entry(dev, &memory_map[i], &values[i]);
}
static void mc_report_map_entries(device_t dev, uint64_t *values)
{
	int i;
	for (i = 0; i < NUM_MAP_ENTRIES; i++) {
		printk(BIOS_DEBUG, "MC MAP: %s: 0x%llx\n",
		       memory_map[i].description, values[i]);
	}
	/* One can validate the BDSM and BGSM against the GGC. */
	printk(BIOS_DEBUG, "MC MAP: GGC: 0x%x\n", pci_read_config16(dev, GGC));
}
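
/*
 * Background for the GGC note above (editorial assumption based on the
 * common Intel host-bridge layout, not verified against the Skylake
 * datasheet here): GGC encodes the GTT stolen and graphics stolen memory
 * sizes, so one would expect BDSM - BGSM to match the GTT stolen size and
 * TOLUD - BDSM to match the graphics stolen size reported in GGC.
 */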
static void mc_add_dram_resources(device_t dev)
{
	unsigned long base_k, size_k;
	unsigned long touud_k;
	unsigned long index;
	struct resource *resource;
	uint64_t mc_values[NUM_MAP_ENTRIES];
	unsigned long dpr_size = 0;
	u32 dpr_reg;

	/* Read in the MAP registers and report their values. */
	mc_read_map_entries(dev, &mc_values[0]);
	mc_report_map_entries(dev, &mc_values[0]);

	/*
	 * DMA Protected Range can be reserved below TSEG for PCODE patch
	 * or TXT/BootGuard related data. Rather than report a base address,
	 * the DPR register reports the TOP of the region, which is the same
	 * as TSEG base. The region size is reported in MiB in bits 11:4.
	 */
	dpr_reg = pci_read_config32(SA_DEV_ROOT, DPR);
	if (dpr_reg & DPR_EPM) {
		dpr_size = (dpr_reg & DPR_SIZE_MASK) << 16;
		printk(BIOS_INFO, "DPR SIZE: 0x%lx\n", dpr_size);
	}
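
	/*
	 * Note on the shift above (assuming DPR_SIZE_MASK covers bits 11:4,
	 * as the comment states): the size field already sits at bit 4, so
	 * shifting the masked value left by 16 scales the MiB count by 2^20,
	 * yielding dpr_size in bytes.
	 */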
	/*
	 * These are the host memory ranges that should be added:
	 * - 0 -> 0xa0000: cacheable
	 * - 0xc0000 -> top_of_ram: cacheable
	 * - top_of_ram -> TSEG - DPR: uncacheable
	 * - TSEG - DPR -> BGSM: cacheable with standard MTRRs and reserved
	 * - BGSM -> TOLUD: not cacheable with standard MTRRs and reserved
	 * - 4GiB -> TOUUD: cacheable
	 *
	 * The default SMRAM space is reserved so that the range doesn't
	 * have to be saved during S3 Resume. Once marked reserved the OS
	 * cannot use the memory. This is a bit of an odd place to reserve
	 * the region, but the CPU devices don't have dev_ops->read_resources()
	 * called on them.
	 *
	 * The range 0xa0000 -> 0xc0000 does not have any resources
	 * associated with it to handle legacy VGA memory. If this range
	 * is not omitted the MTRR code will set up the area as cacheable,
	 * causing VGA access to not work.
	 *
	 * The TSEG region is mapped as cacheable so that one can perform
	 * SMRAM relocation faster. Once the SMRR is enabled, the SMRR takes
	 * precedence over the existing MTRRs covering this region.
	 *
	 * It should be noted that cacheable entry types need to be added in
	 * order. The reason is that the current MTRR code assumes this and
	 * falls over itself if it isn't.
	 *
	 * The resource index starts low and should not meet or exceed
	 * PCI_BASE_ADDRESS_0.
	 */
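
	/*
	 * For reference (derived from the index++ sequence that follows):
	 * indices 0 and 1 are the RAM ranges below 0xa0000 and from 0xc0000
	 * to top_of_ram, the next three are the reserved ranges up to TOLUD,
	 * then optionally the RAM above 4GiB, and finally the legacy VGA
	 * hole, the 0xc0000-0xfffff reserved RAM, and the ChromeOS ramoops
	 * region. All of these stay well below PCI_BASE_ADDRESS_0 (0x10).
	 */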
	index = 0;

	/* 0 -> 0xa0000 */
	base_k = 0;
	size_k = (0xa0000 >> 10) - base_k;
	ram_resource(dev, index++, base_k, size_k);

	/* 0xc0000 -> top_of_ram */
	base_k = 0xc0000 >> 10;
	size_k = (top_of_32bit_ram() >> 10) - base_k;
	ram_resource(dev, index++, base_k, size_k);

	/* top_of_ram -> TSEG - DPR */
	resource = new_resource(dev, index++);
	resource->base = top_of_32bit_ram();
	resource->size = mc_values[TSEG_REG] - dpr_size - resource->base;
	resource->flags = IORESOURCE_MEM | IORESOURCE_FIXED |
			  IORESOURCE_STORED | IORESOURCE_RESERVE |
			  IORESOURCE_ASSIGNED;

	/* TSEG - DPR -> BGSM */
	resource = new_resource(dev, index++);
	resource->base = mc_values[TSEG_REG] - dpr_size;
	resource->size = mc_values[BGSM_REG] - resource->base;
	resource->flags = IORESOURCE_MEM | IORESOURCE_FIXED |
			  IORESOURCE_STORED | IORESOURCE_RESERVE |
			  IORESOURCE_ASSIGNED | IORESOURCE_CACHEABLE;

	/* BGSM -> TOLUD */
	resource = new_resource(dev, index++);
	resource->base = mc_values[BGSM_REG];
	resource->size = mc_values[TOLUD_REG] - resource->base;
	resource->flags = IORESOURCE_MEM | IORESOURCE_FIXED |
			  IORESOURCE_STORED | IORESOURCE_RESERVE |
			  IORESOURCE_ASSIGNED;

	/* 4GiB -> TOUUD */
	base_k = 4096 * 1024; /* 4GiB */
	touud_k = mc_values[TOUUD_REG] >> 10;
	size_k = touud_k - base_k;
	if (touud_k > base_k)
		ram_resource(dev, index++, base_k, size_k);

	/*
	 * Reserve everything between A segment and 1MB:
	 *
	 * 0xa0000 - 0xbffff: legacy VGA
	 * 0xc0000 - 0xfffff: RAM
	 */
	mmio_resource(dev, index++, (0xa0000 >> 10), (0xc0000 - 0xa0000) >> 10);
	reserved_ram_resource(dev, index++, (0xc0000 >> 10),
			      (0x100000 - 0xc0000) >> 10);

	chromeos_reserve_ram_oops(dev, index++);
}
static void systemagent_read_resources(device_t dev)
{
	/* Read standard PCI resources. */
	pci_dev_read_resources(dev);

	/* Add all fixed MMIO resources. */
	mc_add_fixed_mmio_resources(dev);

	/* Calculate and add DRAM resources. */
	mc_add_dram_resources(dev);
}
static void systemagent_init(struct device *dev)
{
	u8 bios_reset_cpl, pair;

	/* Enable Power Aware Interrupt Routing */
	pair = MCHBAR8(MCH_PAIR);
	pair &= ~0x7;	/* Clear 2:0 */
	pair |= 0x4;	/* Fixed Priority */
	MCHBAR8(MCH_PAIR) = pair;

	/*
	 * Set bits 0+1 of BIOS_RESET_CPL to indicate to the CPU
	 * that BIOS has initialized memory and power management
	 */
	bios_reset_cpl = MCHBAR8(BIOS_RESET_CPL);
	bios_reset_cpl |= 3;
	MCHBAR8(BIOS_RESET_CPL) = bios_reset_cpl;
	printk(BIOS_DEBUG, "Set BIOS_RESET_CPL\n");

	/* Configure turbo power limits 1ms after reset complete bit */
	mdelay(1);
	set_power_limits(28);
}
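
/*
 * The meaning of the literal 28 above is not spelled out in this file. In
 * the Haswell/Broadwell code this SoC support was derived from, the argument
 * to set_power_limits() is the PL1 time window in seconds, so 28 most likely
 * selects a 28s window; check soc/cpu.h for the authoritative prototype.
 * This note is an editorial assumption, not taken from the source.
 */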
static void systemagent_enable(device_t dev)
{
#if CONFIG_HAVE_ACPI_RESUME
	struct romstage_handoff *handoff;

	handoff = cbmem_find(CBMEM_ID_ROMSTAGE_INFO);

	if (handoff == NULL) {
		printk(BIOS_DEBUG, "Unknown boot method, assuming normal.\n");
		acpi_slp_type = 0;
	} else if (handoff->s3_resume) {
		printk(BIOS_DEBUG, "S3 Resume.\n");
		acpi_slp_type = 3;
	} else {
		printk(BIOS_DEBUG, "Normal boot.\n");
		acpi_slp_type = 0;
	}
#endif
}
static struct device_operations systemagent_ops = {
	.read_resources   = &systemagent_read_resources,
	.set_resources    = &pci_dev_set_resources,
	.enable_resources = &pci_dev_enable_resources,
	.init             = &systemagent_init,
	.enable           = &systemagent_enable,
	.ops_pci          = &soc_pci_ops,
};
static const unsigned short systemagent_ids[] = {
	MCH_SKYLAKE_ID_U,
	MCH_SKYLAKE_ID_Y,
	MCH_SKYLAKE_ID_ULX,
	0	/* Zero-terminate the list for .devices matching. */
};
static const struct pci_driver systemagent_driver __pci_driver = {
	.ops     = &systemagent_ops,
	.vendor  = PCI_VENDOR_ID_INTEL,
	.devices = systemagent_ids
};