{mb,nb,soc}: Remove references to pci_bus_default_ops()
[coreboot.git] / src / northbridge / amd / pi / 00730F01 / northbridge.c
blob 6e652c0e103202b89f586e4347b794abfe4f1214
/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2012 Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <console/console.h>
#include <arch/io.h>
#include <arch/acpi.h>
#include <stdint.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <device/hypertransport.h>
#include <stdlib.h>
#include <string.h>
#include <lib.h>
#include <cpu/cpu.h>
#include <cbmem.h>

#include <Porting.h>
#include <AGESA.h>
#include <FieldAccessors.h>
#include <Topology.h>
#include <northbridge/amd/agesa/agesa_helper.h>
#if IS_ENABLED(CONFIG_BINARYPI_LEGACY_WRAPPER)
#include <northbridge/amd/pi/agesawrapper.h>
#include <northbridge/amd/pi/agesawrapper_call.h>
#endif
#include "northbridge.h"

#include <cpu/x86/lapic.h>
#include <cpu/amd/mtrr.h>
#include <arch/acpi.h>
#include <arch/acpigen.h>
#define MAX_NODE_NUMS (MAX_NODES * MAX_DIES)

typedef struct dram_base_mask {
	u32 base; //[47:27] at [28:8]
	u32 mask; //[47:27] at [28:8] and enable at bit 0
} dram_base_mask_t;
static unsigned node_nums;
static unsigned sblink;
static device_t __f0_dev[MAX_NODE_NUMS];
static device_t __f1_dev[MAX_NODE_NUMS];
static device_t __f2_dev[MAX_NODE_NUMS];
static device_t __f4_dev[MAX_NODE_NUMS];
static unsigned fx_devs = 0;
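
/*
 * D18F1 splits each node's DRAM base/limit across two registers: address
 * bits [39:24] sit at [31:16] of 0x40/0x44 (+8 per node) and bits [47:40]
 * at [7:0] of 0x140/0x144.  get_dram_base_mask() below repacks them into
 * the 32-bit fields above, i.e. address bits [47:27] at [28:8] with the
 * enable bit at bit 0.
 */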
static dram_base_mask_t get_dram_base_mask(u32 nodeid)
{
	device_t dev;
	dram_base_mask_t d;
	dev = __f1_dev[0];
	u32 temp;
	temp = pci_read_config32(dev, 0x44 + (nodeid << 3)); //[39:24] at [31:16]
	d.mask = ((temp & 0xfff80000) >> (8 + 3)); // mask out DramMask [26:24] too
	temp = pci_read_config32(dev, 0x144 + (nodeid << 3)) & 0xff; //[47:40] at [7:0]
	d.mask |= temp << 21;
	temp = pci_read_config32(dev, 0x40 + (nodeid << 3)); //[39:24] at [31:16]
	d.mask |= (temp & 1); // enable bit
	d.base = ((temp & 0xfff80000) >> (8 + 3)); // mask out DramBase [26:24] too
	temp = pci_read_config32(dev, 0x140 + (nodeid << 3)) & 0xff; //[47:40] at [7:0]
	d.base |= temp << 21;
	return d;
}
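
/*
 * set_io_addr_reg()/set_mmio_addr_reg() program one F1 base/limit routing
 * pair: 'reg' is the base register and 'reg + 4' the matching limit.  The
 * same value is written on every node so all nodes carry an identical
 * routing table.
 */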
static void set_io_addr_reg(device_t dev, u32 nodeid, u32 linkn, u32 reg,
			u32 io_min, u32 io_max)
{
	u32 i;
	u32 tempreg;
	/* io range allocation */
	tempreg = (nodeid & 0xf) | ((nodeid & 0x30) << (8 - 4)) | (linkn << 4) | ((io_max & 0xf0) << (12 - 4)); //limit
	for (i = 0; i < node_nums; i++)
		pci_write_config32(__f1_dev[i], reg + 4, tempreg);
	tempreg = 3 /*| (3<<4)*/ | ((io_min & 0xf0) << (12 - 4)); //base: ISA and VGA ?
	for (i = 0; i < node_nums; i++)
		pci_write_config32(__f1_dev[i], reg, tempreg);
}
static void set_mmio_addr_reg(u32 nodeid, u32 linkn, u32 reg, u32 index, u32 mmio_min, u32 mmio_max, u32 nodes)
{
	u32 i;
	u32 tempreg;
	/* mmio range allocation */
	tempreg = (nodeid & 0xf) | (linkn << 4) | (mmio_max & 0xffffff00); //limit
	for (i = 0; i < nodes; i++)
		pci_write_config32(__f1_dev[i], reg + 4, tempreg);
	tempreg = 3 | (nodeid & 0x30) | (mmio_min & 0xffffff00);
	for (i = 0; i < node_nums; i++)
		pci_write_config32(__f1_dev[i], reg, tempreg);
}
static device_t get_node_pci(u32 nodeid, u32 fn)
{
#if MAX_NODE_NUMS + CONFIG_CDB >= 32
	if ((CONFIG_CDB + nodeid) < 32) {
		return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
	} else {
		return dev_find_slot(CONFIG_CBB - 1, PCI_DEVFN(CONFIG_CDB + nodeid - 32, fn));
	}
#else
	return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
#endif
}
static void get_fx_devs(void)
{
	int i;
	for (i = 0; i < MAX_NODE_NUMS; i++) {
		__f0_dev[i] = get_node_pci(i, 0);
		__f1_dev[i] = get_node_pci(i, 1);
		__f2_dev[i] = get_node_pci(i, 2);
		__f4_dev[i] = get_node_pci(i, 4);
		if (__f0_dev[i] != NULL && __f1_dev[i] != NULL)
			fx_devs = i + 1;
	}
	if (__f1_dev[0] == NULL || __f0_dev[0] == NULL || fx_devs == 0) {
		die("Cannot find 0:0x18.[0|1]\n");
	}
	printk(BIOS_DEBUG, "fx_devs = 0x%x\n", fx_devs);
}
static u32 f1_read_config32(unsigned reg)
{
	if (fx_devs == 0)
		get_fx_devs();
	return pci_read_config32(__f1_dev[0], reg);
}
static void f1_write_config32(unsigned reg, u32 value)
{
	int i;
	if (fx_devs == 0)
		get_fx_devs();
	for (i = 0; i < fx_devs; i++) {
		device_t dev;
		dev = __f1_dev[i];
		if (dev && dev->enabled) {
			pci_write_config32(dev, reg, value);
		}
	}
}
static u32 amdfam16_nodeid(device_t dev)
{
#if MAX_NODE_NUMS == 64
	unsigned busn;
	busn = dev->bus->secondary;
	if (busn != CONFIG_CBB) {
		return (dev->path.pci.devfn >> 3) - CONFIG_CDB + 32;
	} else {
		return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
	}
#else
	return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
#endif
}
static void set_vga_enable_reg(u32 nodeid, u32 linkn)
{
	u32 val;

	val = 1 | (nodeid << 4) | (linkn << 12);
	/* This routes
	 * (1) mmio 0xa0000:0xbffff
	 * (2) io   0x3b0:0x3bb, 0x3c0:0x3df
	 * to the given node and link.
	 */
	f1_write_config32(0xf4, val);
}
/**
 * @return
 * @retval 2  resource does not exist, usable
 * @retval 0  resource exists, not usable
 * @retval 1  resource exists, resource has been allocated before
 */
static int reg_useable(unsigned reg, device_t goal_dev, unsigned goal_nodeid,
			unsigned goal_link)
{
	struct resource *res;
	unsigned nodeid, link = 0;
	int result;
	res = 0;
	for (nodeid = 0; !res && (nodeid < fx_devs); nodeid++) {
		device_t dev;
		dev = __f0_dev[nodeid];
		if (!dev)
			continue;
		for (link = 0; !res && (link < 8); link++) {
			res = probe_resource(dev, IOINDEX(0x1000 + reg, link));
		}
	}
	result = 2;
	if (res) {
		result = 0;
		if ((goal_link == (link - 1)) &&
			(goal_nodeid == (nodeid - 1)) &&
			(res->flags <= 1)) {
			result = 1;
		}
	}
	return result;
}
static struct resource *amdfam16_find_iopair(device_t dev, unsigned nodeid, unsigned link)
{
	struct resource *resource;
	u32 free_reg, reg;
	resource = 0;
	free_reg = 0;
	for (reg = 0xc0; reg <= 0xd8; reg += 0x8) {
		int result;
		result = reg_useable(reg, dev, nodeid, link);
		if (result == 1) {
			/* I have been allocated this one */
			break;
		}
		else if (result > 1) {
			/* I have a free register pair */
			free_reg = reg;
		}
	}
	if (reg > 0xd8) {
		reg = free_reg; // if there is no free pair, free_reg is still 0
	}

	resource = new_resource(dev, IOINDEX(0x1000 + reg, link));

	return resource;
}
static struct resource *amdfam16_find_mempair(device_t dev, u32 nodeid, u32 link)
{
	struct resource *resource;
	u32 free_reg, reg;
	resource = 0;
	free_reg = 0;
	for (reg = 0x80; reg <= 0xb8; reg += 0x8) {
		int result;
		result = reg_useable(reg, dev, nodeid, link);
		if (result == 1) {
			/* I have been allocated this one */
			break;
		}
		else if (result > 1) {
			/* I have a free register pair */
			free_reg = reg;
		}
	}
	if (reg > 0xb8) {
		reg = free_reg;
	}

	resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
	return resource;
}
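
/*
 * Each HT link below gets three bridge windows: one I/O pair from the
 * 0xc0-0xd8 range and two memory pairs from the 0x80-0xb8 range (one
 * prefetchable, one not).  They are created with zero size and only the
 * host alignment/limit constraints; the resource allocator fills in the
 * actual ranges later.
 */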
static void amdfam16_link_read_bases(device_t dev, u32 nodeid, u32 link)
{
	struct resource *resource;

	/* Initialize the io space constraints on the current bus */
	resource = amdfam16_find_iopair(dev, nodeid, link);
	if (resource) {
		u32 align;
		align = log2(HT_IO_HOST_ALIGN);
		resource->base = 0;
		resource->size = 0;
		resource->align = align;
		resource->gran = align;
		resource->limit = 0xffffUL;
		resource->flags = IORESOURCE_IO | IORESOURCE_BRIDGE;
	}

	/* Initialize the prefetchable memory constraints on the current bus */
	resource = amdfam16_find_mempair(dev, nodeid, link);
	if (resource) {
		resource->base = 0;
		resource->size = 0;
		resource->align = log2(HT_MEM_HOST_ALIGN);
		resource->gran = log2(HT_MEM_HOST_ALIGN);
		resource->limit = 0xffffffffffULL;
		resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
		resource->flags |= IORESOURCE_BRIDGE;
	}

	/* Initialize the memory constraints on the current bus */
	resource = amdfam16_find_mempair(dev, nodeid, link);
	if (resource) {
		resource->base = 0;
		resource->size = 0;
		resource->align = log2(HT_MEM_HOST_ALIGN);
		resource->gran = log2(HT_MEM_HOST_ALIGN);
		resource->limit = 0xffffffffffULL;
		resource->flags = IORESOURCE_MEM | IORESOURCE_BRIDGE;
	}
}
static void read_resources(device_t dev)
{
	u32 nodeid;
	struct bus *link;

	nodeid = amdfam16_nodeid(dev);
	for (link = dev->link_list; link; link = link->next) {
		if (link->children) {
			amdfam16_link_read_bases(dev, nodeid, link->link_num);
		}
	}

	/*
	 * This MMCONF resource must be reserved in the PCI domain.
	 * It is not honored by the coreboot resource allocator if it is in
	 * the CPU_CLUSTER.
	 */
	mmconf_resource(dev, 0xc0010058);
}
static void set_resource(device_t dev, struct resource *resource, u32 nodeid)
{
	resource_t rbase, rend;
	unsigned reg, link_num;
	char buf[50];

	/* Make certain the resource has actually been set */
	if (!(resource->flags & IORESOURCE_ASSIGNED)) {
		return;
	}

	/* If I have already stored this resource don't worry about it */
	if (resource->flags & IORESOURCE_STORED) {
		return;
	}

	/* Only handle PCI memory and IO resources */
	if (!(resource->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
		return;

	/* Ensure I am actually looking at a resource of function 1 */
	if ((resource->index & 0xffff) < 0x1000) {
		return;
	}

	/* Get the base address */
	rbase = resource->base;

	/* Get the limit (rounded up) */
	rend = resource_end(resource);

	/* Get the register and link */
	reg = resource->index & 0xfff; // 4k
	link_num = IOINDEX_LINK(resource->index);

	if (resource->flags & IORESOURCE_IO) {
		set_io_addr_reg(dev, nodeid, link_num, reg, rbase >> 8, rend >> 8);
	}
	else if (resource->flags & IORESOURCE_MEM) {
		set_mmio_addr_reg(nodeid, link_num, reg, (resource->index >> 24), rbase >> 8, rend >> 8, node_nums); // [39:8]
	}
	resource->flags |= IORESOURCE_STORED;
	snprintf(buf, sizeof(buf), " <node %x link %x>",
		 nodeid, link_num);
	report_resource_stored(dev, resource, buf);
}
/**
 * I tried to reuse the resource allocation code in set_resource(),
 * but it is too difficult to deal with the resource allocation magic.
 */
static void create_vga_resource(device_t dev, unsigned nodeid)
{
	struct bus *link;

	/* Find out on which link the VGA card is connected;
	 * we only deal with the 'first' VGA card. */
	for (link = dev->link_list; link; link = link->next) {
		if (link->bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
#if IS_ENABLED(CONFIG_MULTIPLE_VGA_ADAPTERS)
			extern device_t vga_pri; // the primary vga device, defined in device.c
			printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary,
				link->secondary, link->subordinate);
			/* We need to make sure the vga_pri is under the link */
			if ((vga_pri->bus->secondary >= link->secondary) &&
			    (vga_pri->bus->secondary <= link->subordinate))
#endif
			break;
		}
	}

	/* no VGA card installed */
	if (link == NULL)
		return;

	printk(BIOS_DEBUG, "VGA: %s (aka node %d) link %d has VGA device\n", dev_path(dev), nodeid, sblink);
	set_vga_enable_reg(nodeid, sblink);
}
static void set_resources(device_t dev)
{
	unsigned nodeid;
	struct bus *bus;
	struct resource *res;

	/* Find the nodeid */
	nodeid = amdfam16_nodeid(dev);

	create_vga_resource(dev, nodeid); //TODO: do we need this?

	/* Set each resource we have found */
	for (res = dev->resource_list; res; res = res->next) {
		set_resource(dev, res, nodeid);
	}

	for (bus = dev->link_list; bus; bus = bus->next) {
		if (bus->children) {
			assign_resources(bus);
		}
	}
}
static void northbridge_init(struct device *dev)
{
}
static unsigned long acpi_fill_hest(acpi_hest_t *hest)
{
	void *addr, *current;

	/* Skip the HEST header. */
	current = (void *)(hest + 1);

	addr = agesawrapper_getlateinitptr(PICK_WHEA_MCE);
	if (addr != NULL)
		current += acpi_create_hest_error_source(hest, current, 0, (void *)((u32)addr + 2), *(UINT16 *)addr - 2);

	addr = agesawrapper_getlateinitptr(PICK_WHEA_CMC);
	if (addr != NULL)
		current += acpi_create_hest_error_source(hest, current, 1, (void *)((u32)addr + 2), *(UINT16 *)addr - 2);

	return (unsigned long)current;
}
static void northbridge_fill_ssdt_generator(device_t device)
{
	msr_t msr;
	char pscope[] = "\\_SB.PCI0";

	acpigen_write_scope(pscope);
	msr = rdmsr(TOP_MEM);
	acpigen_write_name_dword("TOM1", msr.lo);
	msr = rdmsr(TOP_MEM2);
	/*
	 * Since XP only implements parts of ACPI 2.0, we can't use a qword
	 * here.
	 * See http://www.acpi.info/presentations/S01USMOBS169_OS%2520new.ppt
	 * slide 22ff.
	 * Shift the value right by 20 bits to make it fit into 32 bits,
	 * giving us 1MB granularity and a limit of almost 4 Petabytes of
	 * memory.
	 */
	acpigen_write_name_dword("TOM2", (msr.hi << 12) | msr.lo >> 20);
	acpigen_pop_len();
}
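
/*
 * ACPI tables supplied by AGESA: the HEST is assembled here with error
 * sources pulled from the AGESA heap, while IVRS, SRAT, SLIT, ALIB and
 * the P-state SSDT are copied verbatim whenever
 * agesawrapper_getlateinitptr() returns a non-NULL pointer, then linked
 * into the RSDP.
 */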
static unsigned long agesa_write_acpi_tables(device_t device,
					     unsigned long current,
					     acpi_rsdp_t *rsdp)
{
	acpi_srat_t *srat;
	acpi_slit_t *slit;
	acpi_header_t *ssdt;
	acpi_header_t *alib;
	acpi_header_t *ivrs;
	acpi_hest_t *hest;

	/* HEST */
	current = ALIGN(current, 8);
	hest = (acpi_hest_t *)current;
	acpi_write_hest((void *)current, acpi_fill_hest);
	acpi_add_table(rsdp, (void *)current);
	current += ((acpi_header_t *)current)->length;

	current = ALIGN(current, 8);
	printk(BIOS_DEBUG, "ACPI: * IVRS at %lx\n", current);
	ivrs = agesawrapper_getlateinitptr(PICK_IVRS);
	if (ivrs != NULL) {
		memcpy((void *)current, ivrs, ivrs->length);
		ivrs = (acpi_header_t *)current;
		current += ivrs->length;
		acpi_add_table(rsdp, ivrs);
	} else {
		printk(BIOS_DEBUG, " AGESA IVRS table NULL. Skipping.\n");
	}

	/* SRAT */
	current = ALIGN(current, 8);
	printk(BIOS_DEBUG, "ACPI: * SRAT at %lx\n", current);
	srat = (acpi_srat_t *)agesawrapper_getlateinitptr(PICK_SRAT);
	if (srat != NULL) {
		memcpy((void *)current, srat, srat->header.length);
		srat = (acpi_srat_t *)current;
		current += srat->header.length;
		acpi_add_table(rsdp, srat);
	} else {
		printk(BIOS_DEBUG, " AGESA SRAT table NULL. Skipping.\n");
	}

	/* SLIT */
	current = ALIGN(current, 8);
	printk(BIOS_DEBUG, "ACPI: * SLIT at %lx\n", current);
	slit = (acpi_slit_t *)agesawrapper_getlateinitptr(PICK_SLIT);
	if (slit != NULL) {
		memcpy((void *)current, slit, slit->header.length);
		slit = (acpi_slit_t *)current;
		current += slit->header.length;
		acpi_add_table(rsdp, slit);
	} else {
		printk(BIOS_DEBUG, " AGESA SLIT table NULL. Skipping.\n");
	}

	/* ALIB */
	current = ALIGN(current, 16);
	printk(BIOS_DEBUG, "ACPI: * AGESA ALIB SSDT at %lx\n", current);
	alib = (acpi_header_t *)agesawrapper_getlateinitptr(PICK_ALIB);
	if (alib != NULL) {
		memcpy((void *)current, alib, alib->length);
		alib = (acpi_header_t *)current;
		current += alib->length;
		acpi_add_table(rsdp, (void *)alib);
	}
	else {
		printk(BIOS_DEBUG, " AGESA ALIB SSDT table NULL. Skipping.\n");
	}

	/* this pstate ssdt may cause Blue Screen: Fixed: Keep this comment for a while. */
	/* SSDT */
	current = ALIGN(current, 16);
	printk(BIOS_DEBUG, "ACPI: * SSDT at %lx\n", current);
	ssdt = (acpi_header_t *)agesawrapper_getlateinitptr(PICK_PSTATE);
	if (ssdt != NULL) {
		memcpy((void *)current, ssdt, ssdt->length);
		ssdt = (acpi_header_t *)current;
		current += ssdt->length;
	}
	else {
		printk(BIOS_DEBUG, " AGESA PState table NULL. Skipping.\n");
	}
	acpi_add_table(rsdp, ssdt);

	printk(BIOS_DEBUG, "ACPI: * SSDT for PState at %lx\n", current);
	return current;
}
static struct device_operations northbridge_operations = {
	.read_resources = read_resources,
	.set_resources = set_resources,
	.enable_resources = pci_dev_enable_resources,
	.init = northbridge_init,
	.acpi_fill_ssdt_generator = northbridge_fill_ssdt_generator,
	.write_acpi_tables = agesa_write_acpi_tables,
	.enable = 0,
	.ops_pci = 0,
};
static const struct pci_driver family16_northbridge __pci_driver = {
	.ops = &northbridge_operations,
	.vendor = PCI_VENDOR_ID_AMD,
	.device = PCI_DEVICE_ID_AMD_16H_MODEL_303F_NB_HT,
};

static const struct pci_driver family10_northbridge __pci_driver = {
	.ops = &northbridge_operations,
	.vendor = PCI_VENDOR_ID_AMD,
	.device = PCI_DEVICE_ID_AMD_10H_NB_HT,
};
static void fam16_finalize(void *chip_info)
{
	device_t dev;
	u32 value;
	dev = dev_find_slot(0, PCI_DEVFN(0, 0)); /* clear IoapicSbFeatureEn */
	pci_write_config32(dev, 0xF8, 0);
	pci_write_config32(dev, 0xFC, 5); /* TODO: move it to dsdt.asl */

	/* disable No Snoop */
	dev = dev_find_slot(0, PCI_DEVFN(1, 1));
	value = pci_read_config32(dev, 0x60);
	value &= ~(1 << 11);
	pci_write_config32(dev, 0x60, value);
}
struct chip_operations northbridge_amd_pi_00730F01_ops = {
	CHIP_NAME("AMD FAM16 Northbridge")
	.enable_dev = 0,
	.final = fam16_finalize,
};
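
/*
 * Walk the F1 base/limit pairs (0x80-0xd8) and record any window that is
 * already enabled, so the matching register pair is treated as allocated
 * when resources are assigned later.
 */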
static void domain_read_resources(device_t dev)
{
	unsigned reg;

	/* Find the already assigned resource pairs */
	get_fx_devs();
	for (reg = 0x80; reg <= 0xd8; reg += 0x08) {
		u32 base, limit;
		base = f1_read_config32(reg);
		limit = f1_read_config32(reg + 0x04);
		/* Is this register allocated? */
		if ((base & 3) != 0) {
			unsigned nodeid, reg_link;
			device_t reg_dev;
			if (reg < 0xc0) { // mmio
				nodeid = (limit & 0xf) + (base & 0x30);
			} else { // io
				nodeid = (limit & 0xf) + ((base >> 4) & 0x30);
			}
			reg_link = (limit >> 4) & 7;
			reg_dev = __f0_dev[nodeid];
			if (reg_dev) {
				/* Reserve the resource */
				struct resource *res;
				res = new_resource(reg_dev, IOINDEX(0x1000 + reg, reg_link));
				if (res) {
					res->flags = 1;
				}
			}
		}
	}
	/* FIXME: do we need to check the extended conf space?
	 * I don't trust the preset value that much. */
	pci_domain_read_resources(dev);
}
static void domain_enable_resources(device_t dev)
{
#if IS_ENABLED(CONFIG_BINARYPI_LEGACY_WRAPPER)
	/* Must be called after PCI enumeration and resource allocation */
	if (!acpi_is_wakeup_s3())
		AGESAWRAPPER(amdinitmid);

	printk(BIOS_DEBUG, " ader - leaving domain_enable_resources.\n");
#endif
}
#if CONFIG_HW_MEM_HOLE_SIZEK != 0
struct hw_mem_hole_info {
	unsigned hole_startk;
	int node_id;
};

static struct hw_mem_hole_info get_hw_mem_hole_info(void)
{
	struct hw_mem_hole_info mem_hole;
	int i;
	mem_hole.hole_startk = CONFIG_HW_MEM_HOLE_SIZEK;
	mem_hole.node_id = -1;
	for (i = 0; i < node_nums; i++) {
		dram_base_mask_t d;
		u32 hole;
		d = get_dram_base_mask(i);
		if (!(d.mask & 1)) continue; // no memory on this node
		hole = pci_read_config32(__f1_dev[i], 0xf0);
		if (hole & 2) { // we found the hole
			mem_hole.hole_startk = (hole & (0xff << 24)) >> 10;
			mem_hole.node_id = i; // record the node number with the hole
			break; // only one hole
		}
	}

	/* Double check whether the base and limit registers leave a gap
	 * (i.e. they are not contiguous) instead of a hole; if so, use the
	 * start of that gap as hole_startk.
	 */
	if (mem_hole.node_id == -1) {
		resource_t limitk_pri = 0;
		for (i = 0; i < node_nums; i++) {
			dram_base_mask_t d;
			resource_t base_k, limit_k;
			d = get_dram_base_mask(i);
			if (!(d.base & 1)) continue;
			base_k = ((resource_t)(d.base & 0x1fffff00)) << 9;
			if (base_k > 4 * 1024 * 1024) break; // no need to check above 4 GiB
			if (limitk_pri != base_k) { // we found the hole
				mem_hole.hole_startk = (unsigned)limitk_pri; // must be below 4G
				mem_hole.node_id = i;
				break; // only one hole
			}
			limit_k = ((resource_t)(((d.mask & ~1) + 0x000FF) & 0x1fffff00)) << 9;
			limitk_pri = limit_k;
		}
	}
	return mem_hole;
}
#endif
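
/*
 * domain_set_resources() reports the usable DRAM to the allocator: each
 * node's range from the F1 base/mask registers is carved around the
 * legacy VGA hole at 0xa0000-0xbffff and around the MMIO hole below
 * 4 GiB (mmio_basek), with the remainder resumed above 4 GiB up to
 * TOP_MEM2.
 */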
static void domain_set_resources(device_t dev)
{
	unsigned long mmio_basek;
	u32 pci_tolm;
	int i, idx;
	struct bus *link;
#if CONFIG_HW_MEM_HOLE_SIZEK != 0
	struct hw_mem_hole_info mem_hole;
	u32 reset_memhole = 1;
#endif

	pci_tolm = 0xffffffffUL;
	for (link = dev->link_list; link; link = link->next) {
		pci_tolm = find_pci_tolm(link);
	}

	// FIXME handle interleaved nodes. If you fix this here, please fix
	// amdk8, too.
	mmio_basek = pci_tolm >> 10;
	/* Round mmio_basek to something the processor can support */
	mmio_basek &= ~((1 << 6) - 1);

	// FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M
	// MMIO hole. If you fix this here, please fix amdk8, too.
	/* Round the mmio hole to 64M */
	mmio_basek &= ~((64 * 1024) - 1);

#if CONFIG_HW_MEM_HOLE_SIZEK != 0
	/* If the hw mem hole was already set in the raminit stage, compare
	 * mmio_basek and hole_startk: if mmio_basek is bigger than hole_startk,
	 * use hole_startk as mmio_basek and don't reset the hole.
	 * Otherwise reset the hole to mmio_basek.
	 */

	mem_hole = get_hw_mem_hole_info();

	// Use hole_startk as mmio_basek, and we don't need to reset the hole anymore
	if ((mem_hole.node_id != -1) && (mmio_basek > mem_hole.hole_startk)) {
		mmio_basek = mem_hole.hole_startk;
		reset_memhole = 0;
	}
#endif

	idx = 0x10;
	for (i = 0; i < node_nums; i++) {
		dram_base_mask_t d;
		resource_t basek, limitk, sizek; // 4 1T

		d = get_dram_base_mask(i);

		if (!(d.mask & 1)) continue;
		basek = ((resource_t)(d.base & 0x1fffff00)) << 9; // could overflow, we may lose 6 bits here
		limitk = ((resource_t)(((d.mask & ~1) + 0x000FF) & 0x1fffff00)) << 9;

		sizek = limitk - basek;

		/* see if we need a hole from 0xa0000 to 0xbffff */
		if ((basek < ((8*64)+(8*16))) && (sizek > ((8*64)+(16*16)))) {
			ram_resource(dev, (idx | i), basek, ((8*64)+(8*16)) - basek);
			idx += 0x10;
			basek = (8*64)+(16*16);
			sizek = limitk - ((8*64)+(16*16));
		}

		//printk(BIOS_DEBUG, "node %d : mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n", i, mmio_basek, basek, limitk);

		/* split the region to accommodate pci memory space */
		if ((basek < 4*1024*1024) && (limitk > mmio_basek)) {
			if (basek <= mmio_basek) {
				unsigned pre_sizek;
				pre_sizek = mmio_basek - basek;
				if (pre_sizek > 0) {
					ram_resource(dev, (idx | i), basek, pre_sizek);
					idx += 0x10;
					sizek -= pre_sizek;
				}
				basek = mmio_basek;
			}
			if ((basek + sizek) <= 4*1024*1024) {
				sizek = 0;
			}
			else {
				uint64_t topmem2 = bsp_topmem2();
				basek = 4*1024*1024;
				sizek = topmem2/1024 - basek;
			}
		}

		ram_resource(dev, (idx | i), basek, sizek);
		idx += 0x10;
		printk(BIOS_DEBUG, "node %d: mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n",
				i, mmio_basek, basek, limitk);
	}

	add_uma_resource_below_tolm(dev, 7);

	for (link = dev->link_list; link; link = link->next) {
		if (link->children) {
			assign_resources(link);
		}
	}
}
static const char *domain_acpi_name(const struct device *dev)
{
	if (dev->path.type == DEVICE_PATH_DOMAIN)
		return "PCI0";

	return NULL;
}
static struct device_operations pci_domain_ops = {
	.read_resources = domain_read_resources,
	.set_resources = domain_set_resources,
	.enable_resources = domain_enable_resources,
	.init = NULL,
	.scan_bus = pci_domain_scan_bus,
	.acpi_name = domain_acpi_name,
};
static void sysconf_init(device_t dev) // first node
{
	sblink = (pci_read_config32(dev, 0x64) >> 8) & 7; // don't forget sublink1
	node_nums = ((pci_read_config32(dev, 0x60) >> 4) & 7) + 1; //NodeCnt[2:0]
}
static void add_more_links(device_t dev, unsigned total_links)
{
	struct bus *link, *last = NULL;
	int link_num;

	for (link = dev->link_list; link; link = link->next)
		last = link;

	if (last) {
		int links = total_links - last->link_num;
		link_num = last->link_num;
		if (links > 0) {
			link = malloc(links * sizeof(*link));
			if (!link)
				die("Couldn't allocate more links!\n");
			memset(link, 0, links * sizeof(*link));
			last->next = link;
		}
	}
	else {
		link_num = -1;
		link = malloc(total_links * sizeof(*link));
		memset(link, 0, total_links * sizeof(*link));
		dev->link_list = link;
	}

	for (link_num = link_num + 1; link_num < total_links; link_num++) {
		link->link_num = link_num;
		link->dev = dev;
		link->next = link + 1;
		last = link;
		link = link->next;
	}
	last->next = NULL;
}
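
/*
 * cpu_bus_scan() discovers the CPU nodes: it queries AGESA for the module
 * and IO-APIC counts, locates each node's PCI functions at
 * CONFIG_CBB:CONFIG_CDB+n, reads the enabled core count from function 5
 * register 0x84, and then adds one CPU device per core, offsetting the
 * local APIC IDs past the IO-APIC range when the total would exceed 15.
 */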
static void cpu_bus_scan(device_t dev)
{
	struct bus *cpu_bus;
	device_t dev_mc;
#if CONFIG_CBB
	device_t pci_domain;
#endif
	int i, j;
	int coreid_bits;
	int core_max = 0;
	unsigned ApicIdCoreIdSize;
	unsigned core_nums;
	int siblings = 0;
	unsigned int family;
	u32 modules = 0;
	VOID* modules_ptr = &modules;
	BUILD_OPT_CFG* options = NULL;
	int ioapic_count = 0;

	// TODO Remove the printk's.
	printk(BIOS_SPEW, "MullinsPI Debug: Grabbing the AMD Topology Information.\n");
	AmdGetValue(AMD_GLOBAL_USER_OPTIONS, (VOID**)&options, sizeof(options));
	AmdGetValue(AMD_GLOBAL_NUM_MODULES, &modules_ptr, sizeof(modules));
	modules = *(u32*)modules_ptr;
	ASSERT(modules > 0);
	ASSERT(options);
	ioapic_count = (int)options->CfgPlatNumIoApics;
	ASSERT(ioapic_count > 0);
	printk(BIOS_SPEW, "MullinsPI Debug: AMD Topology Number of Modules (@0x%p) is %d\n", modules_ptr, modules);
	printk(BIOS_SPEW, "MullinsPI Debug: AMD Topology Number of IOAPICs (@0x%p) is %d\n", options, (int)options->CfgPlatNumIoApics);

#if CONFIG_CBB
	dev_mc = dev_find_slot(0, PCI_DEVFN(CONFIG_CDB, 0)); //0x00
	if (dev_mc && dev_mc->bus) {
		printk(BIOS_DEBUG, "%s found", dev_path(dev_mc));
		pci_domain = dev_mc->bus->dev;
		if (pci_domain && (pci_domain->path.type == DEVICE_PATH_DOMAIN)) {
			printk(BIOS_DEBUG, "\n%s move to ", dev_path(dev_mc));
			dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
			printk(BIOS_DEBUG, "%s", dev_path(dev_mc));
		} else {
			printk(BIOS_DEBUG, " but it is not under pci_domain directly ");
		}
		printk(BIOS_DEBUG, "\n");
	}
	dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
	if (!dev_mc) {
		dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0));
		if (dev_mc && dev_mc->bus) {
			printk(BIOS_DEBUG, "%s found\n", dev_path(dev_mc));
			pci_domain = dev_mc->bus->dev;
			if (pci_domain && (pci_domain->path.type == DEVICE_PATH_DOMAIN)) {
				if ((pci_domain->link_list) && (pci_domain->link_list->children == dev_mc)) {
					printk(BIOS_DEBUG, "%s move to ", dev_path(dev_mc));
					dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
					printk(BIOS_DEBUG, "%s\n", dev_path(dev_mc));
					while (dev_mc) {
						printk(BIOS_DEBUG, "%s move to ", dev_path(dev_mc));
						dev_mc->path.pci.devfn -= PCI_DEVFN(0x18, 0);
						printk(BIOS_DEBUG, "%s\n", dev_path(dev_mc));
						dev_mc = dev_mc->sibling;
					}
				}
			}
		}
	}
#endif
	dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
	if (!dev_mc) {
		printk(BIOS_ERR, "%02x:%02x.0 not found", CONFIG_CBB, CONFIG_CDB);
		die("");
	}
	sysconf_init(dev_mc);
#if CONFIG_CBB && (MAX_NODE_NUMS > 32)
	if (node_nums > 32) { // need to put nodes 32 to 63 on bus 0xfe
		if (pci_domain->link_list && !pci_domain->link_list->next) {
			struct bus *new_link = new_link(pci_domain);
			pci_domain->link_list->next = new_link;
			new_link->link_num = 1;
			new_link->dev = pci_domain;
			new_link->children = 0;
			printk(BIOS_DEBUG, "%s links now 2\n", dev_path(pci_domain));
		}
		pci_domain->link_list->next->secondary = CONFIG_CBB - 1;
	}
#endif
	/* Get Max Number of cores (MNC) */
	coreid_bits = (cpuid_ecx(0x80000008) & 0x0000F000) >> 12;
	core_max = 1 << (coreid_bits & 0x000F); //mnc

	ApicIdCoreIdSize = ((cpuid_ecx(0x80000008) >> 12) & 0xF);
	if (ApicIdCoreIdSize) {
		core_nums = (1 << ApicIdCoreIdSize) - 1;
	} else {
		core_nums = 3; //quad core
	}
	/* Find which cpus are present */
	cpu_bus = dev->link_list;
	for (i = 0; i < node_nums; i++) {
		device_t cdb_dev;
		unsigned busn, devn;
		struct bus *pbus;

		busn = CONFIG_CBB;
		devn = CONFIG_CDB + i;
		pbus = dev_mc->bus;
#if CONFIG_CBB && (MAX_NODE_NUMS > 32)
		if (i >= 32) {
			busn--;
			devn -= 32;
			pbus = pci_domain->link_list->next;
		}
#endif

		/* Find the cpu's pci device */
		cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
		if (!cdb_dev) {
			/* If I am probing things in a weird order
			 * ensure all of the cpu's pci devices are found.
			 */
			int fn;
			for (fn = 0; fn <= 5; fn++) { //FBDIMM?
				cdb_dev = pci_probe_dev(NULL, pbus,
							PCI_DEVFN(devn, fn));
			}
			cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
		} else {
			/* Ok, we need to set the links for that device;
			 * otherwise the devices under it will not be scanned.
			 */
			add_more_links(cdb_dev, 4);
		}

		family = cpuid_eax(1);
		family = (family >> 20) & 0xFF;
		if (family == 1) { //f10
			u32 dword;
			cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
			dword = pci_read_config32(cdb_dev, 0xe8);
			siblings = ((dword & BIT15) >> 13) | ((dword & (BIT13 | BIT12)) >> 12);
		} else if (family == 7) { //f16
			cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 5));
			if (cdb_dev && cdb_dev->enabled) {
				siblings = pci_read_config32(cdb_dev, 0x84);
				siblings &= 0xFF;
			}
		} else {
			siblings = 0; //default one core
		}
		int enable_node = cdb_dev && cdb_dev->enabled;
		printk(BIOS_SPEW, "%s family%xh, core_max = 0x%x, core_nums = 0x%x, siblings = 0x%x\n",
			dev_path(cdb_dev), 0x0f + family, core_max, core_nums, siblings);

		for (j = 0; j <= siblings; j++) {
			u32 lapicid_start = 0;

			/*
			 * APIC ID calculation is tightly coupled with AGESA v5 code.
			 * This calculation MUST match the assignment calculation done
			 * in the LocalApicInitializationAtEarly() function.
			 * Also see GetLocalApicIdForCore().
			 *
			 * Apply apic enumeration rules:
			 * For systems with >= 16 APICs, put the IO-APICs at 0..n and
			 * put the local-APICs at m..z.
			 *
			 * This is needed because many IO-APIC devices only have 4 bits
			 * for their APIC id and therefore must reside at 0..15.
			 */
			if ((node_nums * core_max) + ioapic_count >= 0x10) {
				lapicid_start = (ioapic_count - 1) / core_max;
				lapicid_start = (lapicid_start + 1) * core_max;
				printk(BIOS_SPEW, "lapicid_start = 0x%x ", lapicid_start);
			}
			u32 apic_id = (lapicid_start * (i/modules + 1)) + ((i % modules) ? (j + (siblings + 1)) : j);
			printk(BIOS_SPEW, "node 0x%x core 0x%x apicid = 0x%x\n",
					i, j, apic_id);

			device_t cpu = add_cpu_device(cpu_bus, apic_id, enable_node);
			if (cpu)
				amd_cpu_topology(cpu, i, j);
		} //j
	}
}
static void cpu_bus_init(device_t dev)
{
	initialize_cpus(dev->link_list);
}

static struct device_operations cpu_bus_ops = {
	.read_resources = DEVICE_NOOP,
	.set_resources = DEVICE_NOOP,
	.enable_resources = DEVICE_NOOP,
	.init = cpu_bus_init,
	.scan_bus = cpu_bus_scan,
};
static void root_complex_enable_dev(struct device *dev)
{
	static int done = 0;

	if (!done) {
		setup_bsp_ramtop();
		done = 1;
	}

	/* Set the operations if it is a special bus type */
	if (dev->path.type == DEVICE_PATH_DOMAIN) {
		dev->ops = &pci_domain_ops;
	} else if (dev->path.type == DEVICE_PATH_CPU_CLUSTER) {
		dev->ops = &cpu_bus_ops;
	}
}

struct chip_operations northbridge_amd_pi_00730F01_root_complex_ops = {
	CHIP_NAME("AMD FAM16 Root Complex")
	.enable_dev = root_complex_enable_dev,
};
/*********************************************************************
 * Change the vendor / device IDs to match the generic VBIOS header. *
 *********************************************************************/
u32 map_oprom_vendev(u32 vendev)
{
	u32 new_vendev;
	new_vendev =
		((0x10029850 <= vendev) && (vendev <= 0x1002986F)) ? 0x10029850 : vendev;

	if (vendev != new_vendev)
		printk(BIOS_NOTICE, "Mapping PCI device %8x to %8x\n", vendev, new_vendev);

	return new_vendev;
}