Add AMD Family 10 CPU support to the northbridge folder
src/northbridge/amd/agesa/family10/northbridge.c
1 /*
2 * This file is part of the coreboot project.
4 * Copyright (C) 2011 Advanced Micro Devices, Inc.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include <console/console.h>
21 #include <arch/io.h>
22 #include <stdint.h>
23 #include <device/device.h>
24 #include <device/pci.h>
25 #include <device/pci_ids.h>
26 #include <device/hypertransport.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <bitops.h>
30 #include <cpu/cpu.h>
31 #include <cpu/x86/lapic.h>
33 #if CONFIG_LOGICAL_CPUS==1
34 #include <pc80/mc146818rtc.h>
35 #endif
37 #include <cpu/amd/amdfam10_sysconf.h>
38 #include <Porting.h>
39 #include <AGESA.h>
40 #include <Options.h>
41 #include "root_complex/chip.h"
42 #include "northbridge.h"
43 #include "amdfam10.h"
44 #include "chip.h"
46 extern uint32_t agesawrapper_amdinitmid(void);
48 typedef struct amdfam10_sysconf_t sys_info_conf_t;
49 typedef struct dram_base_mask {
50 u32 base; //[47:27] at [28:8]
51 u32 mask; //[47:27] at [28:8] and enable at bit 0
52 } dram_base_mask_t;
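/* base and mask hold DRAM address bits [47:27] at register bits [28:8]; bit 0
 * of mask is the enable bit. Masking with 0x1fffff00 and shifting left by 9
 * therefore yields the base/limit in KB, as done in get_hw_mem_hole_info()
 * and amdfam10_domain_set_resources() below. */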
55 struct amdfam10_sysconf_t sysconf;
56 static device_t __f0_dev[NODE_NUMS];
57 static device_t __f1_dev[NODE_NUMS];
58 static device_t __f2_dev[NODE_NUMS];
59 static device_t __f4_dev[NODE_NUMS];
60 static unsigned fx_devs = 0;
62 #if (defined CONFIG_EXT_CONF_SUPPORT) && CONFIG_EXT_CONF_SUPPORT == 1
63 #error CONFIG_EXT_CONF_SUPPORT == 1 is not supported anymore!
64 #endif
66 static dram_base_mask_t get_dram_base_mask(u32 nodeid)
68 device_t dev;
69 dram_base_mask_t d;
70 dev = __f1_dev[0];
72 #if CONFIG_EXT_CONF_SUPPORT == 1
73 /* Use the extended config space only, for simplicity */
74 pci_write_config32(dev, 0x110, nodeid | (1<<28)); // [47:27] at [28:8]
75 d.mask = pci_read_config32(dev, 0x114); // enable is bit 0
76 pci_write_config32(dev, 0x110, nodeid | (0<<28));
77 d.base = pci_read_config32(dev, 0x114) & 0x1fffff00; //[47:27] at [28:8];
78 #else
79 u32 temp;
80 temp = pci_read_config32(dev, 0x44 + (nodeid << 3)); //[39:24] at [31:16]
81 d.mask = ((temp & 0xfff80000)>>(8+3)); // mask out DramMask [26:24] too
82 temp = pci_read_config32(dev, 0x144 + (nodeid <<3)) & 0xff; //[47:40] at [7:0]
83 d.mask |= temp<<21;
85 temp = pci_read_config32(dev, 0x40 + (nodeid << 3)); //[39:24] at [31:16]
86 d.mask |= (temp & 1); // enable bit
88 d.base = ((temp & 0xfff80000)>>(8+3)); // mask out DramBase [26:24] too
89 temp = pci_read_config32(dev, 0x140 + (nodeid <<3)) & 0xff; //[47:40] at [7:0]
90 d.base |= temp<<21;
91 #endif
92 return d;
95 #if CONFIG_EXT_CONF_SUPPORT
96 static void set_addr_map_reg_4_6_in_one_node(u32 nodeid, u32 cfg_map_dest,
97 u32 busn_min, u32 busn_max,
98 u32 type)
100 device_t dev;
101 u32 i;
102 u32 tempreg;
103 u32 index_min, index_max;
104 u32 dest_min, dest_max;
105 index_min = busn_min>>2; dest_min = busn_min - (index_min<<2);
106 index_max = busn_max>>2; dest_max = busn_max - (index_max<<2);
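/* Each indexed map register selected through 0x110/0x114 packs four 8-bit
 * routing entries, one per bus number, so bus n lives in dword index n>>2,
 * byte n&3; hence the three cases handled below. */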
108 // three cases: index_min == index_max; index_min + 1 == index_max; index_min + 1 < index_max
109 dev = __f1_dev[nodeid];
110 if (index_min== index_max) {
111 pci_write_config32(dev, 0x110, index_min | (type<<28));
112 tempreg = pci_read_config32(dev, 0x114);
113 for (i=dest_min; i<=dest_max; i++) {
114 tempreg &= ~(0xff<<(i*8));
115 tempreg |= (cfg_map_dest<<(i*8));
117 pci_write_config32(dev, 0x110, index_min | (type<<28)); // do i need to write it again
118 pci_write_config32(dev, 0x114, tempreg);
119 } else if (index_min<index_max) {
120 pci_write_config32(dev, 0x110, index_min | (type<<28));
121 tempreg = pci_read_config32(dev, 0x114);
122 for (i=dest_min; i<=3; i++) {
123 tempreg &= ~(0xff<<(i*8));
124 tempreg |= (cfg_map_dest<<(i*8));
126 pci_write_config32(dev, 0x110, index_min | (type<<28)); // do i need to write it again
127 pci_write_config32(dev, 0x114, tempreg);
129 pci_write_config32(dev, 0x110, index_max | (type<<28));
130 tempreg = pci_read_config32(dev, 0x114);
131 for (i=0; i<=dest_max; i++) {
132 tempreg &= ~(0xff<<(i*8));
133 tempreg |= (cfg_map_dest<<(i*8));
135 pci_write_config32(dev, 0x110, index_max | (type<<28)); // do i need to write it again
136 pci_write_config32(dev, 0x114, tempreg);
137 if ((index_max-index_min)>1) {
138 tempreg = 0;
139 for (i=0; i<=3; i++) {
140 tempreg &= ~(0xff<<(i*8));
141 tempreg |= (cfg_map_dest<<(i*8));
143 for (i=index_min+1; i<index_max;i++) {
144 pci_write_config32(dev, 0x110, i | (type<<28));
145 pci_write_config32(dev, 0x114, tempreg);
150 #endif
152 #if CONFIG_PCI_BUS_SEGN_BITS
153 static u32 check_segn(device_t dev, u32 segbusn, u32 nodes,
154 sys_info_conf_t *sysinfo)
156 // check segbusn here; every node must have the same segn
157 if ((segbusn & 0xff)>(0xe0-1)) {// use next segn
158 u32 segn = (segbusn >> 8) & 0x0f;
159 segn++;
160 segbusn = segn<<8;
162 if (segbusn>>8) {
163 u32 val;
164 val = pci_read_config32(dev, 0x160);
165 val &= ~(0xf<<25);
166 val |= (segbusn & 0xf00)<<(25-8);
167 pci_write_config32(dev, 0x160, val);
170 return segbusn;
172 #endif
174 static u32 get_io_addr_index(u32 nodeid, u32 linkn)
176 u32 index;
178 for (index=0; index<256; index++) {
179 if ((sysconf.conf_io_addrx[index+4] == 0)) {
180 sysconf.conf_io_addr[index+4] = (nodeid & 0x3f) ;
181 sysconf.conf_io_addrx[index+4] = 1 | ((linkn & 0x7)<<4);
182 return index;
186 return 0;
190 static u32 get_mmio_addr_index(u32 nodeid, u32 linkn)
192 u32 index;
194 for (index=0; index<64; index++) {
195 if ((sysconf.conf_mmio_addrx[index+8] == 0)) {
196 sysconf.conf_mmio_addr[index+8] = (nodeid & 0x3f) ;
197 sysconf.conf_mmio_addrx[index+8] = 1 | ((linkn & 0x7)<<4);
198 return index;
202 return 0;
205 static void store_conf_io_addr(u32 nodeid, u32 linkn, u32 reg, u32 index,
206 u32 io_min, u32 io_max)
208 u32 val;
209 #if CONFIG_EXT_CONF_SUPPORT
210 if (reg!=0x110) {
211 #endif
212 /* io range allocation */
213 index = (reg-0xc0)>>3;
214 #if CONFIG_EXT_CONF_SUPPORT
215 } else {
216 index+=4;
218 #endif
220 val = (nodeid & 0x3f); // 6 bits used
221 sysconf.conf_io_addr[index] = val | ((io_max<<8) & 0xfffff000); //limit : with nodeid
222 val = 3 | ((linkn & 0x7)<<4) ; // 8 bits used
223 sysconf.conf_io_addrx[index] = val | ((io_min<<8) & 0xfffff000); // base : with enable bit
225 if (sysconf.io_addr_num<(index+1))
226 sysconf.io_addr_num = index+1;
229 static void store_conf_mmio_addr(u32 nodeid, u32 linkn, u32 reg, u32 index,
230 u32 mmio_min, u32 mmio_max)
232 u32 val;
233 #if CONFIG_EXT_CONF_SUPPORT
234 if (reg!=0x110) {
235 #endif
236 /* io range allocation */
237 index = (reg-0x80)>>3;
238 #if CONFIG_EXT_CONF_SUPPORT
239 } else {
240 index += 8;
242 #endif
244 val = (nodeid & 0x3f) ; // 6 bits used
245 sysconf.conf_mmio_addr[index] = val | (mmio_max & 0xffffff00); //limit : with nodeid and linkn
246 val = 3 | ((linkn & 0x7)<<4) ; // 8 bits used
247 sysconf.conf_mmio_addrx[index] = val | (mmio_min & 0xffffff00); // base : with enable bit
249 if (sysconf.mmio_addr_num<(index+1))
250 sysconf.mmio_addr_num = index+1;
253 static void set_io_addr_reg(device_t dev, u32 nodeid, u32 linkn, u32 reg,
254 u32 io_min, u32 io_max)
257 u32 i;
258 u32 tempreg;
259 #if CONFIG_EXT_CONF_SUPPORT
260 if (reg!=0x110) {
261 #endif
262 /* io range allocation */
263 tempreg = (nodeid&0xf) | ((nodeid & 0x30)<<(8-4)) | (linkn<<4) | ((io_max&0xf0)<<(12-4)); //limit
264 for (i=0; i<sysconf.nodes; i++)
265 pci_write_config32(__f1_dev[i], reg+4, tempreg);
267 tempreg = 3 /*| ( 3<<4)*/ | ((io_min&0xf0)<<(12-4)); //base :ISA and VGA ?
268 #if 0
269 // FIXME: can we use VGA reg instead?
270 if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
271 printk(BIOS_SPEW, "%s, enabling legacy VGA IO forwarding for %s link %s\n",
272 __func__, dev_path(dev), link);
273 tempreg |= PCI_IO_BASE_VGA_EN;
275 if (dev->link[link].bridge_ctrl & PCI_BRIDGE_CTL_NO_ISA) {
276 tempreg |= PCI_IO_BASE_NO_ISA;
278 #endif
279 for (i=0; i<sysconf.nodes; i++)
280 pci_write_config32(__f1_dev[i], reg, tempreg);
281 #if CONFIG_EXT_CONF_SUPPORT
282 return;
285 u32 cfg_map_dest;
286 u32 j;
287 // if ht_c_index > 3, we should use the extended config space
288 if (io_min>io_max) return;
289 // for nodeid at first
290 cfg_map_dest = (1<<7) | (1<<6) | (linkn<<0);
292 set_addr_map_reg_4_6_in_one_node(nodeid, cfg_map_dest, io_min, io_max, 4);
294 // all other nodes
295 cfg_map_dest = (1<<7) | (0<<6) | (nodeid<<0);
296 for (j = 0; j< sysconf.nodes; j++) {
297 if (j== nodeid) continue;
298 set_addr_map_reg_4_6_in_one_node(j,cfg_map_dest, io_min, io_max, 4);
300 #endif
303 static void set_mmio_addr_reg(u32 nodeid, u32 linkn, u32 reg, u32 index, u32 mmio_min, u32 mmio_max, u32 nodes)
306 u32 i;
307 u32 tempreg;
308 #if CONFIG_EXT_CONF_SUPPORT
309 if (reg!=0x110) {
310 #endif
311 /* io range allocation */
312 tempreg = (nodeid&0xf) | (linkn<<4) | (mmio_max&0xffffff00); //limit
313 for (i=0; i<nodes; i++)
314 pci_write_config32(__f1_dev[i], reg+4, tempreg);
315 tempreg = 3 | (nodeid & 0x30) | (mmio_min&0xffffff00);
316 for (i=0; i<sysconf.nodes; i++)
317 pci_write_config32(__f1_dev[i], reg, tempreg);
318 #if CONFIG_EXT_CONF_SUPPORT
319 return;
322 device_t dev;
323 u32 j;
324 // if ht_c_index > 3, we should use the extended config space
325 // for nodeid at first
326 u32 enable;
328 if (mmio_min>mmio_max) {
329 return;
332 enable = 1;
333 dev = __f1_dev[nodeid];
334 tempreg = ((mmio_min>>3) & 0x1fffff00)| (1<<6) | (linkn<<0);
335 pci_write_config32(dev, 0x110, index | (2<<28));
336 pci_write_config32(dev, 0x114, tempreg);
338 tempreg = ((mmio_max>>3) & 0x1fffff00) | enable;
339 pci_write_config32(dev, 0x110, index | (3<<28));
340 pci_write_config32(dev, 0x114, tempreg);
342 // all other nodes
343 tempreg = ((mmio_min>>3) & 0x1fffff00) | (0<<6) | (nodeid<<0);
344 for (j = 0; j< sysconf.nodes; j++) {
345 if (j== nodeid) continue;
346 dev = __f1_dev[j];
347 pci_write_config32(dev, 0x110, index | (2<<28));
348 pci_write_config32(dev, 0x114, tempreg);
351 tempreg = ((mmio_max>>3) & 0x1fffff00) | enable;
352 for (j = 0; j< sysconf.nodes; j++) {
353 if(j==nodeid) continue;
354 dev = __f1_dev[j];
355 pci_write_config32(dev, 0x110, index | (3<<28));
356 pci_write_config32(dev, 0x114, tempreg);
358 #endif
361 static device_t get_node_pci(u32 nodeid, u32 fn)
363 #if NODE_NUMS == 64
364 if (nodeid < 32) {
365 return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
366 } else {
367 return dev_find_slot(CONFIG_CBB-1, PCI_DEVFN(CONFIG_CDB + nodeid - 32, fn));
370 #else
371 return dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB + nodeid, fn));
372 #endif
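/* NB_CFG is MSR 0xC001_001F; bit 54 (InitApicIdCpuIdLo) controls whether the
 * core ID sits in the low bits of the initial APIC ID, which is why that bit
 * is sampled by read_nb_cfg_54() below. */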
375 static unsigned int read_nb_cfg_54(void)
377 msr_t msr;
378 msr = rdmsr(NB_CFG_MSR);
379 return (( msr.hi >> (54-32)) & 1);
382 static void get_fx_devs(void)
384 int i;
385 for (i = 0; i < NODE_NUMS; i++) {
386 __f0_dev[i] = get_node_pci(i, 0);
387 __f1_dev[i] = get_node_pci(i, 1);
388 __f2_dev[i] = get_node_pci(i, 2);
389 __f4_dev[i] = get_node_pci(i, 4);
390 if (__f0_dev[i] != NULL && __f1_dev[i] != NULL)
391 fx_devs = i+1;
393 if (__f1_dev[0] == NULL || __f0_dev[0] == NULL || fx_devs == 0) {
394 die("Cannot find 0:0x18.[0|1]\n");
398 static u32 f1_read_config32(unsigned reg)
400 if (fx_devs == 0)
401 get_fx_devs();
402 return pci_read_config32(__f1_dev[0], reg);
405 static void f1_write_config32(unsigned reg, u32 value)
407 int i;
408 if (fx_devs == 0)
409 get_fx_devs();
410 for(i = 0; i < fx_devs; i++) {
411 device_t dev;
412 dev = __f1_dev[i];
413 if (dev && dev->enabled) {
414 pci_write_config32(dev, reg, value);
419 static u32 amdfam10_nodeid(device_t dev)
421 #if NODE_NUMS == 64
422 unsigned busn;
423 busn = dev->bus->secondary;
424 if (busn != CONFIG_CBB) {
425 return (dev->path.pci.devfn >> 3) - CONFIG_CDB + 32;
426 } else {
427 return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
430 #else
431 return (dev->path.pci.devfn >> 3) - CONFIG_CDB;
432 #endif
435 static void set_vga_enable_reg(u32 nodeid, u32 linkn)
437 u32 val;
439 val = 1 | (nodeid<<4) | (linkn<<12);
440 /* This routes (1) MMIO 0xa0000-0xbffff and (2) I/O 0x3b0-0x3bb,
441 0x3c0-0x3df to the given node and link */
442 f1_write_config32(0xf4, val);
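/* reg_useable() return values, as consumed by the find_iopair/find_mempair
 * helpers below: 2 = register pair is free, 1 = already allocated to this
 * device/node/link, 0 = in use by someone else. */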
446 static int reg_useable(unsigned reg, device_t goal_dev, unsigned goal_nodeid,
447 unsigned goal_link)
449 struct resource *res;
450 unsigned nodeid, link = 0;
451 int result;
452 res = 0;
453 for (nodeid = 0; !res && (nodeid < fx_devs); nodeid++) {
454 device_t dev;
455 dev = __f0_dev[nodeid];
456 if (!dev)
457 continue;
458 for (link = 0; !res && (link < 8); link++) {
459 res = probe_resource(dev, IOINDEX(0x1000 + reg, link));
462 result = 2;
463 if (res) {
464 result = 0;
465 if ( (goal_link == (link - 1)) &&
466 (goal_nodeid == (nodeid - 1)) &&
467 (res->flags <= 1)) {
468 result = 1;
471 return result;
474 static struct resource *amdfam10_find_iopair(device_t dev, unsigned nodeid, unsigned link)
476 struct resource *resource;
477 u32 free_reg, reg;
478 resource = 0;
479 free_reg = 0;
481 for (reg = 0xc0; reg <= 0xd8; reg += 0x8) {
482 int result;
483 result = reg_useable(reg, dev, nodeid, link);
484 if (result == 1) {
485 /* I have been allocated this one */
486 break;
488 else if (result > 1) {
489 /* I have a free register pair */
490 free_reg = reg;
493 if (reg > 0xd8) {
494 reg = free_reg; // if none is free, free_reg is still 0
497 //Ext conf space
498 if(!reg) {
499 // Because of the extended config space we never run out of registers, but we need an index to tell them apart, so the same node and link can have multiple ranges
500 u32 index = get_io_addr_index(nodeid, link);
501 reg = 0x110+ (index<<24) + (4<<20); // index could be 0, 255
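/* Synthetic register value for the extended config space: bits [11:0] hold the
 * index register offset (0x110), bits [23:20] the map type (4 = I/O pair here,
 * 6 = MMIO pair in amdfam10_find_mempair), and bits [31:24] the map index.
 * amdfam10_set_resource() later recovers the register offset with
 * (resource->index & 0xfff) and the map index with (resource->index >> 24). */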
504 resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
506 return resource;
509 static struct resource *amdfam10_find_mempair(device_t dev, u32 nodeid, u32 link)
511 struct resource *resource;
512 u32 free_reg, reg;
513 resource = 0;
514 free_reg = 0;
516 for (reg = 0x80; reg <= 0xb8; reg += 0x8) {
517 int result;
518 result = reg_useable(reg, dev, nodeid, link);
519 if (result == 1) {
520 /* I have been allocated this one */
521 break;
523 else if (result > 1) {
524 /* I have a free register pair */
525 free_reg = reg;
528 if (reg > 0xb8) {
529 reg = free_reg;
532 //Ext conf space
533 if (!reg) {
534 // Because of the extended config space we never run out of registers,
535 // but we need an index to tell them apart, so the same node and
536 // same link can have multiple ranges
537 u32 index = get_mmio_addr_index(nodeid, link);
538 reg = 0x110+ (index<<24) + (6<<20); // index could be 0, 63
541 resource = new_resource(dev, IOINDEX(0x1000 + reg, link));
542 return resource;
545 static void amdfam10_link_read_bases(device_t dev, u32 nodeid, u32 link)
547 struct resource *resource;
549 /* Initialize the io space constraints on the current bus */
550 resource = amdfam10_find_iopair(dev, nodeid, link);
551 if (resource) {
552 u32 align;
553 #if CONFIG_EXT_CONF_SUPPORT == 1
554 if((resource->index & 0x1fff) == 0x1110) { // ext
555 align = 8;
557 else
558 #endif
559 align = log2(HT_IO_HOST_ALIGN);
560 resource->base = 0;
561 resource->size = 0;
562 resource->align = align;
563 resource->gran = align;
564 resource->limit = 0xffffUL;
565 resource->flags = IORESOURCE_IO | IORESOURCE_BRIDGE;
568 /* Initialize the prefetchable memory constraints on the current bus */
569 resource = amdfam10_find_mempair(dev, nodeid, link);
570 if (resource) {
571 resource->base = 0;
572 resource->size = 0;
573 resource->align = log2(HT_MEM_HOST_ALIGN);
574 resource->gran = log2(HT_MEM_HOST_ALIGN);
575 resource->limit = 0xffffffffffULL;
576 resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
577 resource->flags |= IORESOURCE_BRIDGE;
579 #if CONFIG_EXT_CONF_SUPPORT == 1
580 if ((resource->index & 0x1fff) == 0x1110) { // ext
581 normalize_resource(resource);
583 #endif
587 /* Initialize the memory constraints on the current bus */
588 resource = amdfam10_find_mempair(dev, nodeid, link);
589 if (resource) {
590 resource->base = 0;
591 resource->size = 0;
592 resource->align = log2(HT_MEM_HOST_ALIGN);
593 resource->gran = log2(HT_MEM_HOST_ALIGN);
594 resource->limit = 0xffffffffffULL;
595 resource->flags = IORESOURCE_MEM | IORESOURCE_BRIDGE;
596 #if CONFIG_EXT_CONF_SUPPORT == 1
597 if ((resource->index & 0x1fff) == 0x1110) { // ext
598 normalize_resource(resource);
600 #endif
604 static void amdfam10_read_resources(device_t dev)
606 u32 nodeid;
607 struct bus *link;
608 nodeid = amdfam10_nodeid(dev);
609 for (link = dev->link_list; link; link = link->next) {
610 if (link->children) {
611 amdfam10_link_read_bases(dev, nodeid, link->link_num);
616 static void amdfam10_set_resource(device_t dev, struct resource *resource,
617 u32 nodeid)
619 resource_t rbase, rend;
620 unsigned reg, link_num;
621 char buf[50];
623 /* Make certain the resource has actually been set */
624 if (!(resource->flags & IORESOURCE_ASSIGNED)) {
625 return;
628 /* If I have already stored this resource don't worry about it */
629 if (resource->flags & IORESOURCE_STORED) {
630 return;
633 /* Only handle PCI memory and IO resources */
634 if (!(resource->flags & (IORESOURCE_MEM | IORESOURCE_IO)))
635 return;
637 /* Ensure I am actually looking at a resource of function 1 */
638 if ((resource->index & 0xffff) < 0x1000) {
639 return;
641 /* Get the base address */
642 rbase = resource->base;
644 /* Get the limit (rounded up) */
645 rend = resource_end(resource);
647 /* Get the register and link */
648 reg = resource->index & 0xfff; // 4k
649 link_num = IOINDEX_LINK(resource->index);
651 if (resource->flags & IORESOURCE_IO) {
653 set_io_addr_reg(dev, nodeid, link_num, reg, rbase>>8, rend>>8);
654 store_conf_io_addr(nodeid, link_num, reg, (resource->index >> 24), rbase>>8, rend>>8);
656 else if (resource->flags & IORESOURCE_MEM) {
657 set_mmio_addr_reg(nodeid, link_num, reg, (resource->index >>24), rbase>>8, rend>>8, sysconf.nodes) ;// [39:8]
658 store_conf_mmio_addr(nodeid, link_num, reg, (resource->index >>24), rbase>>8, rend>>8);
660 resource->flags |= IORESOURCE_STORED;
661 sprintf(buf, " <node %x link %x>",
662 nodeid, link_num);
663 report_resource_stored(dev, resource, buf);
667 * I tried to reuse the resource allocation code in amdfam10_set_resource()
668 * but it is too difficult to deal with the resource allocation magic.
671 static void amdfam10_create_vga_resource(device_t dev, unsigned nodeid)
673 struct bus *link;
675 /* find out to which link the VGA card is connected;
676 * we only deal with the 'first' vga card */
677 for (link = dev->link_list; link; link = link->next) {
678 if (link->bridge_ctrl & PCI_BRIDGE_CTL_VGA) {
679 #if CONFIG_MULTIPLE_VGA_ADAPTERS == 1
680 extern device_t vga_pri; // the primary vga device, defined in device.c
681 printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d bus range [%d,%d]\n", vga_pri->bus->secondary,
682 link->secondary,link->subordinate);
683 /* We need to make sure the vga_pri is under the link */
684 if((vga_pri->bus->secondary >= link->secondary ) &&
685 (vga_pri->bus->secondary <= link->subordinate )
687 #endif
688 break;
692 /* no VGA card installed */
693 if (link == NULL)
694 return;
696 printk(BIOS_DEBUG, "VGA: %s (aka node %d) link %d has VGA device\n", dev_path(dev), nodeid, link->link_num);
697 set_vga_enable_reg(nodeid, link->link_num);
700 static void amdfam10_set_resources(device_t dev)
702 unsigned nodeid;
703 struct bus *bus;
704 struct resource *res;
706 /* Find the nodeid */
707 nodeid = amdfam10_nodeid(dev);
709 amdfam10_create_vga_resource(dev, nodeid);
711 /* Set each resource we have found */
712 for (res = dev->resource_list; res; res = res->next) {
713 amdfam10_set_resource(dev, res, nodeid);
716 for (bus = dev->link_list; bus; bus = bus->next) {
717 if (bus->children) {
718 assign_resources(bus);
723 static void mcf0_control_init(struct device *dev)
727 static unsigned amdfam10_scan_chains(device_t dev, unsigned max)
729 unsigned nodeid;
730 struct bus *link;
731 unsigned sblink = sysconf.sblk;
732 device_t io_hub = NULL;
733 u32 next_unitid = 0xff;
735 nodeid = amdfam10_nodeid(dev);
736 if (nodeid == 0) {
737 for (link = dev->link_list; link; link = link->next) {
738 if (link->link_num == sblink) { /* devicetree puts the IO Hub on link_list[3] */
739 io_hub = link->children;
740 if (!io_hub || !io_hub->enabled) {
741 die("I can't find the IO Hub, or IO Hub not enabled, please check the device tree.\n");
743 /* Now that nothing is overlapping it is safe to scan the children. */
744 max = pci_scan_bus(link, 0x00, ((next_unitid - 1) << 3) | 7, 0);
749 return max;
752 static struct device_operations northbridge_operations = {
753 .read_resources = amdfam10_read_resources,
754 .set_resources = amdfam10_set_resources,
755 .enable_resources = pci_dev_enable_resources,
756 .init = mcf0_control_init,
757 .scan_bus = amdfam10_scan_chains,
758 .enable = 0,
759 .ops_pci = 0,
762 static const struct pci_driver mcf0_driver __pci_driver = {
763 .ops = &northbridge_operations,
764 .vendor = PCI_VENDOR_ID_AMD,
765 .device = 0x1200,
768 struct chip_operations northbridge_amd_agesa_family10_ops = {
769 CHIP_NAME("AMD FAM10 Northbridge")
770 .enable_dev = 0,
774 static void amdfam10_domain_read_resources(device_t dev)
776 unsigned reg;
778 /* Find the already assigned resource pairs */
779 get_fx_devs();
780 for (reg = 0x80; reg <= 0xd8; reg+= 0x08) {
781 u32 base, limit;
782 base = f1_read_config32(reg);
783 limit = f1_read_config32(reg + 0x04);
784 /* Is this register allocated? */
785 if ((base & 3) != 0) {
786 unsigned nodeid, reg_link;
787 device_t reg_dev;
788 if (reg<0xc0) { // mmio
789 nodeid = (limit & 0xf) + (base&0x30);
790 } else { // io
791 nodeid = (limit & 0xf) + ((base>>4)&0x30);
793 reg_link = (limit >> 4) & 7;
794 reg_dev = __f0_dev[nodeid];
795 if (reg_dev) {
796 /* Reserve the resource */
797 struct resource *res;
798 res = new_resource(reg_dev, IOINDEX(0x1000 + reg, reg_link));
799 if (res) {
800 res->flags = 1;
805 /* FIXME: do we need to check the extended config space?
806 I don't trust the preset values that much */
808 #if CONFIG_PCI_64BIT_PREF_MEM == 0
809 pci_domain_read_resources(dev);
810 #else
811 struct bus *link;
812 struct resource *resource;
813 for (link=dev->link_list; link; link = link->next) {
814 /* Initialize the system wide io space constraints */
815 resource = new_resource(dev, 0|(link->link_num<<2));
816 resource->base = 0x400;
817 resource->limit = 0xffffUL;
818 resource->flags = IORESOURCE_IO;
820 /* Initialize the system wide prefetchable memory resources constraints */
821 resource = new_resource(dev, 1|(link->link_num<<2));
822 resource->limit = 0xfcffffffffULL;
823 resource->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
825 /* Initialize the system wide memory resources constraints */
826 resource = new_resource(dev, 2|(link->link_num<<2));
827 resource->limit = 0xfcffffffffULL;
828 resource->flags = IORESOURCE_MEM;
830 #endif
833 static void amdfam10_domain_enable_resources(device_t dev)
835 u32 val;
836 /* Must be called after PCI enumeration and resource allocation */
837 printk(BIOS_DEBUG, "\nFam10 - domain_enable_resources: AmdInitMid.\n");
838 val = agesawrapper_amdinitmid();
839 if (val) {
840 printk(BIOS_DEBUG, "agesawrapper_amdinitmid failed: %x \n", val);
842 printk(BIOS_DEBUG, " ader - leaving domain_enable_resources.\n");
846 static u32 my_find_pci_tolm(struct bus *bus, u32 tolm)
848 struct resource *min;
849 min = 0;
850 search_bus_resources(bus, IORESOURCE_MEM, IORESOURCE_MEM, tolm_test, &min);
851 if (min && tolm > min->base) {
852 tolm = min->base;
854 return tolm;
857 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
858 struct hw_mem_hole_info {
859 unsigned hole_startk;
860 int node_id;
863 static struct hw_mem_hole_info get_hw_mem_hole_info(void)
865 struct hw_mem_hole_info mem_hole;
866 int i;
868 mem_hole.hole_startk = CONFIG_HW_MEM_HOLE_SIZEK;
869 mem_hole.node_id = -1;
871 for (i = 0; i < sysconf.nodes; i++) {
872 dram_base_mask_t d;
873 u32 hole;
874 d = get_dram_base_mask(i);
875 if (!(d.mask & 1)) continue; // no memory on this node
877 hole = pci_read_config32(__f1_dev[i], 0xf0);
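/* F1 0xf0 is the DRAM Hole Address register: bit 0 is DramHoleValid and bits
 * [31:24] hold DramHoleBase (address bits [31:24]), so (hole & (0xff<<24)) >> 10
 * converts the hole base from bytes to KB. */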
878 if (hole & 1) { // we find the hole
879 mem_hole.hole_startk = (hole & (0xff<<24)) >> 10;
880 mem_hole.node_id = i; // record the node No with hole
881 break; // only one hole
885 // Double check: if the base and limit registers were set up non-contiguously instead of via the hole register, derive hole_startk from that gap
886 if (mem_hole.node_id == -1) {
887 resource_t limitk_pri = 0;
888 for (i=0; i<sysconf.nodes; i++) {
889 dram_base_mask_t d;
890 resource_t base_k, limit_k;
891 d = get_dram_base_mask(i);
892 if (!(d.base & 1)) continue;
894 base_k = ((resource_t)(d.base & 0x1fffff00)) <<9;
895 if (base_k > 4 *1024 * 1024) break; // no need to check any further
896 if (limitk_pri != base_k) { // we find the hole
897 mem_hole.hole_startk = (unsigned)limitk_pri; // must be below 4G
898 mem_hole.node_id = i;
899 break; //only one hole
902 limit_k = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9;
903 limitk_pri = limit_k;
906 return mem_hole;
908 #endif
910 #if CONFIG_WRITE_HIGH_TABLES==1
911 #define HIGH_TABLES_SIZE 64 // maximum size of high tables in KB
912 extern uint64_t high_tables_base, high_tables_size;
913 #endif
915 #if CONFIG_GFXUMA == 1
916 extern uint64_t uma_memory_base, uma_memory_size;
918 static void add_uma_resource(struct device *dev, int index)
920 struct resource *resource;
922 printk(BIOS_DEBUG, "Adding UMA memory area\n");
923 resource = new_resource(dev, index);
924 resource->base = (resource_t) uma_memory_base;
925 resource->size = (resource_t) uma_memory_size;
926 resource->flags = IORESOURCE_MEM | IORESOURCE_RESERVE |
927 IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;
929 #endif
931 static void amdfam10_domain_set_resources(device_t dev)
933 #if CONFIG_PCI_64BIT_PREF_MEM == 1
934 struct resource *io, *mem1, *mem2;
935 struct resource *res;
936 #endif
937 unsigned long mmio_basek;
938 u32 pci_tolm;
939 int i, idx;
940 struct bus *link;
941 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
942 struct hw_mem_hole_info mem_hole;
943 u32 reset_memhole = 1;
944 #endif
946 #if CONFIG_PCI_64BIT_PREF_MEM == 1
948 for (link = dev->link_list; link; link = link->next) {
949 /* Now reallocate the pci resources memory with the
950 * highest addresses I can manage.
952 mem1 = find_resource(dev, 1|(link->link_num<<2));
953 mem2 = find_resource(dev, 2|(link->link_num<<2));
955 printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
956 mem1->base, mem1->limit, mem1->size, mem1->align);
957 printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
958 mem2->base, mem2->limit, mem2->size, mem2->align);
960 /* See if both resources have roughly the same limits */
961 if (((mem1->limit <= 0xffffffff) && (mem2->limit <= 0xffffffff)) ||
962 ((mem1->limit > 0xffffffff) && (mem2->limit > 0xffffffff)))
964 /* If so place the one with the most stringent alignment first
966 if (mem2->align > mem1->align) {
967 struct resource *tmp;
968 tmp = mem1;
969 mem1 = mem2;
970 mem2 = tmp;
972 /* Now place the memory as high up as it will go */
973 mem2->base = resource_max(mem2);
974 mem1->limit = mem2->base - 1;
975 mem1->base = resource_max(mem1);
977 else {
978 /* Place the resources as high up as they will go */
979 mem2->base = resource_max(mem2);
980 mem1->base = resource_max(mem1);
983 printk(BIOS_DEBUG, "base1: 0x%08Lx limit1: 0x%08Lx size: 0x%08Lx align: %d\n",
984 mem1->base, mem1->limit, mem1->size, mem1->align);
985 printk(BIOS_DEBUG, "base2: 0x%08Lx limit2: 0x%08Lx size: 0x%08Lx align: %d\n",
986 mem2->base, mem2->limit, mem2->size, mem2->align);
989 for (res = &dev->resource_list; res; res = res->next)
991 res->flags |= IORESOURCE_ASSIGNED;
992 res->flags |= IORESOURCE_STORED;
993 report_resource_stored(dev, res, "");
995 #endif
997 pci_tolm = 0xffffffffUL;
998 for (link = dev->link_list; link; link = link->next) {
999 pci_tolm = my_find_pci_tolm(link, pci_tolm);
1002 // FIXME handle interleaved nodes. If you fix this here, please fix
1003 // amdk8, too.
1004 mmio_basek = pci_tolm >> 10;
1005 /* Round mmio_basek to something the processor can support */
1006 mmio_basek &= ~((1 << 6) -1);
1008 // FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M
1009 // MMIO hole. If you fix this here, please fix amdk8, too.
1010 /* Round the mmio hole to 64M */
1011 mmio_basek &= ~((64*1024) - 1);
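/* mmio_basek is in KB, so the first mask (~((1<<6)-1)) aligns it down to 64KB
 * and this one to 64MB, keeping the MMIO hole coarse enough for the MTRR
 * concerns noted in the FIXME above. */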
1013 #if CONFIG_HW_MEM_HOLE_SIZEK != 0
1014 /* If the hw mem hole was already set in the raminit stage, compare
1015 * mmio_basek and hole_startk here. If mmio_basek is bigger than hole_startk,
1016 * use hole_startk as mmio_basek and don't reset the hole;
1017 * otherwise reset the hole to mmio_basek.
1020 mem_hole = get_hw_mem_hole_info();
1022 // Use hole_basek as mmio_basek, and we don't need to reset hole anymore
1023 if ((mem_hole.node_id != -1) && (mmio_basek > mem_hole.hole_startk)) {
1024 mmio_basek = mem_hole.hole_startk;
1025 reset_memhole = 0;
1028 #endif
1030 idx = 0x10;
1031 for (i = 0; i < sysconf.nodes; i++) {
1032 dram_base_mask_t d;
1033 resource_t basek, limitk, sizek; // 4 1T
1034 d = get_dram_base_mask(i);
1036 if (!(d.mask & 1)) continue;
1037 basek = ((resource_t)(d.base & 0x1fffff00)) << 9; // could overflow; we may lose 6 bits here
1038 limitk = ((resource_t)((d.mask + 0x00000100) & 0x1fffff00)) << 9 ;
1039 sizek = limitk - basek;
1041 /* see if we need a hole from 0xa0000 to 0xbffff */
1042 if ((basek < ((8*64)+(8*16))) && (sizek > ((8*64)+(16*16)))) {
1043 ram_resource(dev, (idx | i), basek, ((8*64)+(8*16)) - basek);
1044 idx += 0x10;
1045 basek = (8*64)+(16*16);
1046 sizek = limitk - ((8*64)+(16*16));
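/* The constants above are in KB: (8*64)+(8*16) = 640KB (0xa0000) and
 * (8*64)+(16*16) = 768KB (0xc0000), i.e. the legacy VGA window punched out
 * of the first node's memory. */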
1050 //printk(BIOS_DEBUG, "node %d : mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n", i, mmio_basek, basek, limitk);
1052 /* split the region to accommodate pci memory space */
1053 if ((basek < 4*1024*1024 ) && (limitk > mmio_basek)) {
1054 if (basek <= mmio_basek) {
1055 unsigned pre_sizek;
1056 pre_sizek = mmio_basek - basek;
1057 if (pre_sizek>0) {
1058 ram_resource(dev, (idx | i), basek, pre_sizek);
1059 idx += 0x10;
1060 sizek -= pre_sizek;
1061 #if CONFIG_WRITE_HIGH_TABLES==1
1062 if (high_tables_base==0) {
1063 /* Leave some space for ACPI, PIRQ and MP tables */
1064 #if CONFIG_GFXUMA == 1
1065 high_tables_base = uma_memory_base - (HIGH_TABLES_SIZE * 1024);
1066 #else
1067 high_tables_base = (mmio_basek - HIGH_TABLES_SIZE) * 1024;
1068 #endif
1069 high_tables_size = HIGH_TABLES_SIZE * 1024;
1070 printk(BIOS_DEBUG, " split: %dK table at =%08llx\n", HIGH_TABLES_SIZE,
1071 high_tables_base);
1073 #endif
1075 basek = mmio_basek;
1077 if ((basek + sizek) <= 4*1024*1024) {
1078 sizek = 0;
1080 else {
1081 basek = 4*1024*1024;
1082 sizek -= (4*1024*1024 - mmio_basek);
1086 #if CONFIG_GFXUMA == 1
1087 /* Deduct uma memory before reporting because
1088 * this is what the mtrr code expects */
1089 sizek -= uma_memory_size / 1024;
1090 #endif
1091 ram_resource(dev, (idx | i), basek, sizek);
1092 idx += 0x10;
1093 #if CONFIG_WRITE_HIGH_TABLES==1
1094 printk(BIOS_DEBUG, "node %d: mmio_basek=%08lx, basek=%08llx, limitk=%08llx\n",
1095 i, mmio_basek, basek, limitk);
1096 if (high_tables_base==0) {
1097 /* Leave some space for ACPI, PIRQ and MP tables */
1098 #if CONFIG_GFXUMA == 1
1099 high_tables_base = uma_memory_base - (HIGH_TABLES_SIZE * 1024);
1100 #else
1101 high_tables_base = (limitk - HIGH_TABLES_SIZE) * 1024;
1102 #endif
1103 high_tables_size = HIGH_TABLES_SIZE * 1024;
1105 #endif
1108 #if CONFIG_GFXUMA == 1
1109 add_uma_resource(dev, 7);
1110 #endif
1112 for(link = dev->link_list; link; link = link->next) {
1113 if (link->children) {
1114 assign_resources(link);
1119 static u32 amdfam10_domain_scan_bus(device_t dev, u32 max)
1121 u32 reg;
1122 int i;
1123 struct bus *link;
1124 /* Unmap all of the HT chains */
1125 for (reg = 0xe0; reg <= 0xec; reg += 4) {
1126 f1_write_config32(reg, 0);
1128 #if CONFIG_EXT_CONF_SUPPORT == 1
1129 // all nodes
1130 for (i = 0; i< sysconf.nodes; i++) {
1131 int index;
1132 for(index = 0; index < 64; index++) {
1133 pci_write_config32(__f1_dev[i], 0x110, index | (6<<28));
1134 pci_write_config32(__f1_dev[i], 0x114, 0);
1138 #endif
1141 for (link = dev->link_list; link; link = link->next) {
1142 max = pci_scan_bus(link, PCI_DEVFN(CONFIG_CDB, 0), 0xff, max);
1145 /* Tune the hypertransport transaction for best performance.
1146 * Including enabling relaxed ordering if it is safe.
1148 get_fx_devs();
1149 for (i = 0; i < fx_devs; i++) {
1150 device_t f0_dev;
1151 f0_dev = __f0_dev[i];
1152 if (f0_dev && f0_dev->enabled) {
1153 u32 httc;
1154 httc = pci_read_config32(f0_dev, HT_TRANSACTION_CONTROL);
1155 httc &= ~HTTC_RSP_PASS_PW;
1156 if (!dev->link_list->disable_relaxed_ordering) {
1157 httc |= HTTC_RSP_PASS_PW;
1159 printk(BIOS_SPEW, "%s passpw: %s\n",
1160 dev_path(dev),
1161 (!dev->link_list->disable_relaxed_ordering)?
1162 "enabled":"disabled");
1163 pci_write_config32(f0_dev, HT_TRANSACTION_CONTROL, httc);
1166 return max;
1170 static struct device_operations pci_domain_ops = {
1171 .read_resources = amdfam10_domain_read_resources,
1172 .set_resources = amdfam10_domain_set_resources,
1173 .enable_resources = amdfam10_domain_enable_resources,
1174 .init = NULL,
1175 .scan_bus = amdfam10_domain_scan_bus,
1176 #if CONFIG_MMCONF_SUPPORT_DEFAULT
1177 .ops_pci_bus = &pci_ops_mmconf,
1178 #else
1179 .ops_pci_bus = &pci_cf8_conf1,
1180 #endif
1184 static void sysconf_init(device_t dev) // first node
1186 sysconf.sblk = (pci_read_config32(dev, 0x64)>>8) & 7; // don't forget sublink1
1187 sysconf.segbit = 0;
1188 sysconf.ht_c_num = 0;
1190 unsigned ht_c_index;
1192 for (ht_c_index=0; ht_c_index<32; ht_c_index++) {
1193 sysconf.ht_c_conf_bus[ht_c_index] = 0;
1196 sysconf.nodes = ((pci_read_config32(dev, 0x60)>>4) & 7) + 1; //NodeCnt[2:0]
1198 /* Find the bootstrap processors apicid */
1199 sysconf.bsp_apicid = lapicid();
1202 static void add_more_links(device_t dev, unsigned total_links)
1204 struct bus *link, *last = NULL;
1205 int link_num;
1207 for (link = dev->link_list; link; link = link->next)
1208 last = link;
1210 if (last) {
1211 int links = total_links - last->link_num;
1212 link_num = last->link_num;
1213 if (links > 0) {
1214 link = malloc(links*sizeof(*link));
1215 if (!link)
1216 die("Couldn't allocate more links!\n");
1217 memset(link, 0, links*sizeof(*link));
1218 last->next = link;
1221 else {
1222 link_num = -1;
1223 link = malloc(total_links*sizeof(*link));
1224 memset(link, 0, total_links*sizeof(*link));
1225 dev->link_list = link;
1228 for (link_num = link_num + 1; link_num < total_links; link_num++) {
1229 link->link_num = link_num;
1230 link->dev = dev;
1231 link->next = link + 1;
1232 last = link;
1233 link = link->next;
1235 last->next = NULL;
1238 /* dummy read_resources */
1239 static void lapic_read_resources(device_t dev)
1243 static struct device_operations lapic_ops = {
1244 .read_resources = lapic_read_resources,
1245 .set_resources = pci_dev_set_resources,
1246 .enable_resources = pci_dev_enable_resources,
1247 .init = 0,
1248 .scan_bus = 0,
1249 .enable = 0,
1250 .ops_pci = 0,
1253 static u32 cpu_bus_scan(device_t dev, u32 max)
1255 struct bus *cpu_bus;
1256 device_t dev_mc;
1257 #if CONFIG_CBB
1258 device_t pci_domain;
1259 #endif
1260 int i,j;
1261 int nodes;
1262 unsigned nb_cfg_54;
1263 unsigned siblings;
1264 int cores_found;
1265 int disable_siblings;
1266 unsigned ApicIdCoreIdSize;
1268 nb_cfg_54 = 0;
1269 ApicIdCoreIdSize = (cpuid_ecx(0x80000008)>>12 & 0xf);
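/* CPUID Fn8000_0008 ECX[15:12] (ApicIdCoreIdSize) gives the number of initial
 * APIC ID bits reserved for the core ID, so (1 << size) - 1 is the highest
 * possible core index; a value of 0 falls back to the quad-core assumption below. */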
1270 if (ApicIdCoreIdSize) {
1271 siblings = (1<<ApicIdCoreIdSize)-1;
1272 } else {
1273 siblings = 3; //quad core
1276 disable_siblings = !CONFIG_LOGICAL_CPUS;
1277 #if CONFIG_LOGICAL_CPUS == 1
1278 get_option(&disable_siblings, "multi_core");
1279 #endif
1281 // How can the BSP read nb_cfg_54 for every node?
1282 nb_cfg_54 = read_nb_cfg_54();
1284 #if CONFIG_CBB
1285 dev_mc = dev_find_slot(0, PCI_DEVFN(CONFIG_CDB, 0)); //0x00
1286 if (dev_mc && dev_mc->bus) {
1287 printk(BIOS_DEBUG, "%s found", dev_path(dev_mc));
1288 pci_domain = dev_mc->bus->dev;
1289 if (pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
1290 printk(BIOS_DEBUG, "\n%s move to ",dev_path(dev_mc));
1291 dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
1292 printk(BIOS_DEBUG, "%s",dev_path(dev_mc));
1294 } else {
1295 printk(BIOS_DEBUG, " but it is not under pci_domain directly ");
1297 printk(BIOS_DEBUG, "\n");
1299 dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
1300 if (!dev_mc) {
1301 dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0));
1302 if (dev_mc && dev_mc->bus) {
1303 printk(BIOS_DEBUG, "%s found\n", dev_path(dev_mc));
1304 pci_domain = dev_mc->bus->dev;
1305 if (pci_domain && (pci_domain->path.type == DEVICE_PATH_PCI_DOMAIN)) {
1306 if ((pci_domain->link_list) && (pci_domain->link_list->children == dev_mc)) {
1307 printk(BIOS_DEBUG, "%s move to ",dev_path(dev_mc));
1308 dev_mc->bus->secondary = CONFIG_CBB; // move to 0xff
1309 printk(BIOS_DEBUG, "%s\n",dev_path(dev_mc));
1310 while (dev_mc) {
1311 printk(BIOS_DEBUG, "%s move to ",dev_path(dev_mc));
1312 dev_mc->path.pci.devfn -= PCI_DEVFN(0x18,0);
1313 printk(BIOS_DEBUG, "%s\n",dev_path(dev_mc));
1314 dev_mc = dev_mc->sibling;
1321 #endif
1323 dev_mc = dev_find_slot(CONFIG_CBB, PCI_DEVFN(CONFIG_CDB, 0));
1324 if (!dev_mc) {
1325 printk(BIOS_ERR, "%02x:%02x.0 not found", CONFIG_CBB, CONFIG_CDB);
1326 die("");
1329 sysconf_init(dev_mc);
1331 nodes = sysconf.nodes;
1333 #if CONFIG_CBB && (NODE_NUMS > 32)
1334 if (nodes>32) { // nodes 32 to 63 need to be put on bus 0xfe
1335 if (pci_domain->link_list && !pci_domain->link_list->next) {
1336 struct bus *next_link = new_link(pci_domain); /* don't shadow the new_link() helper */
1337 pci_domain->link_list->next = next_link;
1338 next_link->link_num = 1;
1339 next_link->dev = pci_domain;
1340 next_link->children = 0;
1341 printk(BIOS_DEBUG, "%s links now 2\n", dev_path(pci_domain));
1343 pci_domain->link_list->next->secondary = CONFIG_CBB - 1;
1345 #endif
1346 /* Find which cpus are present */
1347 cpu_bus = dev->link_list;
1348 for (i = 0; i < nodes; i++) {
1349 device_t cdb_dev, cpu;
1350 struct device_path cpu_path;
1351 unsigned busn, devn;
1352 struct bus *pbus;
1354 busn = CONFIG_CBB;
1355 devn = CONFIG_CDB+i;
1356 pbus = dev_mc->bus;
1357 #if CONFIG_CBB && (NODE_NUMS > 32)
1358 if (i>=32) {
1359 busn--;
1360 devn-=32;
1361 pbus = pci_domain->link_list->next;
1363 #endif
1365 /* Find the cpu's pci device */
1366 cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
1367 if (!cdb_dev) {
1368 /* If I am probing things in a weird order
1369 * ensure all of the cpu's pci devices are found.
1371 int fn;
1372 for(fn = 0; fn <= 5; fn++) { //FBDIMM?
1373 cdb_dev = pci_probe_dev(NULL, pbus,
1374 PCI_DEVFN(devn, fn));
1376 cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
1378 if (cdb_dev) {
1379 /* OK, we need to set the links for that device,
1380 * otherwise the devices under it will not be scanned
1382 int linknum;
1383 #if CONFIG_HT3_SUPPORT==1
1384 linknum = 8;
1385 #else
1386 linknum = 4;
1387 #endif
1388 add_more_links(cdb_dev, linknum);
1391 cores_found = 0; // one core
1392 cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
1393 if (cdb_dev && cdb_dev->enabled) {
1394 j = pci_read_config32(cdb_dev, 0xe8);
1395 cores_found = (j >> 12) & 3; // dev is func 3
1396 if (siblings > 3)
1397 cores_found |= (j >> 13) & 4;
1398 printk(BIOS_DEBUG, " %s siblings=%d\n", dev_path(cdb_dev), cores_found);
1401 u32 jj;
1402 if (disable_siblings) {
1403 jj = 0;
1404 } else {
1405 jj = cores_found;
1408 for (j = 0; j <=jj; j++ ) {
1409 extern CONST OPTIONS_CONFIG_TOPOLOGY ROMDATA TopologyConfiguration;
1410 u32 modules = TopologyConfiguration.PlatformNumberOfModules;
1411 u32 lapicid_start = 0;
1413 /* Build the cpu device path */
1414 cpu_path.type = DEVICE_PATH_APIC;
1416 * APIC ID calculation is tightly coupled with AGESA v5 code.
1417 * This calculation MUST match the assignment calculation done
1418 * in LocalApicInitializationAtEarly() function.
1419 * And reference GetLocalApicIdForCore()
1421 * Apply apic enumeration rules
1422 * For systems with >= 16 APICs, put the IO-APICs at 0..n and
1423 * put the local-APICs at m..z
1424 * For systems with < 16 APICs, put the Local-APICs at 0..n and
1425 * put the IO-APICs at (n + 1)..z
1427 if (nodes * (cores_found + 1) >= 0x10) {
1428 lapicid_start = 0x10;
1430 cpu_path.apic.apic_id = (lapicid_start * (i/modules + 1)) + ((i % modules) ? (j + (cores_found + 1)) : j);
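/* Worked example with illustrative numbers only: with nodes = 4, one module
 * per node (modules = 1) and cores_found = 3, there are 4 * 4 = 16 >= 0x10
 * APICs, so lapicid_start = 0x10 and i % modules == 0 for every node; node i
 * core j then gets APIC ID 0x10 * (i + 1) + j, e.g. node 0 core 0 -> 0x10 and
 * node 3 core 3 -> 0x43, leaving IDs 0..0xf free for the IO-APICs. */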
1432 /* See if I can find the cpu */
1433 cpu = find_dev_path(cpu_bus, &cpu_path);
1435 /* Enable the cpu if I have the processor */
1436 if (cdb_dev && cdb_dev->enabled) {
1437 if (!cpu) {
1438 cpu = alloc_dev(cpu_bus, &cpu_path);
1440 if (cpu) {
1441 cpu->enabled = 1;
1445 /* Disable the cpu if I don't have the processor */
1446 if (cpu && (!cdb_dev || !cdb_dev->enabled)) {
1447 cpu->enabled = 0;
1450 /* Report what I have done */
1451 if (cpu) {
1452 cpu->path.apic.node_id = i;
1453 cpu->path.apic.core_id = j;
1454 if (cpu->path.type == DEVICE_PATH_APIC) {
1455 cpu->ops = &lapic_ops;
1457 printk(BIOS_DEBUG, "CPU: %s %s\n",
1458 dev_path(cpu), cpu->enabled?"enabled":"disabled");
1461 } //j
1463 return max;
1466 static void cpu_bus_init(device_t dev)
1468 initialize_cpus(dev->link_list);
1471 static void cpu_bus_noop(device_t dev)
1475 static void cpu_bus_read_resources(device_t dev)
1477 #if CONFIG_MMCONF_SUPPORT
1478 struct resource *resource = new_resource(dev, 0xc0010058);
1479 resource->base = CONFIG_MMCONF_BASE_ADDRESS;
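/* Each PCI bus needs 1MB of MMCONFIG space (256 devfns * 4KB per function),
 * hence the 4096*256 multiplier below. */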
1480 resource->size = CONFIG_MMCONF_BUS_NUMBER * 4096*256;
1481 resource->flags = IORESOURCE_MEM | IORESOURCE_RESERVE |
1482 IORESOURCE_FIXED | IORESOURCE_STORED | IORESOURCE_ASSIGNED;
1483 #endif
1486 static void cpu_bus_set_resources(device_t dev)
1488 struct resource *resource = find_resource(dev, 0xc0010058);
1489 if (resource) {
1490 report_resource_stored(dev, resource, " <mmconfig>");
1492 pci_dev_set_resources(dev);
1495 static struct device_operations cpu_bus_ops = {
1496 .read_resources = cpu_bus_read_resources,
1497 .set_resources = cpu_bus_set_resources,
1498 .enable_resources = cpu_bus_noop,
1499 .init = cpu_bus_init,
1500 .scan_bus = cpu_bus_scan,
1503 static void root_complex_enable_dev(struct device *dev)
1505 /* Set the operations if it is a special bus type */
1506 if (dev->path.type == DEVICE_PATH_PCI_DOMAIN) {
1507 dev->ops = &pci_domain_ops;
1509 else if (dev->path.type == DEVICE_PATH_APIC_CLUSTER) {
1510 dev->ops = &cpu_bus_ops;
1514 struct chip_operations northbridge_amd_agesa_family10_root_complex_ops = {
1515 CHIP_NAME("AMD FAM10 Root Complex")
1516 .enable_dev = root_complex_enable_dev,