x86/PCI: initialize PCI bus node numbers early
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] arch/x86/pci/common.c
blob 5db96d4304de76d276a2feb933044c3673cb0abe
/*
 *	Low-Level PCI Support for PC
 *
 *	(c) 1999--2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/dmi.h>

#include <asm/acpi.h>
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/pci_x86.h>

unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
				PCI_PROBE_MMCONF;

unsigned int pci_early_dump_regs;
static int pci_bf_sort;
int pci_routeirq;
int noioapicquirk;
#ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS
int noioapicreroute = 0;
#else
int noioapicreroute = 1;
#endif
int pcibios_last_bus = -1;
unsigned long pirq_table_addr;
struct pci_bus *pci_root_bus;
struct pci_raw_ops *raw_pci_ops;
struct pci_raw_ops *raw_pci_ext_ops;

int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
						int reg, int len, u32 *val)
{
	if (domain == 0 && reg < 256 && raw_pci_ops)
		return raw_pci_ops->read(domain, bus, devfn, reg, len, val);
	if (raw_pci_ext_ops)
		return raw_pci_ext_ops->read(domain, bus, devfn, reg, len, val);
	return -EINVAL;
}

int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
						int reg, int len, u32 val)
{
	if (domain == 0 && reg < 256 && raw_pci_ops)
		return raw_pci_ops->write(domain, bus, devfn, reg, len, val);
	if (raw_pci_ext_ops)
		return raw_pci_ext_ops->write(domain, bus, devfn, reg, len, val);
	return -EINVAL;
}

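/*
 * Usage sketch (hypothetical caller, not part of the original file): read
 * the 32-bit vendor/device ID of 0000:00:00.0 through the raw accessors.
 * Registers below 256 bytes go through raw_pci_ops (BIOS/conf1/conf2);
 * extended configuration space needs raw_pci_ext_ops, i.e. MMCONFIG.
 * The direct-access backends return 0 on success:
 *
 *	u32 id;
 *
 *	if (raw_pci_read(0, 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID, 4, &id) == 0)
 *		printk(KERN_DEBUG "PCI 0000:00:00.0 ID: %08x\n", id);
 */
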
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
				devfn, where, size, value);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
	return raw_pci_write(pci_domain_nr(bus), bus->number,
				devfn, where, size, value);
}

struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

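/*
 * pci_root_ops is handed to the bus scan helpers below and becomes bus->ops
 * for the root bus and the buses behind it, so generic code reaches pci_read()
 * and pci_write() through the usual core wrappers.  A hypothetical caller,
 * shown for illustration only:
 *
 *	u16 vendor;
 *
 *	pci_bus_read_config_word(bus, PCI_DEVFN(2, 0), PCI_VENDOR_ID, &vendor);
 */
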
/*
 * legacy, numa, and acpi all want to call pcibios_scan_root
 * from their initcalls. This flag prevents that.
 */
int pcibios_scanned;

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */
DEFINE_SPINLOCK(pci_config_lock);

static int __devinit can_skip_ioresource_align(const struct dmi_system_id *d)
{
	pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
	printk(KERN_INFO "PCI: %s detected, can skip ISA alignment\n", d->ident);
	return 0;
}

static const struct dmi_system_id can_skip_pciprobe_dmi_table[] __devinitconst = {
/*
 * Systems where PCI IO resource ISA alignment can be skipped
 * when the ISA enable bit in the bridge control is not set
 */
	{
		.callback = can_skip_ioresource_align,
		.ident = "IBM System x3800",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
		},
	},
	{
		.callback = can_skip_ioresource_align,
		.ident = "IBM System x3850",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3850"),
		},
	},
	{
		.callback = can_skip_ioresource_align,
		.ident = "IBM System x3950",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3950"),
		},
	},
	{}
};

void __init dmi_check_skip_isa_align(void)
{
	dmi_check_system(can_skip_pciprobe_dmi_table);
}

static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
{
	struct resource *rom_r = &dev->resource[PCI_ROM_RESOURCE];

	if (pci_probe & PCI_NOASSIGN_ROMS) {
		if (rom_r->parent)
			return;
		if (rom_r->start) {
			/* we deal with BIOS assigned ROM later */
			return;
		}
		rom_r->start = rom_r->end = rom_r->flags = 0;
	}
}

/*
 *  Called after each bus is probed, but before its children
 *  are examined.
 */

void __devinit pcibios_fixup_bus(struct pci_bus *b)
{
	struct pci_dev *dev;

	/* root bus? */
	if (!b->parent)
		x86_pci_root_bus_res_quirks(b);
	pci_read_bridge_bases(b);
	list_for_each_entry(dev, &b->devices, bus_list)
		pcibios_fixup_device_resources(dev);
}

/*
 * Only use DMI information to set this if nothing was passed
 * on the kernel command line (which was parsed earlier).
 */

static int __devinit set_bf_sort(const struct dmi_system_id *d)
{
	if (pci_bf_sort == pci_bf_sort_default) {
		pci_bf_sort = pci_dmi_bf;
		printk(KERN_INFO "PCI: %s detected, enabling pci=bfsort.\n", d->ident);
	}
	return 0;
}

/*
 * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus)
 */
#ifdef __i386__
static int __devinit assign_all_busses(const struct dmi_system_id *d)
{
	pci_probe |= PCI_ASSIGN_ALL_BUSSES;
	printk(KERN_INFO "%s detected: enabling PCI bus# renumbering"
			" (pci=assign-busses)\n", d->ident);
	return 0;
}
#endif

static const struct dmi_system_id __devinitconst pciprobe_dmi_table[] = {
#ifdef __i386__
/*
 * Laptops which need pci=assign-busses to see Cardbus cards
 */
	{
		.callback = assign_all_busses,
		.ident = "Samsung X20 Laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
			DMI_MATCH(DMI_PRODUCT_NAME, "SX20S"),
		},
	},
#endif		/* __i386__ */
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 1950",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1950"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 1955",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1955"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 2900",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2900"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 2950",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2950"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge R900",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R900"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL20p G3",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL20p G3"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL20p G4",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL20p G4"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL30p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL30p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL25p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL25p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL35p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL35p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL45p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL45p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL45p G2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL45p G2"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL460c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL460c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL465c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL465c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL480c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL480c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL685c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL685c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL360",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL360"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL380",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL380"),
		},
	},
#ifdef __i386__
	{
		.callback = assign_all_busses,
		.ident = "Compaq EVO N800c",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
			DMI_MATCH(DMI_PRODUCT_NAME, "EVO N800c"),
		},
	},
#endif
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL385 G2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL585 G2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
		},
	},
	{}
};

void __init dmi_check_pciprobe(void)
{
	dmi_check_system(pciprobe_dmi_table);
}

struct pci_bus * __devinit pcibios_scan_root(int busnum)
{
	struct pci_bus *bus = NULL;
	struct pci_sysdata *sd;

	while ((bus = pci_find_next_bus(bus)) != NULL) {
		if (bus->number == busnum) {
			/* Already scanned */
			return bus;
		}
	}

	/* Allocate per-root-bus (not per bus) arch-specific data.
	 * TODO: leak; this memory is never freed.
	 * It's arguable whether it's worth the trouble to care.
	 */
	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd) {
		printk(KERN_ERR "PCI: OOM, not probing PCI bus %02x\n", busnum);
		return NULL;
	}

	sd->node = get_mp_bus_to_node(busnum);

	printk(KERN_DEBUG "PCI: Probing PCI hardware (bus %02x)\n", busnum);
	bus = pci_scan_bus_parented(NULL, busnum, &pci_root_ops, sd);
	if (!bus)
		kfree(sd);

	return bus;
}

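/*
 * Rough sketch of how the legacy/NUMA/ACPI initcalls use this function; the
 * pcibios_scanned flag above is what keeps them from scanning the same root
 * bus twice (simplified, see arch/x86/pci/legacy.c for the real thing):
 *
 *	if (pcibios_scanned++)
 *		return 0;
 *
 *	pci_root_bus = pcibios_scan_root(0);
 *	if (pci_root_bus)
 *		pci_bus_add_devices(pci_root_bus);
 */
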
extern u8 pci_cache_line_size;

int __init pcibios_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (!raw_pci_ops) {
		printk(KERN_WARNING "PCI: System does not support PCI\n");
		return 0;
	}

	/*
	 * Assume PCI cacheline size of 32 bytes for all x86s except K7/K8
	 * and P4. It's also good for 386/486s (which actually have 16)
	 * as quite a few PCI devices do not support smaller values.
	 */
	pci_cache_line_size = 32 >> 2;
	if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD)
		pci_cache_line_size = 64 >> 2;	/* K7 & K8 */
	else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL)
		pci_cache_line_size = 128 >> 2;	/* P4 */

	pcibios_resource_survey();

	if (pci_bf_sort >= pci_force_bf)
		pci_sort_breadthfirst();
	return 0;
}

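/*
 * Note on units: pci_cache_line_size is kept in 32-bit words because that is
 * the unit of the PCI Cache Line Size register, hence the ">> 2" above.  For
 * example, a 64-byte K7/K8 cache line becomes 64 >> 2 == 16, and the MWI
 * setup path (pci_set_mwi()) later writes that value into a device's
 * PCI_CACHE_LINE_SIZE register when a driver asks for it.
 */
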
char * __devinit pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		pci_probe = 0;
		return NULL;
	} else if (!strcmp(str, "bfsort")) {
		pci_bf_sort = pci_force_bf;
		return NULL;
	} else if (!strcmp(str, "nobfsort")) {
		pci_bf_sort = pci_force_nobf;
		return NULL;
	}
#ifdef CONFIG_PCI_BIOS
	else if (!strcmp(str, "bios")) {
		pci_probe = PCI_PROBE_BIOS;
		return NULL;
	} else if (!strcmp(str, "nobios")) {
		pci_probe &= ~PCI_PROBE_BIOS;
		return NULL;
	} else if (!strcmp(str, "biosirq")) {
		pci_probe |= PCI_BIOS_IRQ_SCAN;
		return NULL;
	} else if (!strncmp(str, "pirqaddr=", 9)) {
		pirq_table_addr = simple_strtoul(str+9, NULL, 0);
		return NULL;
	}
#endif
#ifdef CONFIG_PCI_DIRECT
	else if (!strcmp(str, "conf1")) {
		pci_probe = PCI_PROBE_CONF1 | PCI_NO_CHECKS;
		return NULL;
	}
	else if (!strcmp(str, "conf2")) {
		pci_probe = PCI_PROBE_CONF2 | PCI_NO_CHECKS;
		return NULL;
	}
#endif
#ifdef CONFIG_PCI_MMCONFIG
	else if (!strcmp(str, "nommconf")) {
		pci_probe &= ~PCI_PROBE_MMCONF;
		return NULL;
	}
	else if (!strcmp(str, "check_enable_amd_mmconf")) {
		pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
		return NULL;
	}
#endif
	else if (!strcmp(str, "noacpi")) {
		acpi_noirq_set();
		return NULL;
	}
	else if (!strcmp(str, "noearly")) {
		pci_probe |= PCI_PROBE_NOEARLY;
		return NULL;
	}
#ifndef CONFIG_X86_VISWS
	else if (!strcmp(str, "usepirqmask")) {
		pci_probe |= PCI_USE_PIRQ_MASK;
		return NULL;
	} else if (!strncmp(str, "irqmask=", 8)) {
		pcibios_irq_mask = simple_strtol(str+8, NULL, 0);
		return NULL;
	} else if (!strncmp(str, "lastbus=", 8)) {
		pcibios_last_bus = simple_strtol(str+8, NULL, 0);
		return NULL;
	}
#endif
	else if (!strcmp(str, "rom")) {
		pci_probe |= PCI_ASSIGN_ROMS;
		return NULL;
	} else if (!strcmp(str, "norom")) {
		pci_probe |= PCI_NOASSIGN_ROMS;
		return NULL;
	} else if (!strcmp(str, "assign-busses")) {
		pci_probe |= PCI_ASSIGN_ALL_BUSSES;
		return NULL;
	} else if (!strcmp(str, "use_crs")) {
		pci_probe |= PCI_USE__CRS;
		return NULL;
	} else if (!strcmp(str, "earlydump")) {
		pci_early_dump_regs = 1;
		return NULL;
	} else if (!strcmp(str, "routeirq")) {
		pci_routeirq = 1;
		return NULL;
	} else if (!strcmp(str, "skip_isa_align")) {
		pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
		return NULL;
	} else if (!strcmp(str, "noioapicquirk")) {
		noioapicquirk = 1;
		return NULL;
	} else if (!strcmp(str, "ioapicreroute")) {
		if (noioapicreroute != -1)
			noioapicreroute = 0;
		return NULL;
	} else if (!strcmp(str, "noioapicreroute")) {
		if (noioapicreroute != -1)
			noioapicreroute = 1;
		return NULL;
	}
	return str;
}

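/*
 * These tokens arrive one at a time: pci_setup() in drivers/pci/pci.c splits
 * the "pci=" kernel parameter on commas and passes each piece to
 * pcibios_setup() first; anything returned unhandled falls through to the
 * generic PCI options.  An illustrative boot line such as
 *
 *	pci=nommconf,bfsort,lastbus=0x3f
 *
 * therefore results in three calls here, with str pointing at "nommconf",
 * "bfsort" and "lastbus=0x3f" in turn.
 */
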
unsigned int pcibios_assign_all_busses(void)
{
	return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
}

int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	int err;

	if ((err = pci_enable_resources(dev, mask)) < 0)
		return err;

	if (!pci_dev_msi_enabled(dev))
		return pcibios_enable_irq(dev);
	return 0;
}

void pcibios_disable_device (struct pci_dev *dev)
{
	if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
		pcibios_disable_irq(dev);
}

int pci_ext_cfg_avail(struct pci_dev *dev)
{
	if (raw_pci_ext_ops)
		return 1;
	else
		return 0;
}

struct pci_bus * __devinit pci_scan_bus_on_node(int busno, struct pci_ops *ops, int node)
{
	struct pci_bus *bus = NULL;
	struct pci_sysdata *sd;

	/*
	 * Allocate per-root-bus (not per bus) arch-specific data.
	 * TODO: leak; this memory is never freed.
	 * It's arguable whether it's worth the trouble to care.
	 */
	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd) {
		printk(KERN_ERR "PCI: OOM, skipping PCI bus %02x\n", busno);
		return NULL;
	}
	sd->node = node;
	bus = pci_scan_bus(busno, ops, sd);
	if (!bus)
		kfree(sd);

	return bus;
}

struct pci_bus * __devinit pci_scan_bus_with_sysdata(int busno)
{
	return pci_scan_bus_on_node(busno, &pci_root_ops, -1);
}

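/*
 * Illustrative sketch of the two wrappers above (bus numbers and node are
 * made-up values): callers that already know which NUMA node a peer root bus
 * belongs to, e.g. from northbridge registers, pass it explicitly; everyone
 * else gets the "no particular node" default of -1.
 *
 *	struct pci_bus *b1 = pci_scan_bus_on_node(0x3f, &pci_root_ops, 1);
 *	struct pci_bus *b2 = pci_scan_bus_with_sysdata(0x40);
 */
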
/*
 * NUMA info for PCI busses
 *
 * Early arch code is responsible for filling in reasonable values here.
 * A node id of "-1" means "use current node".  In other words, if a bus
 * has a -1 node id, it's not tightly coupled to any particular chunk
 * of memory (as is the case on some Nehalem systems).
 */
#ifdef CONFIG_NUMA

#define BUS_NR 256

#ifdef CONFIG_X86_64

static int mp_bus_to_node[BUS_NR] = {
	[0 ... BUS_NR - 1] = -1
};

void set_mp_bus_to_node(int busnum, int node)
{
	if (busnum >= 0 && busnum < BUS_NR)
		mp_bus_to_node[busnum] = node;
}

int get_mp_bus_to_node(int busnum)
{
	int node = -1;

	if (busnum < 0 || busnum > (BUS_NR - 1))
		return node;

	node = mp_bus_to_node[busnum];

	/*
	 * Let numa_node_id() decide it later in dma_alloc_pages()
	 * if there is no RAM on that node.
	 */
	if (node != -1 && !node_online(node))
		node = -1;

	return node;
}

#else /* CONFIG_X86_32 */

static unsigned char mp_bus_to_node[BUS_NR] = {
	[0 ... BUS_NR - 1] = -1
};

void set_mp_bus_to_node(int busnum, int node)
{
	if (busnum >= 0 && busnum < BUS_NR)
		mp_bus_to_node[busnum] = (unsigned char) node;
}

int get_mp_bus_to_node(int busnum)
{
	int node;

	if (busnum < 0 || busnum > (BUS_NR - 1))
		return 0;
	node = mp_bus_to_node[busnum];
	return node;
}

#endif /* CONFIG_X86_32 */

#endif /* CONFIG_NUMA */
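
/*
 * The patch title above ("initialize PCI bus node numbers early") refers to
 * filling mp_bus_to_node[] early enough in boot that pcibios_scan_root()'s
 * get_mp_bus_to_node() call already sees real node numbers.  A made-up sketch
 * of what such early arch code would do for a root bus range known to sit on
 * node 0 (real code derives the range and node from hardware, e.g. the AMD
 * northbridge registers):
 *
 *	int bus;
 *
 *	for (bus = 0x00; bus <= 0x3f; bus++)
 *		set_mp_bus_to_node(bus, 0);
 */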