/*
 * probe.c - PCI detection and setup code
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include <asm-generic/pci-bridge.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3

struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};

/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

static LIST_HEAD(pci_domain_busn_res_list);

struct pci_domain_busn_res {
	struct list_head list;
	struct resource res;
	int domain_nr;
};

static struct resource *get_pci_domain_busn_res(int domain_nr)
{
	struct pci_domain_busn_res *r;

	list_for_each_entry(r, &pci_domain_busn_res_list, list)
		if (r->domain_nr == domain_nr)
			return &r->res;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->domain_nr = domain_nr;
	r->res.start = 0;
	r->res.end = 0xff;
	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;

	list_add_tail(&r->list, &pci_domain_busn_res_list);

	return &r->res;
}

static int find_anything(struct device *dev, void *data)
{
	return 1;
}

/*
 * Some device drivers need to know if PCI is initialized.
 * Basically, we consider PCI to be uninitialized when there
 * is no device to be found on the pci_bus_type.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);

/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	if (pci_bus->bridge)
		put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);
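
/*
 * Given the BAR contents before (base) and after (maxbase) writing all 1s,
 * and the address mask for this BAR type, return the size of the decoded
 * region, or 0 if the BAR looks unimplemented or broken.
 */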
static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent.  */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s.  */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}
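
/*
 * Decode a raw BAR value into IORESOURCE_* flags: I/O vs. memory space,
 * prefetchable, and 32- vs. 64-bit memory decoding.
 */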
static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		/* mem unknown type treated as 32-bit BAR */
		break;
	}
	return flags;
}

#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;
	bool bar_too_big = false, bar_disabled = false;

	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (!sz || sz == 0xffffffff)
		goto fail;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l &= PCI_BASE_ADDRESS_IO_MASK;
			mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
		} else {
			l &= PCI_BASE_ADDRESS_MEM_MASK;
			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		res->flags |= (l & IORESOURCE_ROM_ENABLE);
		l &= PCI_ROM_ADDRESS_MASK;
		mask = (u32)PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		u64 l64 = l;
		u64 sz64 = sz;
		u64 mask64 = mask | (u64)~0 << 32;

		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);

		sz64 = pci_size(l64, sz64, mask64);

		if (!sz64)
			goto fail;

		if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
			bar_too_big = true;
			goto fail;
		}

		if ((sizeof(resource_size_t) < 8) && l) {
			/* Address above 32-bit boundary; disable the BAR */
			pci_write_config_dword(dev, pos, 0);
			pci_write_config_dword(dev, pos + 4, 0);
			region.start = 0;
			region.end = sz64;
			bar_disabled = true;
		} else {
			region.start = l64;
			region.end = l64 + sz64;
		}
	} else {
		sz = pci_size(l, sz, mask);

		if (!sz)
			goto fail;

		region.start = l;
		region.end = l + sz;
	}

	pcibios_bus_to_resource(dev, res, &region);
	pcibios_resource_to_bus(dev, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU).  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %pa invalid; forcing reassignment\n",
			 pos, &region.start);
		res->flags |= IORESOURCE_UNSET;
		res->end -= res->start;
		res->start = 0;
	}

	goto out;


fail:
	res->flags = 0;
out:
	if (!dev->mmio_always_on &&
	    (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (bar_too_big)
		dev_err(&dev->dev, "reg 0x%x: can't handle 64-bit BAR\n", pos);
	if (res->flags && !bar_disabled)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
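
/*
 * Probe up to "howmany" standard BARs of a device, plus the expansion ROM
 * BAR at config offset "rom" if non-zero, filling in dev->resource[].
 */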
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
				IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}
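
/*
 * The next three helpers read the I/O, memory and prefetchable memory
 * windows of a PCI-PCI bridge from its config space and convert them into
 * resources on the secondary bus.
 */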
static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
#if BITS_PER_LONG == 64
			base |= ((unsigned long) mem_base_hi) << 32;
			limit |= ((unsigned long) mem_limit_hi) << 32;
#else
			if (mem_base_hi || mem_limit_hi) {
				dev_err(&dev->dev, "can't handle 64-bit "
					"address space for bridge\n");
				return;
			}
#endif
		}
	}
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}
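
/* Allocate and minimally initialize a struct pci_bus (not registered yet). */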
static struct pci_bus *pci_alloc_bus(void)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return NULL;

	INIT_LIST_HEAD(&b->node);
	INIT_LIST_HEAD(&b->children);
	INIT_LIST_HEAD(&b->devices);
	INIT_LIST_HEAD(&b->slots);
	INIT_LIST_HEAD(&b->resources);
	b->max_bus_speed = PCI_SPEED_UNKNOWN;
	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
	return b;
}

static void pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}

static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->bus = b;
	return bridge;
}

const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};

void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);

static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};

static enum pci_bus_speed agp_speed(int agp3, int agpstat)
{
	int index = 0;

	if (agpstat & 4)
		index = 3;
	else if (agpstat & 2)
		index = 2;
	else if (agpstat & 1)
		index = 1;
	else
		goto out;

	if (agp3) {
		index += 2;
		if (index == 5)
			index = 0;
	}

 out:
	return agp_speeds[index];
}
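
/*
 * Record the maximum and current bus speeds for a bus, based on the
 * bridge's AGP, PCI-X or PCI Express capability registers.
 */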
static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2) {
				max = PCI_SPEED_133MHz_PCIX_ECC;
			} else {
				max = PCI_SPEED_133MHz_PCIX;
			}
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
	if (pos) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}
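
/*
 * Allocate a child bus below the given parent/bridge, inheriting ops and
 * sysdata from the parent, register its device and create its legacy files.
 */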
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus();
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}

struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}

static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
{
	struct pci_bus *parent = child->parent;

	/* Attempts to fix that up are really dangerous unless
	   we're going to re-assign all bus numbers. */
	if (!pcibios_assign_all_busses())
		return;

	while (parent->parent && parent->busn_res.end < max) {
		parent->busn_res.end = max;
		pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
		parent = parent->parent;
	}
}

/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * If we already got to this bus through a different bridge,
		 * don't re-add it. This can happen with the i450NX chipset.
		 *
		 * However, we continue to descend down the hierarchy and
		 * scan remaining child buses.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > max)
			max = cmax;
		if (child->busn_res.end > max)
			max = child->busn_res.end;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus. */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, ++max);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max, 0xff);
		}
		buses = (buses & 0xff000000)
			| ((unsigned int)(child->primary) << 0)
			| ((unsigned int)(child->busn_res.start) << 8)
			| ((unsigned int)(child->busn_res.end) << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			/*
			 * Adjust subordinate busnr in parent buses.
			 * We do this before scanning for children because
			 * some devices may not be detected if the bios
			 * was lazy.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
			/* Now we can scan all subordinate buses... */
			max = pci_scan_child_bus(child);
			/*
			 * now fix it up again since we have found
			 * the real value of max.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
							max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
			pci_fixup_parent_subordinate_busnr(child, max);
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s "
				 "hidden behind%s bridge %s %pR\n",
				 &child->busn_res,
				 (bus->number > child->busn_res.end &&
				  bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				 bus->self->transparent ? " transparent" : "",
				 dev_name(&bus->dev),
				 &bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	return max;
}

/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;
	pdev->is_pcie = 1;
	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
}

void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}

#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and I/O-space addresses, IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and negative if unknown type of device (not normal,
 * bridge or CardBus).
 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u8 hdr_type;
	struct pci_slot *slot;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	list_for_each_entry(slot, &dev->bus->slots, list)
		if (PCI_SLOT(dev->devfn) == slot->number)
			dev->slot = slot;

	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		/* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	switch (dev->hdr_type) {		/* header type */
	case PCI_HEADER_TYPE_NORMAL:		/* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code. Legacy mode ATA controllers have fixed
		 * addresses. These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
			}
			if ((progif & 4) == 0) {
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev, res, &region);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		/* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		/* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				/* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, "
			"ignoring device\n", dev->hdr_type);
		return -EIO;

	bad:
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header "
			"type %02x)\n", dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}

static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}

/**
 * pci_release_dev - free a pci device structure when all users of it are finished.
 * @dev: device that's been disconnected
 *
 * Will be called only by the device core when all users of this pci device are
 * done.
 */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_release_capabilities(pci_dev);
	pci_release_of_node(pci_dev);
	pcibios_release_device(pci_dev);
	pci_bus_put(pci_dev->bus);
	kfree(pci_dev);
}

/**
 * pci_cfg_space_size - get the configuration space size of the PCI device.
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;
	int pos = PCI_CFG_SPACE_SIZE;

	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
		goto fail;
	if (status == 0xffffffff)
		goto fail;

	return PCI_CFG_SPACE_EXP_SIZE;

 fail:
	return PCI_CFG_SPACE_SIZE;
}

int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;
	u16 class;

	class = dev->class >> 8;
	if (class == PCI_CLASS_BRIDGE_HOST)
		return pci_cfg_space_size_ext(dev);

	if (!pci_is_pcie(dev)) {
		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
		if (!pos)
			goto fail;

		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
			goto fail;
	}

	return pci_cfg_space_size_ext(dev);

 fail:
	return PCI_CFG_SPACE_SIZE;
}

struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
{
	struct pci_dev *dev;

	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	INIT_LIST_HEAD(&dev->bus_list);
	dev->dev.type = &pci_dev_type;
	dev->bus = pci_bus_get(bus);

	return dev;
}
EXPORT_SYMBOL(pci_alloc_dev);

struct pci_dev *alloc_pci_dev(void)
{
	return pci_alloc_dev(NULL);
}
EXPORT_SYMBOL(alloc_pci_dev);
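
/*
 * Read the Vendor/Device ID dword for bus/devfn, treating all-ones and
 * all-zeros patterns as "no device present" and retrying for up to
 * crs_timeout ms while the device returns Configuration Request Retry Status.
 */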
bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				int crs_timeout)
{
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
		return false;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (*l == 0xffffffff || *l == 0x00000000 ||
	    *l == 0x0000ffff || *l == 0xffff0000)
		return false;

	/* Configuration request Retry Status */
	while (*l == 0xffff0001) {
		if (!crs_timeout)
			return false;

		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
		/* Card hasn't responded in 60 seconds?  Must be stuck. */
		if (delay > crs_timeout) {
			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
					"responding\n", pci_domain_nr(bus),
					bus->number, PCI_SLOT(devfn),
					PCI_FUNC(devfn));
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);

/*
 * Read the config data for a PCI device, sanity-check it
 * and fill in the dev structure...
 */
static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	u32 l;

	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
		return NULL;

	dev = pci_alloc_dev(bus);
	if (!dev)
		return NULL;

	dev->devfn = devfn;
	dev->vendor = l & 0xffff;
	dev->device = (l >> 16) & 0xffff;

	pci_set_of_node(dev);

	if (pci_setup_device(dev)) {
		pci_bus_put(dev->bus);
		kfree(dev);
		return NULL;
	}

	return dev;
}

static void pci_init_capabilities(struct pci_dev *dev)
{
	/* MSI/MSI-X list */
	pci_msi_init_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_pci22_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);
}

void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);

	pci_proc_attach_device(dev);
}

struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;

	dev = pci_get_slot(bus, devfn);
	if (dev) {
		pci_dev_put(dev);
		return dev;
	}

	dev = pci_scan_device(bus, devfn);
	if (!dev)
		return NULL;

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(pci_scan_single_device);
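
/*
 * Return the next function number to probe on this slot: the ARI "next
 * function" pointer when ARI is enabled, otherwise (fn + 1) % 8 for
 * multifunction devices.  A return of 0 terminates the scan.
 */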
static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
{
	int pos;
	u16 cap = 0;
	unsigned next_fn;

	if (pci_ari_enabled(bus)) {
		if (!dev)
			return 0;
		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
		if (!pos)
			return 0;

		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
		next_fn = PCI_ARI_CAP_NFN(cap);
		if (next_fn <= fn)
			return 0;	/* protect against malformed list */

		return next_fn;
	}

	/* dev may be NULL for non-contiguous multifunction devices */
	if (!dev || dev->multifunction)
		return (fn + 1) % 8;

	return 0;
}
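
/*
 * Return 1 if the parent bridge is a PCIe root port or (unless
 * PCI_SCAN_ALL_PCIE_DEVS is set) a downstream port, in which case only
 * device 0 of the slot needs to be scanned.
 */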
static int only_one_child(struct pci_bus *bus)
{
	struct pci_dev *parent = bus->self;

	if (!parent || !pci_is_pcie(parent))
		return 0;
	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
		return 1;
	if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
		return 1;
	return 0;
}

/**
 * pci_scan_slot - scan a PCI slot on a bus for devices.
 * @bus: PCI bus to scan
 * @devfn: slot number to scan (must have zero function.)
 *
 * Scan a PCI slot on the specified PCI bus for devices, adding
 * discovered devices to the @bus->devices list.  New devices
 * will not have is_added set.
 *
 * Returns the number of new devices found.
 */
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
	unsigned fn, nr = 0;
	struct pci_dev *dev;

	if (only_one_child(bus) && (devfn > 0))
		return 0; /* Already scanned the entire slot */

	dev = pci_scan_single_device(bus, devfn);
	if (!dev)
		return 0;
	if (!dev->is_added)
		nr++;

	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
		dev = pci_scan_single_device(bus, devfn + fn);
		if (dev) {
			if (!dev->is_added)
				nr++;
			dev->multifunction = 1;
		}
	}

	/* only one slot has pcie device */
	if (bus->self && nr)
		pcie_aspm_init_link_state(bus->self);

	return nr;
}

static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	if (!pci_is_pcie(dev))
		return 0;

	/*
	 * We don't have a way to change MPS settings on devices that have
	 * drivers attached.  A hot-added device might support only the minimum
	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
	 * where devices may be hot-added, we limit the fabric MPS to 128 so
	 * hot-added devices will work correctly.
	 *
	 * However, if we hot-add a device to a slot directly below a Root
	 * Port, it's impossible for there to be other existing devices below
	 * the port.  We don't limit the MPS in this case because we can
	 * reconfigure MPS on both the Root Port and the hot-added device,
	 * and there are no other devices involved.
	 *
	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
	 */
	if (dev->is_hotplug_bridge &&
	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		*smpss = 0;

	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}

static void pcie_write_mps(struct pci_dev *dev, int mps)
{
	int rc;

	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		mps = 128 << dev->pcie_mpss;

		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
		    dev->bus->self)
			/* For "Performance", the assumption is made that
			 * downstream communication will never be larger than
			 * the MRRS.  So, the MPS only needs to be configured
			 * for the upstream communication.  This being the case,
			 * walk from the top down and set the MPS of the child
			 * to that of the parent bus.
			 *
			 * Configure the device MPS with the smaller of the
			 * device MPSS or the bridge MPS (which is assumed to be
			 * properly configured at this point to the largest
			 * allowable MPS based on its parent bus).
			 */
			mps = min(mps, pcie_get_mps(dev->bus->self));
	}

	rc = pcie_set_mps(dev, mps);
	if (rc)
		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
}

static void pcie_write_mrrs(struct pci_dev *dev)
{
	int rc, mrrs;

	/* In the "safe" case, do not configure the MRRS.  There appear to be
	 * issues with setting MRRS to 0 on a number of devices.
	 */
	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
		return;

	/* For Max performance, the MRRS must be set to the largest supported
	 * value.  However, it cannot be configured larger than the MPS the
	 * device or the bus can support.  This should already be properly
	 * configured by a prior call to pcie_write_mps.
	 */
	mrrs = pcie_get_mps(dev);

	/* MRRS is a R/W register.  Invalid values can be written, but a
	 * subsequent read will verify if the value is acceptable or not.
	 * If the MRRS value provided is not acceptable (e.g., too large),
	 * shrink the value until it is acceptable to the HW.
	 */
	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
		rc = pcie_set_readrq(dev, mrrs);
		if (!rc)
			break;

		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
		mrrs /= 2;
	}

	if (mrrs < 128)
		dev_err(&dev->dev, "MRRS was unable to be configured with a "
			"safe value.  If problems are experienced, try running "
			"with pci=pcie_bus_safe.\n");
}

static void pcie_bus_detect_mps(struct pci_dev *dev)
{
	struct pci_dev *bridge = dev->bus->self;
	int mps, p_mps;

	if (!bridge)
		return;

	mps = pcie_get_mps(dev);
	p_mps = pcie_get_mps(bridge);

	if (mps != p_mps)
		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 mps, pci_name(bridge), p_mps);
}

static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
	int mps, orig_mps;

	if (!pci_is_pcie(dev))
		return 0;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
		pcie_bus_detect_mps(dev);
		return 0;
	}

	mps = 128 << *(u8 *)data;
	orig_mps = pcie_get_mps(dev);

	pcie_write_mps(dev, mps);
	pcie_write_mrrs(dev);

	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), "
		 "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
		 orig_mps, pcie_get_readrq(dev));

	return 0;
}

/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
 * parents then children fashion.  If this changes, then this code will not
 * work as designed.
 */
void pcie_bus_configure_settings(struct pci_bus *bus)
{
	u8 smpss;

	if (!bus->self)
		return;

	if (!pci_is_pcie(bus->self))
		return;

	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
	 * to be aware of the MPS of the destination.  To work around this,
	 * simply force the MPS of the entire system to the smallest possible.
	 */
	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
		smpss = 0;

	if (pcie_bus_config == PCIE_BUS_SAFE) {
		smpss = bus->self->pcie_mpss;

		pcie_find_smpss(bus->self, &smpss);
		pci_walk_bus(bus, pcie_find_smpss, &smpss);
	}

	pcie_bus_configure_set(bus->self, &smpss);
	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
}
EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);

unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	for (pass=0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}

/**
 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
 * @bridge: Host bridge to set up.
 *
 * Default empty implementation.  Replace with an architecture-specific setup
 * routine, if necessary.
 */
int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	return 0;
}

void __weak pcibios_add_bus(struct pci_bus *bus)
{
}

void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}
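
/*
 * Create and register a root bus and its host bridge for the given bus
 * number, attach the caller-provided resource windows, and log them.  The
 * bus is not scanned here; see pci_scan_root_bus() for that.
 */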
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	int error;
	struct pci_host_bridge *bridge;
	struct pci_bus *b, *b2;
	struct pci_host_bridge_window *window, *n;
	struct resource *res;
	resource_size_t offset;
	char bus_addr[64];
	char *fmt;

	b = pci_alloc_bus();
	if (!b)
		return NULL;

	b->sysdata = sysdata;
	b->ops = ops;
	b->number = b->busn_res.start = bus;
	b2 = pci_find_bus(pci_domain_nr(b), bus);
	if (b2) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b2->dev, "bus already known\n");
		goto err_out;
	}

	bridge = pci_alloc_host_bridge(b);
	if (!bridge)
		goto err_out;

	bridge->dev.parent = parent;
	bridge->dev.release = pci_release_host_bridge_dev;
	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = pcibios_root_bridge_prepare(bridge);
	if (error) {
		kfree(bridge);
		goto err_out;
	}

	error = device_register(&bridge->dev);
	if (error) {
		put_device(&bridge->dev);
		goto err_out;
	}
	b->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(b->bridge);
	pci_set_bus_of_node(b);

	if (!parent)
		set_dev_node(b->bridge, pcibus_to_node(b));

	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;

	pcibios_add_bus(b);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
	else
		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));

	/* Add initial resources to the bus */
	list_for_each_entry_safe(window, n, resources, list) {
		list_move_tail(&window->list, &bridge->windows);
		res = window->res;
		offset = window->offset;
		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(b, bus, res->end);
		else
			pci_bus_add_resource(b, res, 0);
		if (offset) {
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";
			snprintf(bus_addr, sizeof(bus_addr), fmt,
				 (unsigned long long) (res->start - offset),
				 (unsigned long long) (res->end - offset));
		} else
			bus_addr[0] = '\0';
		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return b;

class_dev_reg_err:
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);
err_out:
	kfree(b);
	return NULL;
}

int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource *parent_res, *conflict;

	res->start = bus;
	res->end = bus_max;
	res->flags = IORESOURCE_BUS;

	if (!pci_is_root_bus(b))
		parent_res = &b->parent->busn_res;
	else {
		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
		res->flags |= IORESOURCE_PCI_FIXED;
	}

	conflict = insert_resource_conflict(parent_res, res);

	if (conflict)
		dev_printk(KERN_DEBUG, &b->dev,
			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
			   res, pci_is_root_bus(b) ? "domain " : "",
			   parent_res, conflict->name, conflict);

	return conflict == NULL;
}

int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource old_res = *res;
	resource_size_t size;
	int ret;

	if (res->start > bus_max)
		return -EINVAL;

	size = bus_max - res->start + 1;
	ret = adjust_resource(res, res->start, size);
	dev_printk(KERN_DEBUG, &b->dev,
			"busn_res: %pR end %s updated to %02x\n",
			&old_res, ret ? "can not be" : "is", bus_max);

	if (!ret && !res->parent)
		pci_bus_insert_busn_res(b, res->start, res->end);

	return ret;
}

void pci_bus_release_busn_res(struct pci_bus *b)
{
	struct resource *res = &b->busn_res;
	int ret;

	if (!res->flags || !res->parent)
		return;

	ret = release_resource(res);
	dev_printk(KERN_DEBUG, &b->dev,
			"busn_res: %pR %s released\n",
			res, ret ? "can not be" : "is");
}

struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	struct pci_host_bridge_window *window;
	bool found = false;
	struct pci_bus *b;
	int max;

	list_for_each_entry(window, resources, list)
		if (window->res->flags & IORESOURCE_BUS) {
			found = true;
			break;
		}

	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
	if (!b)
		return NULL;

	if (!found) {
		dev_info(&b->dev,
			 "No busn resource found for root bus, will use [bus %02x-ff]\n",
			 bus);
		pci_bus_insert_busn_res(b, bus, 255);
	}

	max = pci_scan_child_bus(b);

	if (!found)
		pci_bus_update_busn_res_end(b, max);

	pci_bus_add_devices(b);
	return b;
}
EXPORT_SYMBOL(pci_scan_root_bus);

/* Deprecated; use pci_scan_root_bus() instead */
struct pci_bus *pci_scan_bus_parented(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	pci_add_resource(&resources, &busn_resource);
	b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
	if (b)
		pci_scan_child_bus(b);
	else
		pci_free_resource_list(&resources);
	return b;
}
EXPORT_SYMBOL(pci_scan_bus_parented);

struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
					void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	pci_add_resource(&resources, &busn_resource);
	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
	if (b) {
		pci_scan_child_bus(b);
		pci_bus_add_devices(b);
	} else {
		pci_free_resource_list(&resources);
	}
	return b;
}
EXPORT_SYMBOL(pci_scan_bus);

/**
 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
 * @bridge: PCI bridge for the bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them, resizing bridge mmio/io resource if necessary
 * and possible.  The caller must ensure the child devices are already
 * removed for resizing to occur.
 *
 * Returns the max number of subordinate bus discovered.
 */
unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
{
	unsigned int max;
	struct pci_bus *bus = bridge->subordinate;

	max = pci_scan_child_bus(bus);

	pci_assign_unassigned_bridge_resources(bridge);

	pci_bus_add_devices(bus);

	return max;
}

/**
 * pci_rescan_bus - scan a PCI bus for devices.
 * @bus: PCI bus to scan
 *
 * Scan a PCI bus and child buses for new devices, adds them,
 * and enables them.
 *
 * Returns the max number of subordinate bus discovered.
 */
unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
{
	unsigned int max;

	max = pci_scan_child_bus(bus);
	pci_assign_unassigned_bus_resources(bus);
	pci_bus_add_devices(bus);

	return max;
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);

EXPORT_SYMBOL(pci_add_new_bus);
EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bridge);
EXPORT_SYMBOL_GPL(pci_scan_child_bus);

static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
{
	const struct pci_dev *a = to_pci_dev(d_a);
	const struct pci_dev *b = to_pci_dev(d_b);

	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;

	if      (a->bus->number < b->bus->number) return -1;
	else if (a->bus->number > b->bus->number) return  1;

	if      (a->devfn < b->devfn) return -1;
	else if (a->devfn > b->devfn) return  1;

	return 0;
}

void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}