drivers/pci/probe.c
/*
 *	probe.c - PCI detection and setup code
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include "pci.h"
#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3
#define PCI_CFG_SPACE_SIZE	256
#define PCI_CFG_SPACE_EXP_SIZE	4096
/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);
static int find_anything(struct device *dev, void *data)
{
	return 1;
}
/*
 * Some device drivers need to know if PCI has been initialized.
 * Basically, we consider PCI not yet initialized when there is
 * no device to be found on the pci_bus_type.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);
#ifdef HAVE_PCI_LEGACY
/**
 * pci_create_legacy_files - create legacy I/O port and memory files
 * @b: bus to create files under
 *
 * Some platforms allow access to legacy I/O port and ISA memory space on
 * a per-bus basis.  This routine creates the files and ties them into
 * their associated read, write and mmap files from pci-sysfs.c
 */
static void pci_create_legacy_files(struct pci_bus *b)
{
	b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2,
			       GFP_ATOMIC);
	if (b->legacy_io) {
		b->legacy_io->attr.name = "legacy_io";
		b->legacy_io->size = 0xffff;
		b->legacy_io->attr.mode = S_IRUSR | S_IWUSR;
		b->legacy_io->read = pci_read_legacy_io;
		b->legacy_io->write = pci_write_legacy_io;
		device_create_bin_file(&b->dev, b->legacy_io);

		/* Allocated above after the legacy_io struct */
		b->legacy_mem = b->legacy_io + 1;
		b->legacy_mem->attr.name = "legacy_mem";
		b->legacy_mem->size = 1024*1024;
		b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
		b->legacy_mem->mmap = pci_mmap_legacy_mem;
		device_create_bin_file(&b->dev, b->legacy_mem);
	}
}

void pci_remove_legacy_files(struct pci_bus *b)
{
	if (b->legacy_io) {
		device_remove_bin_file(&b->dev, b->legacy_io);
		device_remove_bin_file(&b->dev, b->legacy_mem);
		kfree(b->legacy_io); /* both are allocated here */
	}
}
#else /* !HAVE_PCI_LEGACY */
static inline void pci_create_legacy_files(struct pci_bus *bus) { return; }
void pci_remove_legacy_files(struct pci_bus *bus) { return; }
#endif /* HAVE_PCI_LEGACY */
/*
 * PCI Bus Class Devices
 */
static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
					int type,
					struct device_attribute *attr,
					char *buf)
{
	int ret;
	cpumask_t cpumask;

	cpumask = pcibus_to_cpumask(to_pci_bus(dev));
	ret = type ?
		cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask) :
		cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask);
	buf[ret++] = '\n';
	buf[ret] = '\0';
	return ret;
}

static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
}

static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return pci_bus_show_cpuaffinity(dev, 1, attr, buf);
}

DEVICE_ATTR(cpuaffinity, S_IRUGO, pci_bus_show_cpumaskaffinity, NULL);
DEVICE_ATTR(cpulistaffinity, S_IRUGO, pci_bus_show_cpulistaffinity, NULL);
/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	if (pci_bus->bridge)
		put_device(pci_bus->bridge);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);
/*
 * Translate the low bits of the PCI base
 * to the resource type
 */
static inline unsigned int pci_calc_resource_flags(unsigned int flags)
{
	if (flags & PCI_BASE_ADDRESS_SPACE_IO)
		return IORESOURCE_IO;

	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		return IORESOURCE_MEM | IORESOURCE_PREFETCH;

	return IORESOURCE_MEM;
}
/*
 * Find the extent of a PCI decode..
 */
static u32 pci_size(u32 base, u32 maxbase, u32 mask)
{
	u32 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent.  */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s.  */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}
static u64 pci_size64(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent.  */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s.  */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}
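/*
 * Illustrative example of the sizing math above: a 32-bit memory BAR
 * that decodes 1 MiB reads back 0xfff00000 after being written with
 * all 1s.  Masking with PCI_BASE_ADDRESS_MEM_MASK and isolating the
 * lowest set bit gives 0x00100000, so pci_size() returns 0x000fffff,
 * i.e. the extent (size - 1) that pci_read_bases() later adds to
 * res->start to form res->end.
 */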
static inline int is_64bit_memory(u32 mask)
{
	if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
	    (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64))
		return 1;
	return 0;
}
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg, next;
	u32 l, sz;
	struct resource *res;

	for (pos = 0; pos < howmany; pos = next) {
		u64 l64;
		u64 sz64;
		u32 raw_sz;

		next = pos + 1;
		res = &dev->resource[pos];
		res->name = pci_name(dev);
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pci_read_config_dword(dev, reg, &l);
		pci_write_config_dword(dev, reg, ~0);
		pci_read_config_dword(dev, reg, &sz);
		pci_write_config_dword(dev, reg, l);
		if (!sz || sz == 0xffffffff)
			continue;
		if (l == 0xffffffff)
			l = 0;
		raw_sz = sz;
		if ((l & PCI_BASE_ADDRESS_SPACE) ==
				PCI_BASE_ADDRESS_SPACE_MEMORY) {
			sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK);
			/*
			 * For 64bit prefetchable memory sz could be 0, if the
			 * real size is bigger than 4G, so we need to check
			 * szhi for that.
			 */
			if (!is_64bit_memory(l) && !sz)
				continue;
			res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
			res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
		} else {
			sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff);
			if (!sz)
				continue;
			res->start = l & PCI_BASE_ADDRESS_IO_MASK;
			res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK;
		}
		res->end = res->start + (unsigned long) sz;
		res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
		if (is_64bit_memory(l)) {
			u32 szhi, lhi;

			pci_read_config_dword(dev, reg+4, &lhi);
			pci_write_config_dword(dev, reg+4, ~0);
			pci_read_config_dword(dev, reg+4, &szhi);
			pci_write_config_dword(dev, reg+4, lhi);
			sz64 = ((u64)szhi << 32) | raw_sz;
			l64 = ((u64)lhi << 32) | l;
			sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK);
			next++;
#if BITS_PER_LONG == 64
			if (!sz64) {
				res->start = 0;
				res->end = 0;
				res->flags = 0;
				continue;
			}
			res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK;
			res->end = res->start + sz64;
#else
			if (sz64 > 0x100000000ULL) {
				printk(KERN_ERR "PCI: Unable to handle 64-bit "
					"BAR for device %s\n", pci_name(dev));
				res->start = 0;
				res->flags = 0;
			} else if (lhi) {
				/* 64-bit wide address, treat as disabled */
				pci_write_config_dword(dev, reg,
					l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK);
				pci_write_config_dword(dev, reg+4, 0);
				res->start = 0;
				res->end = sz;
			}
#endif
		}
	}
	if (rom) {
		dev->rom_base_reg = rom;
		res = &dev->resource[PCI_ROM_RESOURCE];
		res->name = pci_name(dev);
		pci_read_config_dword(dev, rom, &l);
		pci_write_config_dword(dev, rom, ~PCI_ROM_ADDRESS_ENABLE);
		pci_read_config_dword(dev, rom, &sz);
		pci_write_config_dword(dev, rom, l);
		if (l == 0xffffffff)
			l = 0;
		if (sz && sz != 0xffffffff) {
			sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK);
			if (sz) {
				res->flags = (l & IORESOURCE_ROM_ENABLE) |
					IORESOURCE_MEM | IORESOURCE_PREFETCH |
					IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
					IORESOURCE_SIZEALIGN;
				res->start = l & PCI_ROM_ADDRESS_MASK;
				res->end = res->start + (unsigned long) sz;
			}
		}
	}
}
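/*
 * Note on the bridge window decoding below: the I/O base/limit
 * registers hold address bits 15:12, so I/O windows have 4K
 * granularity (hence "limit + 0xfff"), while the memory and
 * prefetchable windows hold bits 31:20 and have 1M granularity
 * (hence "limit + 0xfffff").
 */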
void __devinit pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct resource *res;
	int i;

	if (!dev)		/* It's a host bus, nothing to read */
		return;

	if (dev->transparent) {
		printk(KERN_INFO "PCI: Transparent bridge - %s\n", pci_name(dev));
		for (i = 3; i < PCI_BUS_NUM_RESOURCES; i++)
			child->resource[i] = child->parent->resource[i - 3];
	}

	for (i = 0; i < 3; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
	limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;
		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= (io_base_hi << 16);
		limit |= (io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		if (!res->start)
			res->start = base;
		if (!res->end)
			res->end = limit + 0xfff;
	}

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		res->start = base;
		res->end = limit + 0xfffff;
	}
	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;
		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
#if BITS_PER_LONG == 64
			base |= ((long) mem_base_hi) << 32;
			limit |= ((long) mem_limit_hi) << 32;
#else
			if (mem_base_hi || mem_limit_hi) {
				printk(KERN_ERR "PCI: Unable to handle 64-bit address space for bridge %s\n", pci_name(dev));
				return;
			}
#endif
		}
	}
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
		res->start = base;
		res->end = limit + 0xfffff;
	}
}
static struct pci_bus *pci_alloc_bus(void)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (b) {
		INIT_LIST_HEAD(&b->node);
		INIT_LIST_HEAD(&b->children);
		INIT_LIST_HEAD(&b->devices);
	}
	return b;
}
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus();
	if (!child)
		return NULL;

	child->self = bridge;
	child->parent = parent;
	child->ops = parent->ops;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;
	child->bridge = get_device(&bridge->dev);

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.  This device will get
	 * registered later in pci_bus_add_devices()
	 */
	child->dev.class = &pcibus_class;
	sprintf(child->dev.bus_id, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->secondary = busnr;
	child->primary = parent->secondary;
	child->subordinate = 0xff;

	/* Set up default resource pointers and names.. */
	for (i = 0; i < 4; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

	return child;
}
struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}
static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
{
	struct pci_bus *parent = child->parent;

	/* Attempts to fix that up are really dangerous unless
	   we're going to re-assign all bus numbers. */
	if (!pcibios_assign_all_busses())
		return;

	while (parent->parent && parent->subordinate < max) {
		parent->subordinate = max;
		pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
		parent = parent->parent;
	}
}
/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);

	pr_debug("PCI: Scanning behind PCI bridge %s, config %06x, pass %d\n",
		 pci_name(dev), buses & 0xffffff, pass);

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus) {
		unsigned int cmax, busnr;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;
		busnr = (buses >> 8) & 0xFF;

		/*
		 * If we already got to this bus through a different bridge,
		 * ignore it.  This can happen with the i450NX chipset.
		 */
		if (pci_find_bus(pci_domain_nr(bus), busnr)) {
			printk(KERN_INFO "PCI: Bus %04x:%02x already known\n",
			       pci_domain_nr(bus), busnr);
			goto out;
		}

		child = pci_add_new_bus(bus, dev, busnr);
		if (!child)
			goto out;
		child->primary = buses & 0xFF;
		child->subordinate = (buses >> 16) & 0xFF;
		child->bridge_ctl = bctl;

		cmax = pci_scan_child_bus(child);
		if (cmax > max)
			max = cmax;
		if (child->subordinate > max)
			max = child->subordinate;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses())
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged */
		if (pci_find_bus(pci_domain_nr(bus), max+1))
			goto out;
		child = pci_add_new_bus(bus, dev, ++max);
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)     <<  0)
		      | ((unsigned int)(child->secondary)   <<  8)
		      | ((unsigned int)(child->subordinate) << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			/*
			 * Adjust subordinate busnr in parent buses.
			 * We do this before scanning for children because
			 * some devices may not be detected if the bios
			 * was lazy.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
			/* Now we can scan all subordinate buses... */
			max = pci_scan_child_bus(child);
			/*
			 * now fix it up again since we have found
			 * the real value of max.
			 */
			pci_fixup_parent_subordinate_busnr(child, max);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
						 max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->subordinate > max) &&
					    (parent->subordinate <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
			pci_fixup_parent_subordinate_busnr(child, max);
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		child->subordinate = max;
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}
	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->subordinate > bus->subordinate) ||
		    (child->number > bus->subordinate) ||
		    (child->number < bus->number) ||
		    (child->subordinate < bus->number)) {
			pr_debug("PCI: Bus #%02x (-#%02x) is %s "
				 "hidden behind%s bridge #%02x (-#%02x)\n",
				 child->number, child->subordinate,
				 (bus->number > child->subordinate &&
				  bus->subordinate < child->number) ?
					"wholly" : "partially",
				 bus->self->transparent ? " transparent" : "",
				 bus->number, bus->subordinate);
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	return max;
}
/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}
#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)
/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and IO-space addresses, IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and -1 if unknown type of device (not normal,
 * bridge or CardBus).
 */
static int pci_setup_device(struct pci_dev *dev)
{
	u32 class;

	sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	class >>= 8;				    /* upper 3 bytes */
	dev->class = class;
	class >>= 8;

	pr_debug("PCI: Found %s [%04x/%04x] %06x %02x\n", pci_name(dev),
		 dev->vendor, dev->device, class, dev->hdr_type);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	class = dev->class >> 8;

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code. Legacy mode ATA controllers have fixed
		 * addresses. These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				dev->resource[0].start = 0x1F0;
				dev->resource[0].end = 0x1F7;
				dev->resource[0].flags = LEGACY_IO_RESOURCE;
				dev->resource[1].start = 0x3F6;
				dev->resource[1].end = 0x3F6;
				dev->resource[1].flags = LEGACY_IO_RESOURCE;
			}
			if ((progif & 4) == 0) {
				dev->resource[2].start = 0x170;
				dev->resource[2].end = 0x177;
				dev->resource[2].flags = LEGACY_IO_RESOURCE;
				dev->resource[3].start = 0x376;
				dev->resource[3].end = 0x376;
				dev->resource[3].flags = LEGACY_IO_RESOURCE;
			}
		}
		break;
	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n",
		       pci_name(dev), dev->hdr_type);
		return -1;

	bad:
		printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n",
		       pci_name(dev), class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}
/**
 * pci_release_dev - free a pci device structure when all users of it are finished.
 * @dev: device that's been disconnected
 *
 * Will be called only by the device core when all users of this pci device are
 * done.
 */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_vpd_release(pci_dev);
	kfree(pci_dev);
}
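/*
 * Record that the device has a PCI Express capability and cache the
 * device/port type field from the Express Flags register (endpoint,
 * root port, upstream/downstream switch port, ...).
 */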
static void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;
	pdev->is_pcie = 1;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
}
/**
 * pci_cfg_space_size - get the configuration space size of the PCI device.
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;

	if (pci_read_config_dword(dev, 256, &status) != PCIBIOS_SUCCESSFUL)
		goto fail;
	if (status == 0xffffffff)
		goto fail;

	return PCI_CFG_SPACE_EXP_SIZE;

 fail:
	return PCI_CFG_SPACE_SIZE;
}
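/*
 * Only PCI-X Mode 2 devices (those reporting 266/533 MHz capability in
 * PCI_X_STATUS) define the extended 4096-byte configuration space,
 * which is why the speed bits are checked below before probing it.
 */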
int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;

	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!pos) {
		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
		if (!pos)
			goto fail;

		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
			goto fail;
	}

	return pci_cfg_space_size_ext(dev);

 fail:
	return PCI_CFG_SPACE_SIZE;
}
static void pci_release_bus_bridge_dev(struct device *dev)
{
	kfree(dev);
}
struct pci_dev *alloc_pci_dev(void)
{
	struct pci_dev *dev;

	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	INIT_LIST_HEAD(&dev->bus_list);

	pci_msi_init_pci_dev(dev);

	return dev;
}
EXPORT_SYMBOL(alloc_pci_dev);
/*
 * Read the config data for a PCI device, sanity-check it
 * and fill in the dev structure...
 */
static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	u32 l;
	u8 hdr_type;
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
		return NULL;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (l == 0xffffffff || l == 0x00000000 ||
	    l == 0x0000ffff || l == 0xffff0000)
		return NULL;

	/* Configuration request Retry Status */
	while (l == 0xffff0001) {
		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
			return NULL;
		/* Card hasn't responded in 60 seconds?  Must be stuck. */
		if (delay > 60 * 1000) {
			printk(KERN_WARNING "Device %04x:%02x:%02x.%d not "
			       "responding\n", pci_domain_nr(bus),
			       bus->number, PCI_SLOT(devfn),
			       PCI_FUNC(devfn));
			return NULL;
		}
	}

	if (pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type))
		return NULL;
	dev = alloc_pci_dev();
	if (!dev)
		return NULL;

	dev->bus = bus;
	dev->sysdata = bus->sysdata;
	dev->dev.parent = bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->devfn = devfn;
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->vendor = l & 0xffff;
	dev->device = (l >> 16) & 0xffff;
	dev->cfg_size = pci_cfg_space_size(dev);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;
	if (pci_setup_device(dev) < 0) {
		kfree(dev);
		return NULL;
	}

	pci_vpd_pci22_init(dev);

	return dev;
}
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;
	pci_dev_get(dev);

	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);
}
struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;

	dev = pci_scan_device(bus, devfn);
	if (!dev)
		return NULL;

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(pci_scan_single_device);
/**
 * pci_scan_slot - scan a PCI slot on a bus for devices.
 * @bus: PCI bus to scan
 * @devfn: slot number to scan (must have zero function.)
 *
 * Scan a PCI slot on the specified PCI bus for devices, adding
 * discovered devices to the @bus->devices list.  New devices
 * will not have is_added set.
 */
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
	int func, nr = 0;
	int scan_all_fns;

	scan_all_fns = pcibios_scan_all_fns(bus, devfn);

	for (func = 0; func < 8; func++, devfn++) {
		struct pci_dev *dev;

		dev = pci_scan_single_device(bus, devfn);
		if (dev) {
			nr++;

			/*
			 * If this is a single function device,
			 * don't scan past the first function.
			 */
			if (!dev->multifunction) {
				if (func > 0) {
					dev->multifunction = 1;
				} else {
					break;
				}
			}
		} else {
			if (func == 0 && !scan_all_fns)
				break;
		}
	}

	if (bus->self)
		pcie_aspm_init_link_state(bus->self);

	return nr;
}
unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->secondary;
	struct pci_dev *dev;

	pr_debug("PCI: Scanning bus %04x:%02x\n", pci_domain_nr(bus), bus->number);

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	pr_debug("PCI: Fixups for bus %04x:%02x\n", pci_domain_nr(bus), bus->number);
	pcibios_fixup_bus(bus);
	for (pass = 0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	pr_debug("PCI: Bus scan for %04x:%02x returning with max=%02x\n",
		 pci_domain_nr(bus), bus->number, max);
	return max;
}
void __attribute__((weak)) set_pci_bus_resources_arch_default(struct pci_bus *b)
{
}
struct pci_bus *pci_create_bus(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
{
	int error;
	struct pci_bus *b;
	struct device *dev;

	b = pci_alloc_bus();
	if (!b)
		return NULL;

	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		kfree(b);
		return NULL;
	}

	b->sysdata = sysdata;
	b->ops = ops;

	if (pci_find_bus(pci_domain_nr(b), bus)) {
		/* If we already got to this bus through a different bridge, ignore it */
		pr_debug("PCI: Bus %04x:%02x already known\n", pci_domain_nr(b), bus);
		goto err_out;
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	memset(dev, 0, sizeof(*dev));
	dev->parent = parent;
	dev->release = pci_release_bus_bridge_dev;
	sprintf(dev->bus_id, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(dev);
	if (error)
		goto dev_reg_err;
	b->bridge = get_device(dev);

	if (!parent)
		set_dev_node(b->bridge, pcibus_to_node(b));
	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	sprintf(b->dev.bus_id, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;
	error = device_create_file(&b->dev, &dev_attr_cpuaffinity);
	if (error)
		goto dev_create_file_err;

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	b->number = b->secondary = bus;
	b->resource[0] = &ioport_resource;
	b->resource[1] = &iomem_resource;

	set_pci_bus_resources_arch_default(b);

	return b;

dev_create_file_err:
	device_unregister(&b->dev);
class_dev_reg_err:
	device_unregister(dev);
dev_reg_err:
	down_write(&pci_bus_sem);
	list_del(&b->node);
	up_write(&pci_bus_sem);
err_out:
	kfree(dev);
	kfree(b);
	return NULL;
}
struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
{
	struct pci_bus *b;

	b = pci_create_bus(parent, bus, ops, sysdata);
	if (b)
		b->subordinate = pci_scan_child_bus(b);
	return b;
}
EXPORT_SYMBOL(pci_scan_bus_parented);
#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pci_add_new_bus);
EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bridge);
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
#endif
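/*
 * Comparator used by pci_sort_breadthfirst() below: orders devices by
 * domain, then bus number, then devfn, so the bus klist ends up in
 * breadth-first order.
 */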
static int __init pci_sort_bf_cmp(const struct pci_dev *a, const struct pci_dev *b)
{
	if      (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return  1;

	if      (a->bus->number < b->bus->number) return -1;
	else if (a->bus->number > b->bus->number) return  1;

	if      (a->devfn < b->devfn) return -1;
	else if (a->devfn > b->devfn) return  1;

	return 0;
}
/*
 * Yes, this forcibly breaks the klist abstraction temporarily.  It
 * just wants to sort the klist, not change reference counts and
 * take/drop locks rapidly in the process.  It does all this while
 * holding the lock for the list, so objects can't otherwise be
 * added/removed while we're swizzling.
 */
static void __init pci_insertion_sort_klist(struct pci_dev *a, struct list_head *list)
{
	struct list_head *pos;
	struct klist_node *n;
	struct device *dev;
	struct pci_dev *b;

	list_for_each(pos, list) {
		n = container_of(pos, struct klist_node, n_node);
		dev = container_of(n, struct device, knode_bus);
		b = to_pci_dev(dev);
		if (pci_sort_bf_cmp(a, b) <= 0) {
			list_move_tail(&a->dev.knode_bus.n_node, &b->dev.knode_bus.n_node);
			return;
		}
	}
	list_move_tail(&a->dev.knode_bus.n_node, list);
}
void __init pci_sort_breadthfirst(void)
{
	LIST_HEAD(sorted_devices);
	struct list_head *pos, *tmp;
	struct klist_node *n;
	struct device *dev;
	struct pci_dev *pdev;
	struct klist *device_klist;

	device_klist = bus_get_device_klist(&pci_bus_type);

	spin_lock(&device_klist->k_lock);
	list_for_each_safe(pos, tmp, &device_klist->k_list) {
		n = container_of(pos, struct klist_node, n_node);
		dev = container_of(n, struct device, knode_bus);
		pdev = to_pci_dev(dev);
		pci_insertion_sort_klist(pdev, &sorted_devices);
	}
	list_splice(&sorted_devices, &device_klist->k_list);
	spin_unlock(&device_klist->k_lock);
}