/*
 *	drivers/pci/setup-bus.c
 *
 * Extruded from code written by
 *	Dave Rusling (david.rusling@reo.mts.dec.com)
 *	David Mosberger (davidm@cs.arizona.edu)
 *	David Miller (davem@redhat.com)
 *
 * Support routines for initializing a PCI subsystem.
 *
 * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     PCI-PCI bridges cleanup, sorted resource allocation.
 * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     Converted to allocation in 3 passes, which gives
 *	     tighter packing. Prefetchable range support.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/cache.h>
#include <linux/slab.h>
30 struct resource_list_x
{
31 struct resource_list_x
*next
;
34 resource_size_t start
;
39 static void add_to_failed_list(struct resource_list_x
*head
,
40 struct pci_dev
*dev
, struct resource
*res
)
42 struct resource_list_x
*list
= head
;
43 struct resource_list_x
*ln
= list
->next
;
44 struct resource_list_x
*tmp
;
46 tmp
= kmalloc(sizeof(*tmp
), GFP_KERNEL
);
48 pr_warning("add_to_failed_list: kmalloc() failed!\n");
55 tmp
->start
= res
->start
;
57 tmp
->flags
= res
->flags
;
61 static void free_failed_list(struct resource_list_x
*head
)
63 struct resource_list_x
*list
, *tmp
;
65 for (list
= head
->next
; list
;) {
74 static void __dev_sort_resources(struct pci_dev
*dev
,
75 struct resource_list
*head
)
77 u16
class = dev
->class >> 8;
79 /* Don't touch classless devices or host bridges or ioapics. */
80 if (class == PCI_CLASS_NOT_DEFINED
|| class == PCI_CLASS_BRIDGE_HOST
)
83 /* Don't touch ioapic devices already enabled by firmware */
84 if (class == PCI_CLASS_SYSTEM_PIC
) {
86 pci_read_config_word(dev
, PCI_COMMAND
, &command
);
87 if (command
& (PCI_COMMAND_IO
| PCI_COMMAND_MEMORY
))
91 pdev_sort_resources(dev
, head
);
94 static void __assign_resources_sorted(struct resource_list
*head
,
95 struct resource_list_x
*fail_head
)
98 struct resource_list
*list
, *tmp
;
101 for (list
= head
->next
; list
;) {
103 idx
= res
- &list
->dev
->resource
[0];
104 if (pci_assign_resource(list
->dev
, idx
)) {
105 if (fail_head
&& !pci_is_root_bus(list
->dev
->bus
))
106 add_to_failed_list(fail_head
, list
->dev
, res
);
117 static void pdev_assign_resources_sorted(struct pci_dev
*dev
,
118 struct resource_list_x
*fail_head
)
120 struct resource_list head
;
123 __dev_sort_resources(dev
, &head
);
124 __assign_resources_sorted(&head
, fail_head
);
128 static void pbus_assign_resources_sorted(const struct pci_bus
*bus
,
129 struct resource_list_x
*fail_head
)
132 struct resource_list head
;
135 list_for_each_entry(dev
, &bus
->devices
, bus_list
)
136 __dev_sort_resources(dev
, &head
);
138 __assign_resources_sorted(&head
, fail_head
);
141 void pci_setup_cardbus(struct pci_bus
*bus
)
143 struct pci_dev
*bridge
= bus
->self
;
144 struct resource
*res
;
145 struct pci_bus_region region
;
147 dev_info(&bridge
->dev
, "CardBus bridge to [bus %02x-%02x]\n",
148 bus
->secondary
, bus
->subordinate
);
150 res
= bus
->resource
[0];
151 pcibios_resource_to_bus(bridge
, ®ion
, res
);
152 if (res
->flags
& IORESOURCE_IO
) {
154 * The IO resource is allocated a range twice as large as it
155 * would normally need. This allows us to set both IO regs.
157 dev_info(&bridge
->dev
, " bridge window %pR\n", res
);
158 pci_write_config_dword(bridge
, PCI_CB_IO_BASE_0
,
160 pci_write_config_dword(bridge
, PCI_CB_IO_LIMIT_0
,
164 res
= bus
->resource
[1];
165 pcibios_resource_to_bus(bridge
, ®ion
, res
);
166 if (res
->flags
& IORESOURCE_IO
) {
167 dev_info(&bridge
->dev
, " bridge window %pR\n", res
);
168 pci_write_config_dword(bridge
, PCI_CB_IO_BASE_1
,
170 pci_write_config_dword(bridge
, PCI_CB_IO_LIMIT_1
,
174 res
= bus
->resource
[2];
175 pcibios_resource_to_bus(bridge
, ®ion
, res
);
176 if (res
->flags
& IORESOURCE_MEM
) {
177 dev_info(&bridge
->dev
, " bridge window %pR\n", res
);
178 pci_write_config_dword(bridge
, PCI_CB_MEMORY_BASE_0
,
180 pci_write_config_dword(bridge
, PCI_CB_MEMORY_LIMIT_0
,
184 res
= bus
->resource
[3];
185 pcibios_resource_to_bus(bridge
, ®ion
, res
);
186 if (res
->flags
& IORESOURCE_MEM
) {
187 dev_info(&bridge
->dev
, " bridge window %pR\n", res
);
188 pci_write_config_dword(bridge
, PCI_CB_MEMORY_BASE_1
,
190 pci_write_config_dword(bridge
, PCI_CB_MEMORY_LIMIT_1
,
194 EXPORT_SYMBOL(pci_setup_cardbus
);
/* Initialize bridges with base/limit values we have collected.
   PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998)
   requires that if there is no I/O ports or memory behind the
   bridge, corresponding range must be turned off by writing base
   value greater than limit to the bridge's base/limit registers.

   Note: care must be taken when updating I/O base/limit registers
   of bridges which support 32-bit I/O. This update requires two
   config space writes, so it's quite possible that an I/O window of
   the bridge will have some undesirable address (e.g. 0) after the
   first write. Ditto 64-bit prefetchable MMIO. */
207 static void pci_setup_bridge_io(struct pci_bus
*bus
)
209 struct pci_dev
*bridge
= bus
->self
;
210 struct resource
*res
;
211 struct pci_bus_region region
;
214 /* Set up the top and bottom of the PCI I/O segment for this bus. */
215 res
= bus
->resource
[0];
216 pcibios_resource_to_bus(bridge
, ®ion
, res
);
217 if (res
->flags
& IORESOURCE_IO
) {
218 pci_read_config_dword(bridge
, PCI_IO_BASE
, &l
);
220 l
|= (region
.start
>> 8) & 0x00f0;
221 l
|= region
.end
& 0xf000;
222 /* Set up upper 16 bits of I/O base/limit. */
223 io_upper16
= (region
.end
& 0xffff0000) | (region
.start
>> 16);
224 dev_info(&bridge
->dev
, " bridge window %pR\n", res
);
226 /* Clear upper 16 bits of I/O base/limit. */
229 dev_info(&bridge
->dev
, " bridge window [io disabled]\n");
231 /* Temporarily disable the I/O range before updating PCI_IO_BASE. */
232 pci_write_config_dword(bridge
, PCI_IO_BASE_UPPER16
, 0x0000ffff);
233 /* Update lower 16 bits of I/O base/limit. */
234 pci_write_config_dword(bridge
, PCI_IO_BASE
, l
);
235 /* Update upper 16 bits of I/O base/limit. */
236 pci_write_config_dword(bridge
, PCI_IO_BASE_UPPER16
, io_upper16
);
239 static void pci_setup_bridge_mmio(struct pci_bus
*bus
)
241 struct pci_dev
*bridge
= bus
->self
;
242 struct resource
*res
;
243 struct pci_bus_region region
;
246 /* Set up the top and bottom of the PCI Memory segment for this bus. */
247 res
= bus
->resource
[1];
248 pcibios_resource_to_bus(bridge
, ®ion
, res
);
249 if (res
->flags
& IORESOURCE_MEM
) {
250 l
= (region
.start
>> 16) & 0xfff0;
251 l
|= region
.end
& 0xfff00000;
252 dev_info(&bridge
->dev
, " bridge window %pR\n", res
);
255 dev_info(&bridge
->dev
, " bridge window [mem disabled]\n");
257 pci_write_config_dword(bridge
, PCI_MEMORY_BASE
, l
);
260 static void pci_setup_bridge_mmio_pref(struct pci_bus
*bus
)
262 struct pci_dev
*bridge
= bus
->self
;
263 struct resource
*res
;
264 struct pci_bus_region region
;
267 /* Clear out the upper 32 bits of PREF limit.
268 If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
269 disables PREF range, which is ok. */
270 pci_write_config_dword(bridge
, PCI_PREF_LIMIT_UPPER32
, 0);
272 /* Set up PREF base/limit. */
274 res
= bus
->resource
[2];
275 pcibios_resource_to_bus(bridge
, ®ion
, res
);
276 if (res
->flags
& IORESOURCE_PREFETCH
) {
277 l
= (region
.start
>> 16) & 0xfff0;
278 l
|= region
.end
& 0xfff00000;
279 if (res
->flags
& IORESOURCE_MEM_64
) {
280 bu
= upper_32_bits(region
.start
);
281 lu
= upper_32_bits(region
.end
);
283 dev_info(&bridge
->dev
, " bridge window %pR\n", res
);
286 dev_info(&bridge
->dev
, " bridge window [mem pref disabled]\n");
288 pci_write_config_dword(bridge
, PCI_PREF_MEMORY_BASE
, l
);
290 /* Set the upper 32 bits of PREF base & limit. */
291 pci_write_config_dword(bridge
, PCI_PREF_BASE_UPPER32
, bu
);
292 pci_write_config_dword(bridge
, PCI_PREF_LIMIT_UPPER32
, lu
);
295 static void __pci_setup_bridge(struct pci_bus
*bus
, unsigned long type
)
297 struct pci_dev
*bridge
= bus
->self
;
299 dev_info(&bridge
->dev
, "PCI bridge to [bus %02x-%02x]\n",
300 bus
->secondary
, bus
->subordinate
);
302 if (type
& IORESOURCE_IO
)
303 pci_setup_bridge_io(bus
);
305 if (type
& IORESOURCE_MEM
)
306 pci_setup_bridge_mmio(bus
);
308 if (type
& IORESOURCE_PREFETCH
)
309 pci_setup_bridge_mmio_pref(bus
);
311 pci_write_config_word(bridge
, PCI_BRIDGE_CONTROL
, bus
->bridge_ctl
);
314 static void pci_setup_bridge(struct pci_bus
*bus
)
316 unsigned long type
= IORESOURCE_IO
| IORESOURCE_MEM
|
319 __pci_setup_bridge(bus
, type
);
/* Check whether the bridge supports optional I/O and
   prefetchable memory ranges. If not, the respective
   base/limit registers must be read-only and read as 0. */
325 static void pci_bridge_check_ranges(struct pci_bus
*bus
)
329 struct pci_dev
*bridge
= bus
->self
;
330 struct resource
*b_res
;
332 b_res
= &bridge
->resource
[PCI_BRIDGE_RESOURCES
];
333 b_res
[1].flags
|= IORESOURCE_MEM
;
335 pci_read_config_word(bridge
, PCI_IO_BASE
, &io
);
337 pci_write_config_word(bridge
, PCI_IO_BASE
, 0xf0f0);
338 pci_read_config_word(bridge
, PCI_IO_BASE
, &io
);
339 pci_write_config_word(bridge
, PCI_IO_BASE
, 0x0);
342 b_res
[0].flags
|= IORESOURCE_IO
;
343 /* DECchip 21050 pass 2 errata: the bridge may miss an address
344 disconnect boundary by one PCI data phase.
345 Workaround: do not use prefetching on this device. */
346 if (bridge
->vendor
== PCI_VENDOR_ID_DEC
&& bridge
->device
== 0x0001)
348 pci_read_config_dword(bridge
, PCI_PREF_MEMORY_BASE
, &pmem
);
350 pci_write_config_dword(bridge
, PCI_PREF_MEMORY_BASE
,
352 pci_read_config_dword(bridge
, PCI_PREF_MEMORY_BASE
, &pmem
);
353 pci_write_config_dword(bridge
, PCI_PREF_MEMORY_BASE
, 0x0);
356 b_res
[2].flags
|= IORESOURCE_MEM
| IORESOURCE_PREFETCH
;
357 if ((pmem
& PCI_PREF_RANGE_TYPE_MASK
) ==
358 PCI_PREF_RANGE_TYPE_64
) {
359 b_res
[2].flags
|= IORESOURCE_MEM_64
;
360 b_res
[2].flags
|= PCI_PREF_RANGE_TYPE_64
;
364 /* double check if bridge does support 64 bit pref */
365 if (b_res
[2].flags
& IORESOURCE_MEM_64
) {
366 u32 mem_base_hi
, tmp
;
367 pci_read_config_dword(bridge
, PCI_PREF_BASE_UPPER32
,
369 pci_write_config_dword(bridge
, PCI_PREF_BASE_UPPER32
,
371 pci_read_config_dword(bridge
, PCI_PREF_BASE_UPPER32
, &tmp
);
373 b_res
[2].flags
&= ~IORESOURCE_MEM_64
;
374 pci_write_config_dword(bridge
, PCI_PREF_BASE_UPPER32
,
/* Helper function for sizing routines: find first available
   bus resource of a given type. Note: we intentionally skip
   the bus resources which have already been assigned (that is,
   have non-NULL parent resource). */
383 static struct resource
*find_free_bus_resource(struct pci_bus
*bus
, unsigned long type
)
387 unsigned long type_mask
= IORESOURCE_IO
| IORESOURCE_MEM
|
390 pci_bus_for_each_resource(bus
, r
, i
) {
391 if (r
== &ioport_resource
|| r
== &iomem_resource
)
393 if (r
&& (r
->flags
& type_mask
) == type
&& !r
->parent
)
/* Sizing the IO windows of the PCI-PCI bridge is trivial,
   since these windows have 4K granularity and the IO ranges
   of non-bridge PCI devices are limited to 256 bytes.
   We must be careful with the ISA aliasing though. */
403 static void pbus_size_io(struct pci_bus
*bus
, resource_size_t min_size
)
406 struct resource
*b_res
= find_free_bus_resource(bus
, IORESOURCE_IO
);
407 unsigned long size
= 0, size1
= 0, old_size
;
412 list_for_each_entry(dev
, &bus
->devices
, bus_list
) {
415 for (i
= 0; i
< PCI_NUM_RESOURCES
; i
++) {
416 struct resource
*r
= &dev
->resource
[i
];
417 unsigned long r_size
;
419 if (r
->parent
|| !(r
->flags
& IORESOURCE_IO
))
421 r_size
= resource_size(r
);
424 /* Might be re-aligned for ISA */
432 old_size
= resource_size(b_res
);
435 /* To be fixed in 2.5: we should have sort of HAVE_ISA
436 flag in the struct pci_bus. */
437 #if defined(CONFIG_ISA) || defined(CONFIG_EISA)
438 size
= (size
& 0xff) + ((size
& ~0xffUL
) << 2);
440 size
= ALIGN(size
+ size1
, 4096);
444 if (b_res
->start
|| b_res
->end
)
445 dev_info(&bus
->self
->dev
, "disabling bridge window "
446 "%pR to [bus %02x-%02x] (unused)\n", b_res
,
447 bus
->secondary
, bus
->subordinate
);
451 /* Alignment of the IO window is always 4K */
453 b_res
->end
= b_res
->start
+ size
- 1;
454 b_res
->flags
|= IORESOURCE_STARTALIGN
;
/* Calculate the size of the bus and minimal alignment which
   guarantees that all child resources fit in this size. */
459 static int pbus_size_mem(struct pci_bus
*bus
, unsigned long mask
,
460 unsigned long type
, resource_size_t min_size
)
463 resource_size_t min_align
, align
, size
, old_size
;
464 resource_size_t aligns
[12]; /* Alignments from 1Mb to 2Gb */
465 int order
, max_order
;
466 struct resource
*b_res
= find_free_bus_resource(bus
, type
);
467 unsigned int mem64_mask
= 0;
472 memset(aligns
, 0, sizeof(aligns
));
476 mem64_mask
= b_res
->flags
& IORESOURCE_MEM_64
;
477 b_res
->flags
&= ~IORESOURCE_MEM_64
;
479 list_for_each_entry(dev
, &bus
->devices
, bus_list
) {
482 for (i
= 0; i
< PCI_NUM_RESOURCES
; i
++) {
483 struct resource
*r
= &dev
->resource
[i
];
484 resource_size_t r_size
;
486 if (r
->parent
|| (r
->flags
& mask
) != type
)
488 r_size
= resource_size(r
);
489 /* For bridges size != alignment */
490 align
= pci_resource_alignment(dev
, r
);
491 order
= __ffs(align
) - 20;
493 dev_warn(&dev
->dev
, "disabling BAR %d: %pR "
494 "(bad alignment %#llx)\n", i
, r
,
495 (unsigned long long) align
);
502 /* Exclude ranges with size > align from
503 calculation of the alignment. */
505 aligns
[order
] += align
;
506 if (order
> max_order
)
508 mem64_mask
&= r
->flags
& IORESOURCE_MEM_64
;
513 old_size
= resource_size(b_res
);
521 for (order
= 0; order
<= max_order
; order
++) {
522 resource_size_t align1
= 1;
524 align1
<<= (order
+ 20);
528 else if (ALIGN(align
+ min_align
, min_align
) < align1
)
529 min_align
= align1
>> 1;
530 align
+= aligns
[order
];
532 size
= ALIGN(size
, min_align
);
534 if (b_res
->start
|| b_res
->end
)
535 dev_info(&bus
->self
->dev
, "disabling bridge window "
536 "%pR to [bus %02x-%02x] (unused)\n", b_res
,
537 bus
->secondary
, bus
->subordinate
);
541 b_res
->start
= min_align
;
542 b_res
->end
= size
+ min_align
- 1;
543 b_res
->flags
|= IORESOURCE_STARTALIGN
;
544 b_res
->flags
|= mem64_mask
;
548 static void pci_bus_size_cardbus(struct pci_bus
*bus
)
550 struct pci_dev
*bridge
= bus
->self
;
551 struct resource
*b_res
= &bridge
->resource
[PCI_BRIDGE_RESOURCES
];
555 * Reserve some resources for CardBus. We reserve
556 * a fixed amount of bus space for CardBus bridges.
559 b_res
[0].end
= pci_cardbus_io_size
- 1;
560 b_res
[0].flags
|= IORESOURCE_IO
| IORESOURCE_SIZEALIGN
;
563 b_res
[1].end
= pci_cardbus_io_size
- 1;
564 b_res
[1].flags
|= IORESOURCE_IO
| IORESOURCE_SIZEALIGN
;
567 * Check whether prefetchable memory is supported
570 pci_read_config_word(bridge
, PCI_CB_BRIDGE_CONTROL
, &ctrl
);
571 if (!(ctrl
& PCI_CB_BRIDGE_CTL_PREFETCH_MEM0
)) {
572 ctrl
|= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0
;
573 pci_write_config_word(bridge
, PCI_CB_BRIDGE_CONTROL
, ctrl
);
574 pci_read_config_word(bridge
, PCI_CB_BRIDGE_CONTROL
, &ctrl
);
578 * If we have prefetchable memory support, allocate
579 * two regions. Otherwise, allocate one region of
582 if (ctrl
& PCI_CB_BRIDGE_CTL_PREFETCH_MEM0
) {
584 b_res
[2].end
= pci_cardbus_mem_size
- 1;
585 b_res
[2].flags
|= IORESOURCE_MEM
| IORESOURCE_PREFETCH
| IORESOURCE_SIZEALIGN
;
588 b_res
[3].end
= pci_cardbus_mem_size
- 1;
589 b_res
[3].flags
|= IORESOURCE_MEM
| IORESOURCE_SIZEALIGN
;
592 b_res
[3].end
= pci_cardbus_mem_size
* 2 - 1;
593 b_res
[3].flags
|= IORESOURCE_MEM
| IORESOURCE_SIZEALIGN
;
597 void __ref
pci_bus_size_bridges(struct pci_bus
*bus
)
600 unsigned long mask
, prefmask
;
601 resource_size_t min_mem_size
= 0, min_io_size
= 0;
603 list_for_each_entry(dev
, &bus
->devices
, bus_list
) {
604 struct pci_bus
*b
= dev
->subordinate
;
608 switch (dev
->class >> 8) {
609 case PCI_CLASS_BRIDGE_CARDBUS
:
610 pci_bus_size_cardbus(b
);
613 case PCI_CLASS_BRIDGE_PCI
:
615 pci_bus_size_bridges(b
);
624 switch (bus
->self
->class >> 8) {
625 case PCI_CLASS_BRIDGE_CARDBUS
:
626 /* don't size cardbuses yet. */
629 case PCI_CLASS_BRIDGE_PCI
:
630 pci_bridge_check_ranges(bus
);
631 if (bus
->self
->is_hotplug_bridge
) {
632 min_io_size
= pci_hotplug_io_size
;
633 min_mem_size
= pci_hotplug_mem_size
;
636 pbus_size_io(bus
, min_io_size
);
637 /* If the bridge supports prefetchable range, size it
638 separately. If it doesn't, or its prefetchable window
639 has already been allocated by arch code, try
640 non-prefetchable range for both types of PCI memory
642 mask
= IORESOURCE_MEM
;
643 prefmask
= IORESOURCE_MEM
| IORESOURCE_PREFETCH
;
644 if (pbus_size_mem(bus
, prefmask
, prefmask
, min_mem_size
))
645 mask
= prefmask
; /* Success, size non-prefetch only. */
647 min_mem_size
+= min_mem_size
;
648 pbus_size_mem(bus
, mask
, IORESOURCE_MEM
, min_mem_size
);
652 EXPORT_SYMBOL(pci_bus_size_bridges
);
654 static void __ref
__pci_bus_assign_resources(const struct pci_bus
*bus
,
655 struct resource_list_x
*fail_head
)
660 pbus_assign_resources_sorted(bus
, fail_head
);
662 list_for_each_entry(dev
, &bus
->devices
, bus_list
) {
663 b
= dev
->subordinate
;
667 __pci_bus_assign_resources(b
, fail_head
);
669 switch (dev
->class >> 8) {
670 case PCI_CLASS_BRIDGE_PCI
:
671 if (!pci_is_enabled(dev
))
675 case PCI_CLASS_BRIDGE_CARDBUS
:
676 pci_setup_cardbus(b
);
680 dev_info(&dev
->dev
, "not setting up bridge for bus "
681 "%04x:%02x\n", pci_domain_nr(b
), b
->number
);
687 void __ref
pci_bus_assign_resources(const struct pci_bus
*bus
)
689 __pci_bus_assign_resources(bus
, NULL
);
691 EXPORT_SYMBOL(pci_bus_assign_resources
);
693 static void __ref
__pci_bridge_assign_resources(const struct pci_dev
*bridge
,
694 struct resource_list_x
*fail_head
)
698 pdev_assign_resources_sorted((struct pci_dev
*)bridge
, fail_head
);
700 b
= bridge
->subordinate
;
704 __pci_bus_assign_resources(b
, fail_head
);
706 switch (bridge
->class >> 8) {
707 case PCI_CLASS_BRIDGE_PCI
:
711 case PCI_CLASS_BRIDGE_CARDBUS
:
712 pci_setup_cardbus(b
);
716 dev_info(&bridge
->dev
, "not setting up bridge for bus "
717 "%04x:%02x\n", pci_domain_nr(b
), b
->number
);
721 static void pci_bridge_release_resources(struct pci_bus
*bus
,
725 bool changed
= false;
728 unsigned long type_mask
= IORESOURCE_IO
| IORESOURCE_MEM
|
732 for (idx
= PCI_BRIDGE_RESOURCES
; idx
<= PCI_BRIDGE_RESOURCE_END
;
734 r
= &dev
->resource
[idx
];
735 if ((r
->flags
& type_mask
) != type
)
740 * if there are children under that, we should release them
743 release_child_resources(r
);
744 if (!release_resource(r
)) {
745 dev_printk(KERN_DEBUG
, &dev
->dev
,
746 "resource %d %pR released\n", idx
, r
);
747 /* keep the old size */
748 r
->end
= resource_size(r
) - 1;
756 /* avoiding touch the one without PREF */
757 if (type
& IORESOURCE_PREFETCH
)
758 type
= IORESOURCE_PREFETCH
;
759 __pci_setup_bridge(bus
, type
);
/*
 * try to release pci bridge resources that is from leaf bridge,
 * so we can allocate big new one later
 */
771 static void __ref
pci_bus_release_bridge_resources(struct pci_bus
*bus
,
773 enum release_type rel_type
)
776 bool is_leaf_bridge
= true;
778 list_for_each_entry(dev
, &bus
->devices
, bus_list
) {
779 struct pci_bus
*b
= dev
->subordinate
;
783 is_leaf_bridge
= false;
785 if ((dev
->class >> 8) != PCI_CLASS_BRIDGE_PCI
)
788 if (rel_type
== whole_subtree
)
789 pci_bus_release_bridge_resources(b
, type
,
793 if (pci_is_root_bus(bus
))
796 if ((bus
->self
->class >> 8) != PCI_CLASS_BRIDGE_PCI
)
799 if ((rel_type
== whole_subtree
) || is_leaf_bridge
)
800 pci_bridge_release_resources(bus
, type
);
803 static void pci_bus_dump_res(struct pci_bus
*bus
)
805 struct resource
*res
;
808 pci_bus_for_each_resource(bus
, res
, i
) {
809 if (!res
|| !res
->end
|| !res
->flags
)
812 dev_printk(KERN_DEBUG
, &bus
->dev
, "resource %d %pR\n", i
, res
);
816 static void pci_bus_dump_resources(struct pci_bus
*bus
)
822 pci_bus_dump_res(bus
);
824 list_for_each_entry(dev
, &bus
->devices
, bus_list
) {
825 b
= dev
->subordinate
;
829 pci_bus_dump_resources(b
);
833 static int __init
pci_bus_get_depth(struct pci_bus
*bus
)
838 list_for_each_entry(dev
, &bus
->devices
, bus_list
) {
840 struct pci_bus
*b
= dev
->subordinate
;
844 ret
= pci_bus_get_depth(b
);
851 static int __init
pci_get_max_depth(void)
856 list_for_each_entry(bus
, &pci_root_buses
, node
) {
859 ret
= pci_bus_get_depth(bus
);
/*
 * first try will not touch pci bridge res
 * second and later try will clear small leaf bridge res
 * will stop till to the max depth if can not find good one
 */
873 pci_assign_unassigned_resources(void)
877 enum release_type rel_type
= leaf_only
;
878 struct resource_list_x head
, *list
;
879 unsigned long type_mask
= IORESOURCE_IO
| IORESOURCE_MEM
|
881 unsigned long failed_type
;
882 int max_depth
= pci_get_max_depth();
887 pci_try_num
= max_depth
+ 1;
888 printk(KERN_DEBUG
"PCI: max bus depth: %d pci_try_num: %d\n",
889 max_depth
, pci_try_num
);
892 /* Depth first, calculate sizes and alignments of all
893 subordinate buses. */
894 list_for_each_entry(bus
, &pci_root_buses
, node
) {
895 pci_bus_size_bridges(bus
);
897 /* Depth last, allocate resources and update the hardware. */
898 list_for_each_entry(bus
, &pci_root_buses
, node
) {
899 __pci_bus_assign_resources(bus
, &head
);
903 /* any device complain? */
905 goto enable_and_dump
;
907 for (list
= head
.next
; list
;) {
908 failed_type
|= list
->flags
;
912 * io port are tight, don't try extra
913 * or if reach the limit, don't want to try more
915 failed_type
&= type_mask
;
916 if ((failed_type
== IORESOURCE_IO
) || (tried_times
>= pci_try_num
)) {
917 free_failed_list(&head
);
918 goto enable_and_dump
;
921 printk(KERN_DEBUG
"PCI: No. %d try to assign unassigned res\n",
924 /* third times and later will not check if it is leaf */
925 if ((tried_times
+ 1) > 2)
926 rel_type
= whole_subtree
;
929 * Try to release leaf bridge's resources that doesn't fit resource of
930 * child device under that bridge
932 for (list
= head
.next
; list
;) {
933 bus
= list
->dev
->bus
;
934 pci_bus_release_bridge_resources(bus
, list
->flags
& type_mask
,
938 /* restore size and flags */
939 for (list
= head
.next
; list
;) {
940 struct resource
*res
= list
->res
;
942 res
->start
= list
->start
;
943 res
->end
= list
->end
;
944 res
->flags
= list
->flags
;
945 if (list
->dev
->subordinate
)
950 free_failed_list(&head
);
955 /* Depth last, update the hardware. */
956 list_for_each_entry(bus
, &pci_root_buses
, node
)
957 pci_enable_bridges(bus
);
959 /* dump the resource on buses */
960 list_for_each_entry(bus
, &pci_root_buses
, node
) {
961 pci_bus_dump_resources(bus
);
965 void pci_assign_unassigned_bridge_resources(struct pci_dev
*bridge
)
967 struct pci_bus
*parent
= bridge
->subordinate
;
969 struct resource_list_x head
, *list
;
971 unsigned long type_mask
= IORESOURCE_IO
| IORESOURCE_MEM
|
977 pci_bus_size_bridges(parent
);
978 __pci_bridge_assign_resources(bridge
, &head
);
979 retval
= pci_reenable_device(bridge
);
980 pci_set_master(bridge
);
981 pci_enable_bridges(parent
);
988 if (tried_times
>= 2) {
989 /* still fail, don't need to try more */
990 free_failed_list(&head
);
994 printk(KERN_DEBUG
"PCI: No. %d try to assign unassigned res\n",
998 * Try to release leaf bridge's resources that doesn't fit resource of
999 * child device under that bridge
1001 for (list
= head
.next
; list
;) {
1002 struct pci_bus
*bus
= list
->dev
->bus
;
1003 unsigned long flags
= list
->flags
;
1005 pci_bus_release_bridge_resources(bus
, flags
& type_mask
,
1009 /* restore size and flags */
1010 for (list
= head
.next
; list
;) {
1011 struct resource
*res
= list
->res
;
1013 res
->start
= list
->start
;
1014 res
->end
= list
->end
;
1015 res
->flags
= list
->flags
;
1016 if (list
->dev
->subordinate
)
1021 free_failed_list(&head
);
1025 EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources
);