/*
 *	drivers/pci/setup-bus.c
 *
 * Extruded from code written by
 *      Dave Rusling (david.rusling@reo.mts.dec.com)
 *      David Mosberger (davidm@cs.arizona.edu)
 *	David Miller (davem@redhat.com)
 *
 * Support routines for initializing a PCI subsystem.
 */

/*
 * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     PCI-PCI bridges cleanup, sorted resource allocation.
 * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     Converted to allocation in 3 passes, which gives
 *	     tighter packing. Prefetchable range support.
 */
20 #include <linux/init.h>
21 #include <linux/kernel.h>
22 #include <linux/module.h>
23 #include <linux/pci.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/cache.h>
27 #include <linux/slab.h>
30 struct resource_list_x
{
31 struct resource_list_x
*next
;
34 resource_size_t start
;
39 static void add_to_failed_list(struct resource_list_x
*head
,
40 struct pci_dev
*dev
, struct resource
*res
)
42 struct resource_list_x
*list
= head
;
43 struct resource_list_x
*ln
= list
->next
;
44 struct resource_list_x
*tmp
;
46 tmp
= kmalloc(sizeof(*tmp
), GFP_KERNEL
);
48 pr_warning("add_to_failed_list: kmalloc() failed!\n");
55 tmp
->start
= res
->start
;
57 tmp
->flags
= res
->flags
;
61 static void free_failed_list(struct resource_list_x
*head
)
63 struct resource_list_x
*list
, *tmp
;
65 for (list
= head
->next
; list
;) {
74 static void __dev_sort_resources(struct pci_dev
*dev
,
75 struct resource_list
*head
)
77 u16
class = dev
->class >> 8;
79 /* Don't touch classless devices or host bridges or ioapics. */
80 if (class == PCI_CLASS_NOT_DEFINED
|| class == PCI_CLASS_BRIDGE_HOST
)
83 /* Don't touch ioapic devices already enabled by firmware */
84 if (class == PCI_CLASS_SYSTEM_PIC
) {
86 pci_read_config_word(dev
, PCI_COMMAND
, &command
);
87 if (command
& (PCI_COMMAND_IO
| PCI_COMMAND_MEMORY
))
91 pdev_sort_resources(dev
, head
);
94 static void __assign_resources_sorted(struct resource_list
*head
,
95 struct resource_list_x
*fail_head
)
98 struct resource_list
*list
, *tmp
;
101 for (list
= head
->next
; list
;) {
103 idx
= res
- &list
->dev
->resource
[0];
105 if (pci_assign_resource(list
->dev
, idx
)) {
106 if (fail_head
&& !pci_is_root_bus(list
->dev
->bus
)) {
108 * if the failed res is for ROM BAR, and it will
109 * be enabled later, don't add it to the list
111 if (!((idx
== PCI_ROM_RESOURCE
) &&
112 (!(res
->flags
& IORESOURCE_ROM_ENABLE
))))
113 add_to_failed_list(fail_head
, list
->dev
, res
);
125 static void pdev_assign_resources_sorted(struct pci_dev
*dev
,
126 struct resource_list_x
*fail_head
)
128 struct resource_list head
;
131 __dev_sort_resources(dev
, &head
);
132 __assign_resources_sorted(&head
, fail_head
);
136 static void pbus_assign_resources_sorted(const struct pci_bus
*bus
,
137 struct resource_list_x
*fail_head
)
140 struct resource_list head
;
143 list_for_each_entry(dev
, &bus
->devices
, bus_list
)
144 __dev_sort_resources(dev
, &head
);
146 __assign_resources_sorted(&head
, fail_head
);
149 void pci_setup_cardbus(struct pci_bus
*bus
)
151 struct pci_dev
*bridge
= bus
->self
;
152 struct resource
*res
;
153 struct pci_bus_region region
;
155 dev_info(&bridge
->dev
, "CardBus bridge to [bus %02x-%02x]\n",
156 bus
->secondary
, bus
->subordinate
);
158 res
= bus
->resource
[0];
159 pcibios_resource_to_bus(bridge
, ®ion
, res
);
160 if (res
->flags
& IORESOURCE_IO
) {
162 * The IO resource is allocated a range twice as large as it
163 * would normally need. This allows us to set both IO regs.
165 dev_info(&bridge
->dev
, " bridge window %pR\n", res
);
166 pci_write_config_dword(bridge
, PCI_CB_IO_BASE_0
,
168 pci_write_config_dword(bridge
, PCI_CB_IO_LIMIT_0
,
172 res
= bus
->resource
[1];
173 pcibios_resource_to_bus(bridge
, ®ion
, res
);
174 if (res
->flags
& IORESOURCE_IO
) {
175 dev_info(&bridge
->dev
, " bridge window %pR\n", res
);
176 pci_write_config_dword(bridge
, PCI_CB_IO_BASE_1
,
178 pci_write_config_dword(bridge
, PCI_CB_IO_LIMIT_1
,
182 res
= bus
->resource
[2];
183 pcibios_resource_to_bus(bridge
, ®ion
, res
);
184 if (res
->flags
& IORESOURCE_MEM
) {
185 dev_info(&bridge
->dev
, " bridge window %pR\n", res
);
186 pci_write_config_dword(bridge
, PCI_CB_MEMORY_BASE_0
,
188 pci_write_config_dword(bridge
, PCI_CB_MEMORY_LIMIT_0
,
192 res
= bus
->resource
[3];
193 pcibios_resource_to_bus(bridge
, ®ion
, res
);
194 if (res
->flags
& IORESOURCE_MEM
) {
195 dev_info(&bridge
->dev
, " bridge window %pR\n", res
);
196 pci_write_config_dword(bridge
, PCI_CB_MEMORY_BASE_1
,
198 pci_write_config_dword(bridge
, PCI_CB_MEMORY_LIMIT_1
,
202 EXPORT_SYMBOL(pci_setup_cardbus
);
204 /* Initialize bridges with base/limit values we have collected.
205 PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998)
206 requires that if there is no I/O ports or memory behind the
207 bridge, corresponding range must be turned off by writing base
208 value greater than limit to the bridge's base/limit registers.
210 Note: care must be taken when updating I/O base/limit registers
211 of bridges which support 32-bit I/O. This update requires two
212 config space writes, so it's quite possible that an I/O window of
213 the bridge will have some undesirable address (e.g. 0) after the
214 first write. Ditto 64-bit prefetchable MMIO. */
215 static void pci_setup_bridge_io(struct pci_bus
*bus
)
217 struct pci_dev
*bridge
= bus
->self
;
218 struct resource
*res
;
219 struct pci_bus_region region
;
222 /* Set up the top and bottom of the PCI I/O segment for this bus. */
223 res
= bus
->resource
[0];
224 pcibios_resource_to_bus(bridge
, ®ion
, res
);
225 if (res
->flags
& IORESOURCE_IO
) {
226 pci_read_config_dword(bridge
, PCI_IO_BASE
, &l
);
228 l
|= (region
.start
>> 8) & 0x00f0;
229 l
|= region
.end
& 0xf000;
230 /* Set up upper 16 bits of I/O base/limit. */
231 io_upper16
= (region
.end
& 0xffff0000) | (region
.start
>> 16);
232 dev_info(&bridge
->dev
, " bridge window %pR\n", res
);
234 /* Clear upper 16 bits of I/O base/limit. */
237 dev_info(&bridge
->dev
, " bridge window [io disabled]\n");
239 /* Temporarily disable the I/O range before updating PCI_IO_BASE. */
240 pci_write_config_dword(bridge
, PCI_IO_BASE_UPPER16
, 0x0000ffff);
241 /* Update lower 16 bits of I/O base/limit. */
242 pci_write_config_dword(bridge
, PCI_IO_BASE
, l
);
243 /* Update upper 16 bits of I/O base/limit. */
244 pci_write_config_dword(bridge
, PCI_IO_BASE_UPPER16
, io_upper16
);
247 static void pci_setup_bridge_mmio(struct pci_bus
*bus
)
249 struct pci_dev
*bridge
= bus
->self
;
250 struct resource
*res
;
251 struct pci_bus_region region
;
254 /* Set up the top and bottom of the PCI Memory segment for this bus. */
255 res
= bus
->resource
[1];
256 pcibios_resource_to_bus(bridge
, ®ion
, res
);
257 if (res
->flags
& IORESOURCE_MEM
) {
258 l
= (region
.start
>> 16) & 0xfff0;
259 l
|= region
.end
& 0xfff00000;
260 dev_info(&bridge
->dev
, " bridge window %pR\n", res
);
263 dev_info(&bridge
->dev
, " bridge window [mem disabled]\n");
265 pci_write_config_dword(bridge
, PCI_MEMORY_BASE
, l
);
268 static void pci_setup_bridge_mmio_pref(struct pci_bus
*bus
)
270 struct pci_dev
*bridge
= bus
->self
;
271 struct resource
*res
;
272 struct pci_bus_region region
;
275 /* Clear out the upper 32 bits of PREF limit.
276 If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
277 disables PREF range, which is ok. */
278 pci_write_config_dword(bridge
, PCI_PREF_LIMIT_UPPER32
, 0);
280 /* Set up PREF base/limit. */
282 res
= bus
->resource
[2];
283 pcibios_resource_to_bus(bridge
, ®ion
, res
);
284 if (res
->flags
& IORESOURCE_PREFETCH
) {
285 l
= (region
.start
>> 16) & 0xfff0;
286 l
|= region
.end
& 0xfff00000;
287 if (res
->flags
& IORESOURCE_MEM_64
) {
288 bu
= upper_32_bits(region
.start
);
289 lu
= upper_32_bits(region
.end
);
291 dev_info(&bridge
->dev
, " bridge window %pR\n", res
);
294 dev_info(&bridge
->dev
, " bridge window [mem pref disabled]\n");
296 pci_write_config_dword(bridge
, PCI_PREF_MEMORY_BASE
, l
);
298 /* Set the upper 32 bits of PREF base & limit. */
299 pci_write_config_dword(bridge
, PCI_PREF_BASE_UPPER32
, bu
);
300 pci_write_config_dword(bridge
, PCI_PREF_LIMIT_UPPER32
, lu
);
303 static void __pci_setup_bridge(struct pci_bus
*bus
, unsigned long type
)
305 struct pci_dev
*bridge
= bus
->self
;
307 dev_info(&bridge
->dev
, "PCI bridge to [bus %02x-%02x]\n",
308 bus
->secondary
, bus
->subordinate
);
310 if (type
& IORESOURCE_IO
)
311 pci_setup_bridge_io(bus
);
313 if (type
& IORESOURCE_MEM
)
314 pci_setup_bridge_mmio(bus
);
316 if (type
& IORESOURCE_PREFETCH
)
317 pci_setup_bridge_mmio_pref(bus
);
319 pci_write_config_word(bridge
, PCI_BRIDGE_CONTROL
, bus
->bridge_ctl
);
322 static void pci_setup_bridge(struct pci_bus
*bus
)
324 unsigned long type
= IORESOURCE_IO
| IORESOURCE_MEM
|
327 __pci_setup_bridge(bus
, type
);
330 /* Check whether the bridge supports optional I/O and
331 prefetchable memory ranges. If not, the respective
332 base/limit registers must be read-only and read as 0. */
333 static void pci_bridge_check_ranges(struct pci_bus
*bus
)
337 struct pci_dev
*bridge
= bus
->self
;
338 struct resource
*b_res
;
340 b_res
= &bridge
->resource
[PCI_BRIDGE_RESOURCES
];
341 b_res
[1].flags
|= IORESOURCE_MEM
;
343 pci_read_config_word(bridge
, PCI_IO_BASE
, &io
);
345 pci_write_config_word(bridge
, PCI_IO_BASE
, 0xf0f0);
346 pci_read_config_word(bridge
, PCI_IO_BASE
, &io
);
347 pci_write_config_word(bridge
, PCI_IO_BASE
, 0x0);
350 b_res
[0].flags
|= IORESOURCE_IO
;
351 if (bridge
->vendor
== PCI_VENDOR_ID_DEC
&& bridge
->device
== 0x0001)
353 pci_read_config_dword(bridge
, PCI_PREF_MEMORY_BASE
, &pmem
);
355 pci_write_config_dword(bridge
, PCI_PREF_MEMORY_BASE
,
357 pci_read_config_dword(bridge
, PCI_PREF_MEMORY_BASE
, &pmem
);
358 pci_write_config_dword(bridge
, PCI_PREF_MEMORY_BASE
, 0x0);
361 b_res
[2].flags
|= IORESOURCE_MEM
| IORESOURCE_PREFETCH
;
362 if ((pmem
& PCI_PREF_RANGE_TYPE_MASK
) ==
363 PCI_PREF_RANGE_TYPE_64
) {
364 b_res
[2].flags
|= IORESOURCE_MEM_64
;
365 b_res
[2].flags
|= PCI_PREF_RANGE_TYPE_64
;
369 /* double check if bridge does support 64 bit pref */
370 if (b_res
[2].flags
& IORESOURCE_MEM_64
) {
371 u32 mem_base_hi
, tmp
;
372 pci_read_config_dword(bridge
, PCI_PREF_BASE_UPPER32
,
374 pci_write_config_dword(bridge
, PCI_PREF_BASE_UPPER32
,
376 pci_read_config_dword(bridge
, PCI_PREF_BASE_UPPER32
, &tmp
);
378 b_res
[2].flags
&= ~IORESOURCE_MEM_64
;
379 pci_write_config_dword(bridge
, PCI_PREF_BASE_UPPER32
,
384 /* Helper function for sizing routines: find first available
385 bus resource of a given type. Note: we intentionally skip
386 the bus resources which have already been assigned (that is,
387 have non-NULL parent resource). */
388 static struct resource
*find_free_bus_resource(struct pci_bus
*bus
, unsigned long type
)
392 unsigned long type_mask
= IORESOURCE_IO
| IORESOURCE_MEM
|
395 pci_bus_for_each_resource(bus
, r
, i
) {
396 if (r
== &ioport_resource
|| r
== &iomem_resource
)
398 if (r
&& (r
->flags
& type_mask
) == type
&& !r
->parent
)
404 /* Sizing the IO windows of the PCI-PCI bridge is trivial,
405 since these windows have 4K granularity and the IO ranges
406 of non-bridge PCI devices are limited to 256 bytes.
407 We must be careful with the ISA aliasing though. */
408 static void pbus_size_io(struct pci_bus
*bus
, resource_size_t min_size
)
411 struct resource
*b_res
= find_free_bus_resource(bus
, IORESOURCE_IO
);
412 unsigned long size
= 0, size1
= 0, old_size
;
417 list_for_each_entry(dev
, &bus
->devices
, bus_list
) {
420 for (i
= 0; i
< PCI_NUM_RESOURCES
; i
++) {
421 struct resource
*r
= &dev
->resource
[i
];
422 unsigned long r_size
;
424 if (r
->parent
|| !(r
->flags
& IORESOURCE_IO
))
426 r_size
= resource_size(r
);
429 /* Might be re-aligned for ISA */
437 old_size
= resource_size(b_res
);
440 /* To be fixed in 2.5: we should have sort of HAVE_ISA
441 flag in the struct pci_bus. */
442 #if defined(CONFIG_ISA) || defined(CONFIG_EISA)
443 size
= (size
& 0xff) + ((size
& ~0xffUL
) << 2);
445 size
= ALIGN(size
+ size1
, 4096);
449 if (b_res
->start
|| b_res
->end
)
450 dev_info(&bus
->self
->dev
, "disabling bridge window "
451 "%pR to [bus %02x-%02x] (unused)\n", b_res
,
452 bus
->secondary
, bus
->subordinate
);
456 /* Alignment of the IO window is always 4K */
458 b_res
->end
= b_res
->start
+ size
- 1;
459 b_res
->flags
|= IORESOURCE_STARTALIGN
;
462 /* Calculate the size of the bus and minimal alignment which
463 guarantees that all child resources fit in this size. */
464 static int pbus_size_mem(struct pci_bus
*bus
, unsigned long mask
,
465 unsigned long type
, resource_size_t min_size
)
468 resource_size_t min_align
, align
, size
, old_size
;
469 resource_size_t aligns
[12]; /* Alignments from 1Mb to 2Gb */
470 int order
, max_order
;
471 struct resource
*b_res
= find_free_bus_resource(bus
, type
);
472 unsigned int mem64_mask
= 0;
477 memset(aligns
, 0, sizeof(aligns
));
481 mem64_mask
= b_res
->flags
& IORESOURCE_MEM_64
;
482 b_res
->flags
&= ~IORESOURCE_MEM_64
;
484 list_for_each_entry(dev
, &bus
->devices
, bus_list
) {
487 for (i
= 0; i
< PCI_NUM_RESOURCES
; i
++) {
488 struct resource
*r
= &dev
->resource
[i
];
489 resource_size_t r_size
;
491 if (r
->parent
|| (r
->flags
& mask
) != type
)
493 r_size
= resource_size(r
);
494 /* For bridges size != alignment */
495 align
= pci_resource_alignment(dev
, r
);
496 order
= __ffs(align
) - 20;
498 dev_warn(&dev
->dev
, "disabling BAR %d: %pR "
499 "(bad alignment %#llx)\n", i
, r
,
500 (unsigned long long) align
);
507 /* Exclude ranges with size > align from
508 calculation of the alignment. */
510 aligns
[order
] += align
;
511 if (order
> max_order
)
513 mem64_mask
&= r
->flags
& IORESOURCE_MEM_64
;
518 old_size
= resource_size(b_res
);
526 for (order
= 0; order
<= max_order
; order
++) {
527 resource_size_t align1
= 1;
529 align1
<<= (order
+ 20);
533 else if (ALIGN(align
+ min_align
, min_align
) < align1
)
534 min_align
= align1
>> 1;
535 align
+= aligns
[order
];
537 size
= ALIGN(size
, min_align
);
539 if (b_res
->start
|| b_res
->end
)
540 dev_info(&bus
->self
->dev
, "disabling bridge window "
541 "%pR to [bus %02x-%02x] (unused)\n", b_res
,
542 bus
->secondary
, bus
->subordinate
);
546 b_res
->start
= min_align
;
547 b_res
->end
= size
+ min_align
- 1;
548 b_res
->flags
|= IORESOURCE_STARTALIGN
;
549 b_res
->flags
|= mem64_mask
;
553 static void pci_bus_size_cardbus(struct pci_bus
*bus
)
555 struct pci_dev
*bridge
= bus
->self
;
556 struct resource
*b_res
= &bridge
->resource
[PCI_BRIDGE_RESOURCES
];
560 * Reserve some resources for CardBus. We reserve
561 * a fixed amount of bus space for CardBus bridges.
564 b_res
[0].end
= pci_cardbus_io_size
- 1;
565 b_res
[0].flags
|= IORESOURCE_IO
| IORESOURCE_SIZEALIGN
;
568 b_res
[1].end
= pci_cardbus_io_size
- 1;
569 b_res
[1].flags
|= IORESOURCE_IO
| IORESOURCE_SIZEALIGN
;
572 * Check whether prefetchable memory is supported
575 pci_read_config_word(bridge
, PCI_CB_BRIDGE_CONTROL
, &ctrl
);
576 if (!(ctrl
& PCI_CB_BRIDGE_CTL_PREFETCH_MEM0
)) {
577 ctrl
|= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0
;
578 pci_write_config_word(bridge
, PCI_CB_BRIDGE_CONTROL
, ctrl
);
579 pci_read_config_word(bridge
, PCI_CB_BRIDGE_CONTROL
, &ctrl
);
583 * If we have prefetchable memory support, allocate
584 * two regions. Otherwise, allocate one region of
587 if (ctrl
& PCI_CB_BRIDGE_CTL_PREFETCH_MEM0
) {
589 b_res
[2].end
= pci_cardbus_mem_size
- 1;
590 b_res
[2].flags
|= IORESOURCE_MEM
| IORESOURCE_PREFETCH
| IORESOURCE_SIZEALIGN
;
593 b_res
[3].end
= pci_cardbus_mem_size
- 1;
594 b_res
[3].flags
|= IORESOURCE_MEM
| IORESOURCE_SIZEALIGN
;
597 b_res
[3].end
= pci_cardbus_mem_size
* 2 - 1;
598 b_res
[3].flags
|= IORESOURCE_MEM
| IORESOURCE_SIZEALIGN
;
602 void __ref
pci_bus_size_bridges(struct pci_bus
*bus
)
605 unsigned long mask
, prefmask
;
606 resource_size_t min_mem_size
= 0, min_io_size
= 0;
608 list_for_each_entry(dev
, &bus
->devices
, bus_list
) {
609 struct pci_bus
*b
= dev
->subordinate
;
613 switch (dev
->class >> 8) {
614 case PCI_CLASS_BRIDGE_CARDBUS
:
615 pci_bus_size_cardbus(b
);
618 case PCI_CLASS_BRIDGE_PCI
:
620 pci_bus_size_bridges(b
);
629 switch (bus
->self
->class >> 8) {
630 case PCI_CLASS_BRIDGE_CARDBUS
:
631 /* don't size cardbuses yet. */
634 case PCI_CLASS_BRIDGE_PCI
:
635 pci_bridge_check_ranges(bus
);
636 if (bus
->self
->is_hotplug_bridge
) {
637 min_io_size
= pci_hotplug_io_size
;
638 min_mem_size
= pci_hotplug_mem_size
;
641 pbus_size_io(bus
, min_io_size
);
642 /* If the bridge supports prefetchable range, size it
643 separately. If it doesn't, or its prefetchable window
644 has already been allocated by arch code, try
645 non-prefetchable range for both types of PCI memory
647 mask
= IORESOURCE_MEM
;
648 prefmask
= IORESOURCE_MEM
| IORESOURCE_PREFETCH
;
649 if (pbus_size_mem(bus
, prefmask
, prefmask
, min_mem_size
))
650 mask
= prefmask
; /* Success, size non-prefetch only. */
652 min_mem_size
+= min_mem_size
;
653 pbus_size_mem(bus
, mask
, IORESOURCE_MEM
, min_mem_size
);
657 EXPORT_SYMBOL(pci_bus_size_bridges
);
659 static void __ref
__pci_bus_assign_resources(const struct pci_bus
*bus
,
660 struct resource_list_x
*fail_head
)
665 pbus_assign_resources_sorted(bus
, fail_head
);
667 list_for_each_entry(dev
, &bus
->devices
, bus_list
) {
668 b
= dev
->subordinate
;
672 __pci_bus_assign_resources(b
, fail_head
);
674 switch (dev
->class >> 8) {
675 case PCI_CLASS_BRIDGE_PCI
:
676 if (!pci_is_enabled(dev
))
680 case PCI_CLASS_BRIDGE_CARDBUS
:
681 pci_setup_cardbus(b
);
685 dev_info(&dev
->dev
, "not setting up bridge for bus "
686 "%04x:%02x\n", pci_domain_nr(b
), b
->number
);
692 void __ref
pci_bus_assign_resources(const struct pci_bus
*bus
)
694 __pci_bus_assign_resources(bus
, NULL
);
696 EXPORT_SYMBOL(pci_bus_assign_resources
);
698 static void __ref
__pci_bridge_assign_resources(const struct pci_dev
*bridge
,
699 struct resource_list_x
*fail_head
)
703 pdev_assign_resources_sorted((struct pci_dev
*)bridge
, fail_head
);
705 b
= bridge
->subordinate
;
709 __pci_bus_assign_resources(b
, fail_head
);
711 switch (bridge
->class >> 8) {
712 case PCI_CLASS_BRIDGE_PCI
:
716 case PCI_CLASS_BRIDGE_CARDBUS
:
717 pci_setup_cardbus(b
);
721 dev_info(&bridge
->dev
, "not setting up bridge for bus "
722 "%04x:%02x\n", pci_domain_nr(b
), b
->number
);
726 static void pci_bridge_release_resources(struct pci_bus
*bus
,
730 bool changed
= false;
733 unsigned long type_mask
= IORESOURCE_IO
| IORESOURCE_MEM
|
737 for (idx
= PCI_BRIDGE_RESOURCES
; idx
<= PCI_BRIDGE_RESOURCE_END
;
739 r
= &dev
->resource
[idx
];
740 if ((r
->flags
& type_mask
) != type
)
745 * if there are children under that, we should release them
748 release_child_resources(r
);
749 if (!release_resource(r
)) {
750 dev_printk(KERN_DEBUG
, &dev
->dev
,
751 "resource %d %pR released\n", idx
, r
);
752 /* keep the old size */
753 r
->end
= resource_size(r
) - 1;
761 /* avoiding touch the one without PREF */
762 if (type
& IORESOURCE_PREFETCH
)
763 type
= IORESOURCE_PREFETCH
;
764 __pci_setup_bridge(bus
, type
);
773 * try to release pci bridge resources that is from leaf bridge,
774 * so we can allocate big new one later
776 static void __ref
pci_bus_release_bridge_resources(struct pci_bus
*bus
,
778 enum release_type rel_type
)
781 bool is_leaf_bridge
= true;
783 list_for_each_entry(dev
, &bus
->devices
, bus_list
) {
784 struct pci_bus
*b
= dev
->subordinate
;
788 is_leaf_bridge
= false;
790 if ((dev
->class >> 8) != PCI_CLASS_BRIDGE_PCI
)
793 if (rel_type
== whole_subtree
)
794 pci_bus_release_bridge_resources(b
, type
,
798 if (pci_is_root_bus(bus
))
801 if ((bus
->self
->class >> 8) != PCI_CLASS_BRIDGE_PCI
)
804 if ((rel_type
== whole_subtree
) || is_leaf_bridge
)
805 pci_bridge_release_resources(bus
, type
);
808 static void pci_bus_dump_res(struct pci_bus
*bus
)
810 struct resource
*res
;
813 pci_bus_for_each_resource(bus
, res
, i
) {
814 if (!res
|| !res
->end
|| !res
->flags
)
817 dev_printk(KERN_DEBUG
, &bus
->dev
, "resource %d %pR\n", i
, res
);
821 static void pci_bus_dump_resources(struct pci_bus
*bus
)
827 pci_bus_dump_res(bus
);
829 list_for_each_entry(dev
, &bus
->devices
, bus_list
) {
830 b
= dev
->subordinate
;
834 pci_bus_dump_resources(b
);
839 pci_assign_unassigned_resources(void)
843 /* Depth first, calculate sizes and alignments of all
844 subordinate buses. */
845 list_for_each_entry(bus
, &pci_root_buses
, node
) {
846 pci_bus_size_bridges(bus
);
848 /* Depth last, allocate resources and update the hardware. */
849 list_for_each_entry(bus
, &pci_root_buses
, node
) {
850 pci_bus_assign_resources(bus
);
851 pci_enable_bridges(bus
);
854 /* dump the resource on buses */
855 list_for_each_entry(bus
, &pci_root_buses
, node
) {
856 pci_bus_dump_resources(bus
);
860 void pci_assign_unassigned_bridge_resources(struct pci_dev
*bridge
)
862 struct pci_bus
*parent
= bridge
->subordinate
;
864 struct resource_list_x head
, *list
;
866 unsigned long type_mask
= IORESOURCE_IO
| IORESOURCE_MEM
|
872 pci_bus_size_bridges(parent
);
873 __pci_bridge_assign_resources(bridge
, &head
);
880 if (tried_times
>= 2) {
881 /* still fail, don't need to try more */
882 free_failed_list(&head
);
886 printk(KERN_DEBUG
"PCI: No. %d try to assign unassigned res\n",
890 * Try to release leaf bridge's resources that doesn't fit resource of
891 * child device under that bridge
893 for (list
= head
.next
; list
;) {
894 struct pci_bus
*bus
= list
->dev
->bus
;
895 unsigned long flags
= list
->flags
;
897 pci_bus_release_bridge_resources(bus
, flags
& type_mask
,
901 /* restore size and flags */
902 for (list
= head
.next
; list
;) {
903 struct resource
*res
= list
->res
;
905 res
->start
= list
->start
;
906 res
->end
= list
->end
;
907 res
->flags
= list
->flags
;
908 if (list
->dev
->subordinate
)
913 free_failed_list(&head
);
918 retval
= pci_reenable_device(bridge
);
919 pci_set_master(bridge
);
920 pci_enable_bridges(parent
);
922 EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources
);