4 * Copyright (c) 2004 - 2009 Paul Mundt
5 * Copyright (c) 2002 M. R. Brown
7 * Modelled after arch/mips/pci/pci.c:
8 * Copyright (C) 2003, 04 Ralf Baechle (ralf@linux-mips.org)
10 * This file is subject to the terms and conditions of the GNU General Public
11 * License. See the file "COPYING" in the main directory of this archive
14 #include <linux/kernel.h>
16 #include <linux/pci.h>
17 #include <linux/init.h>
18 #include <linux/types.h>
19 #include <linux/dma-debug.h>
21 #include <linux/mutex.h>
/* Lowest legal PCI I/O and memory addresses the resource allocator will
 * hand out; referenced by pcibios_align_resource() below. */
23 unsigned long PCIBIOS_MIN_IO
= 0x0000;
24 unsigned long PCIBIOS_MIN_MEM
= 0;
27 * The PCI controller list.
/* Singly-linked list of registered controllers ("hoses").  hose_tail
 * always points at the last next-pointer so register_pci_controller()
 * can append in O(1). */
29 static struct pci_channel
*hose_head
, **hose_tail
= &hose_head
;
/* Nonzero once the PCI subsystem is up; checked by
 * register_pci_controller() to decide whether to scan immediately.
 * NOTE(review): the line that sets it is not visible in this chunk —
 * presumably pcibios_init(); confirm against the full file. */
31 static int pci_initialized
;
/*
 * Scan one controller (hose): probe its bus, then size, assign and enable
 * the bridge resources.  next_busno persists across calls so successive
 * hoses get consecutive root bus numbers.
 * NOTE(review): extraction dropped several original lines here (braces,
 * the `struct pci_bus *bus;` declaration, and the bus-number overflow
 * clamp the comment below refers to) — tokens kept verbatim.
 */
33 static void __devinit
pcibios_scanbus(struct pci_channel
*hose
)
35 static int next_busno
;
/* Probe all devices behind this hose using its config-access ops. */
38 bus
= pci_scan_bus(next_busno
, hose
->pci_ops
, hose
);
/* Next root bus starts after the highest subordinate bus found. */
40 next_busno
= bus
->subordinate
+ 1;
41 /* Don't allow 8-bit bus number overflow inside the hose -
42 reserve some space for bridges. */
46 pci_bus_size_bridges(bus
);
47 pci_bus_assign_resources(bus
);
48 pci_enable_bridges(bus
);
/* Serializes bus scans triggered by controllers registered after init
 * (see register_pci_controller()). */
52 static DEFINE_MUTEX(pci_scan_mutex
);
/*
 * Register a PCI channel (hose) with the core: claim its memory and I/O
 * windows from the global resource trees, append it to the hose list,
 * and — if the PCI subsystem is already up — scan it immediately under
 * pci_scan_mutex.  On resource conflict the scan is skipped.
 * NOTE(review): several original lines (braces, error labels/returns,
 * the printk prefix lines for the two message strings) are missing from
 * this chunk — tokens kept verbatim.
 */
54 void __devinit
register_pci_controller(struct pci_channel
*hose
)
56 if (request_resource(&iomem_resource
, hose
->mem_resource
) < 0)
58 if (request_resource(&ioport_resource
, hose
->io_resource
) < 0) {
/* Undo the mem_resource claim so we fail cleanly. */
59 release_resource(hose
->mem_resource
);
/* Append to the controller list (hose_tail tracks the last next ptr). */
64 hose_tail
= &hose
->next
;
67 * Do not panic here but later - this might happen before console init.
69 if (!hose
->io_map_base
) {
71 "registering PCI controller with io_map_base unset\n");
75 * Scan the bus if it is registered after the PCI subsystem
78 if (pci_initialized
) {
79 mutex_lock(&pci_scan_mutex
);
80 pcibios_scanbus(hose
);
81 mutex_unlock(&pci_scan_mutex
);
88 "Skipping PCI bus scan due to resource conflict\n");
/*
 * Subsystem init: scan every controller registered so far, fix up device
 * IRQs via the platform mapping, and register the PCI bus type with
 * dma-debug.  NOTE(review): the lines setting pci_initialized and the
 * return statement are not visible in this chunk — presumably present in
 * the full file; tokens kept verbatim.
 */
91 static int __init
pcibios_init(void)
93 struct pci_channel
*hose
;
95 /* Scan all of the recorded PCI controllers. */
96 for (hose
= hose_head
; hose
; hose
= hose
->next
)
97 pcibios_scanbus(hose
);
/* Route each device's interrupt pin through the platform IRQ map. */
99 pci_fixup_irqs(pci_common_swizzle
, pcibios_map_platform_irq
);
101 dma_debug_add_bus(&pci_bus_type
);
107 subsys_initcall(pcibios_init
);
/*
 * Translate a device's bus-relative BAR addresses into CPU addresses by
 * adding the hose's io_offset/mem_offset to each resource's start/end.
 * Resources that are unset or marked IORESOURCE_PCI_FIXED are skipped.
 * NOTE(review): the `continue;` statements following the two skip checks
 * and the loop-variable declaration are missing from this chunk — tokens
 * kept verbatim.
 */
109 static void pcibios_fixup_device_resources(struct pci_dev
*dev
,
112 /* Update device resources. */
113 struct pci_channel
*hose
= bus
->sysdata
;
114 unsigned long offset
= 0;
117 for (i
= 0; i
< PCI_NUM_RESOURCES
; i
++) {
/* Skip resources the firmware/core never assigned. */
118 if (!dev
->resource
[i
].start
)
/* Skip resources pinned at a fixed address. */
120 if (dev
->resource
[i
].flags
& IORESOURCE_PCI_FIXED
)
/* Pick the offset matching the resource's address space. */
122 if (dev
->resource
[i
].flags
& IORESOURCE_IO
)
123 offset
= hose
->io_offset
;
124 else if (dev
->resource
[i
].flags
& IORESOURCE_MEM
)
125 offset
= hose
->mem_offset
;
127 dev
->resource
[i
].start
+= offset
;
128 dev
->resource
[i
].end
+= offset
;
133 * Called after each bus is probed, but before its children
/*
 * Point the bus's window resources at the owning channel's I/O and
 * memory windows, then fix up resources of every non-bridge device on
 * the bus.  NOTE(review): the statement reassigning `dev` from `ln`
 * inside the loop (and the `continue` for bridges) is missing from this
 * chunk — as written `dev` stays bus->self; confirm against the full
 * file.  Tokens kept verbatim.
 */
136 void __devinit
pcibios_fixup_bus(struct pci_bus
*bus
)
138 struct pci_dev
*dev
= bus
->self
;
139 struct list_head
*ln
;
140 struct pci_channel
*chan
= bus
->sysdata
;
/* Root-bus windows come straight from the channel. */
143 bus
->resource
[0] = chan
->io_resource
;
144 bus
->resource
[1] = chan
->mem_resource
;
/* Walk the device list on this bus. */
147 for (ln
= bus
->devices
.next
; ln
!= &bus
->devices
; ln
= ln
->next
) {
/* PCI-PCI bridges are left alone; everything else gets the
 * hose-offset fixup. */
150 if ((dev
->class >> 8) != PCI_CLASS_BRIDGE_PCI
)
151 pcibios_fixup_device_resources(dev
, bus
);
156 * We need to avoid collisions with `mirrored' VGA ports
157 * and other strange ISA hardware, so we always want the
158 * addresses to be allocated in the 0x000-0x0ff region
/*
 * Resource-allocation alignment hook: clamp candidate addresses to the
 * channel's window start plus PCIBIOS_MIN_IO/MIN_MEM, and round I/O
 * allocations up to a 0x400 boundary so they land in the low 0x100 of
 * each 1K block (avoiding ISA/VGA aliases).  NOTE(review): the final
 * store of `start` back into the resource is not visible in this chunk —
 * tokens kept verbatim.
 */
161 void pcibios_align_resource(void *data
, struct resource
*res
,
162 resource_size_t size
, resource_size_t align
)
164 struct pci_dev
*dev
= data
;
165 struct pci_channel
*chan
= dev
->sysdata
;
166 resource_size_t start
= res
->start
;
168 if (res
->flags
& IORESOURCE_IO
) {
/* Never allocate below the hose's I/O window + platform minimum. */
169 if (start
< PCIBIOS_MIN_IO
+ chan
->io_resource
->start
)
170 start
= PCIBIOS_MIN_IO
+ chan
->io_resource
->start
;
173 * Put everything into 0x00-0xff region modulo 0x400.
/* Round up to the next 1K boundary. */
176 start
= (start
+ 0x3ff) & ~0x3ff;
179 } else if (res
->flags
& IORESOURCE_MEM
) {
/* Likewise for memory: respect window start + platform minimum. */
180 if (start
< PCIBIOS_MIN_MEM
+ chan
->mem_resource
->start
)
181 start
= PCIBIOS_MIN_MEM
+ chan
->mem_resource
->start
;
/*
 * Convert a CPU-view resource into a bus-relative region by subtracting
 * the owning hose's io_offset/mem_offset (the inverse of
 * pcibios_fixup_device_resources()).
 */
187 void pcibios_resource_to_bus(struct pci_dev
*dev
, struct pci_bus_region
*region
,
188 struct resource
*res
)
190 struct pci_channel
*hose
= dev
->sysdata
;
191 unsigned long offset
= 0;
/* Choose the offset matching the resource's address space. */
193 if (res
->flags
& IORESOURCE_IO
)
194 offset
= hose
->io_offset
;
195 else if (res
->flags
& IORESOURCE_MEM
)
196 offset
= hose
->mem_offset
;
198 region
->start
= res
->start
- offset
;
199 region
->end
= res
->end
- offset
;
/*
 * Convert a bus-relative region back into a CPU-view resource by adding
 * the owning hose's io_offset/mem_offset (inverse of
 * pcibios_resource_to_bus()).  NOTE(review): the return-type line of
 * this definition is not visible in this chunk.
 */
203 pcibios_bus_to_resource(struct pci_dev
*dev
, struct resource
*res
,
204 struct pci_bus_region
*region
)
206 struct pci_channel
*hose
= dev
->sysdata
;
207 unsigned long offset
= 0;
/* Choose the offset matching the resource's address space. */
209 if (res
->flags
& IORESOURCE_IO
)
210 offset
= hose
->io_offset
;
211 else if (res
->flags
& IORESOURCE_MEM
)
212 offset
= hose
->mem_offset
;
214 res
->start
= region
->start
+ offset
;
215 res
->end
= region
->end
+ offset
;
/*
 * Enable a device's decoding: for every resource selected by `mask`,
 * turn on PCI_COMMAND_IO / PCI_COMMAND_MEMORY as appropriate and write
 * the command register back only if it changed.  ROM resources are only
 * enabled when IORESOURCE_ROM_ENABLE is set; a resource with end set
 * but start unassigned indicates a collision and is reported as an
 * error.  NOTE(review): the declarations of cmd/old_cmd/idx/r, the
 * continue/return statements, and the error-return path are missing
 * from this chunk — tokens kept verbatim.
 */
218 int pcibios_enable_device(struct pci_dev
*dev
, int mask
)
224 pci_read_config_word(dev
, PCI_COMMAND
, &cmd
);
226 for (idx
=0; idx
< PCI_NUM_RESOURCES
; idx
++) {
227 /* Only set up the requested stuff */
228 if (!(mask
& (1<<idx
)))
231 r
= &dev
->resource
[idx
];
/* Only I/O and memory BARs affect the command register. */
232 if (!(r
->flags
& (IORESOURCE_IO
| IORESOURCE_MEM
)))
/* Expansion ROMs stay disabled unless explicitly enabled. */
234 if ((idx
== PCI_ROM_RESOURCE
) &&
235 (!(r
->flags
& IORESOURCE_ROM_ENABLE
)))
/* end != 0 with start == 0 means the allocator could not place it. */
237 if (!r
->start
&& r
->end
) {
238 printk(KERN_ERR
"PCI: Device %s not available "
239 "because of resource collisions\n",
243 if (r
->flags
& IORESOURCE_IO
)
244 cmd
|= PCI_COMMAND_IO
;
245 if (r
->flags
& IORESOURCE_MEM
)
246 cmd
|= PCI_COMMAND_MEMORY
;
/* Avoid a config write when nothing changed. */
248 if (cmd
!= old_cmd
) {
249 printk("PCI: Enabling device %s (%04x -> %04x)\n",
250 pci_name(dev
), old_cmd
, cmd
);
251 pci_write_config_word(dev
, PCI_COMMAND
, cmd
);
257 * If we set up a device for bus mastering, we need to check and set
258 * the latency timer as it may not be properly set.
/* Upper clamp for the latency timer programmed below. */
260 static unsigned int pcibios_max_latency
= 255;
/*
 * Read the device's latency timer and clamp it into a sane range
 * (raised to 64 when too low, capped at pcibios_max_latency when too
 * high) before writing it back.  NOTE(review): the `if (lat < 16)`
 * guard, the early-return branch, braces, and the printk argument line
 * are missing from this chunk — tokens kept verbatim.
 */
262 void pcibios_set_master(struct pci_dev
*dev
)
265 pci_read_config_byte(dev
, PCI_LATENCY_TIMER
, &lat
);
267 lat
= (64 <= pcibios_max_latency
) ? 64 : pcibios_max_latency
;
268 else if (lat
> pcibios_max_latency
)
269 lat
= pcibios_max_latency
;
272 printk(KERN_INFO
"PCI: Setting latency timer of device %s to %d\n",
274 pci_write_config_byte(dev
, PCI_LATENCY_TIMER
, lat
);
/*
 * Record the routed IRQ in the device's PCI_INTERRUPT_LINE config
 * register (called from the generic IRQ fixup pass).
 */
277 void __init
pcibios_update_irq(struct pci_dev
*dev
, int irq
)
279 pci_write_config_byte(dev
, PCI_INTERRUPT_LINE
, irq
);
/*
 * Kernel command-line hook for "pci=" options.  NOTE(review): the body
 * is not visible in this chunk — presumably it just returns `str`
 * (unhandled); confirm against the full file.
 */
282 char * __devinit
pcibios_setup(char *str
)
/*
 * mmap a device's PCI memory window into userspace as an uncached
 * mapping.  I/O-space mmap is rejected and write-combining is ignored.
 * NOTE(review): the rejection `return`, braces, and the final
 * remap_pfn_range() protection argument are missing from this chunk —
 * tokens kept verbatim.
 */
287 int pci_mmap_page_range(struct pci_dev
*dev
, struct vm_area_struct
*vma
,
288 enum pci_mmap_state mmap_state
, int write_combine
)
291 * I/O space can be accessed via normal processor loads and stores on
292 * this platform but for now we elect not to do this and portable
293 * drivers should not do this anyway.
295 if (mmap_state
== pci_mmap_io
)
299 * Ignore write-combine; for now only return uncached mappings.
301 vma
->vm_page_prot
= pgprot_noncached(vma
->vm_page_prot
);
303 return remap_pfn_range(vma
, vma
->vm_start
, vma
->vm_pgoff
,
304 vma
->vm_end
- vma
->vm_start
,
/*
 * Map `nr` ports starting at `port` for a device: the cookie is simply
 * the owning channel's io_map_base plus the port number.  Channels that
 * never set io_map_base fall back to generic_io_base.
 */
308 static void __iomem
*ioport_map_pci(struct pci_dev
*dev
,
309 unsigned long port
, unsigned int nr
)
311 struct pci_channel
*chan
= dev
->sysdata
;
313 if (!chan
->io_map_base
)
314 chan
->io_map_base
= generic_io_base
;
316 return (void __iomem
*)(chan
->io_map_base
+ port
);
/*
 * Map BAR `bar` of `dev`, clamping the mapping to `maxlen` bytes when
 * nonzero.  I/O BARs go through ioport_map_pci(); memory BARs use
 * ioremap()/ioremap_nocache() depending on IORESOURCE_CACHEABLE.
 * Returns NULL for unassigned/zero-length BARs.  NOTE(review): the
 * early `return NULL`, the `len = maxlen` clamp, and the final fallback
 * return are missing from this chunk — tokens kept verbatim.
 */
319 void __iomem
*pci_iomap(struct pci_dev
*dev
, int bar
, unsigned long maxlen
)
321 resource_size_t start
= pci_resource_start(dev
, bar
);
322 resource_size_t len
= pci_resource_len(dev
, bar
);
323 unsigned long flags
= pci_resource_flags(dev
, bar
);
/* Unassigned or empty BAR: nothing to map. */
325 if (unlikely(!len
|| !start
))
327 if (maxlen
&& len
> maxlen
)
330 if (flags
& IORESOURCE_IO
)
331 return ioport_map_pci(dev
, start
, len
);
334 * Presently the IORESOURCE_MEM case is a bit special, most
335 * SH7751 style PCI controllers have PCI memory at a fixed
336 * location in the address space where no remapping is desired.
337 * With the IORESOURCE_MEM case more care has to be taken
338 * to inhibit page table mapping for legacy cores, but this is
339 * punted off to __ioremap().
342 if (flags
& IORESOURCE_MEM
) {
343 if (flags
& IORESOURCE_CACHEABLE
)
344 return ioremap(start
, len
);
346 return ioremap_nocache(start
, len
);
351 EXPORT_SYMBOL(pci_iomap
);
/*
 * Undo a pci_iomap() mapping.  NOTE(review): the body is not visible in
 * this chunk — presumably iounmap() for memory mappings; confirm
 * against the full file.
 */
353 void pci_iounmap(struct pci_dev
*dev
, void __iomem
*addr
)
357 EXPORT_SYMBOL(pci_iounmap
);
/* Symbols needed by hotplug-capable PCI drivers built as modules.
 * NOTE(review): the matching #endif is not visible in this chunk. */
359 #ifdef CONFIG_HOTPLUG
360 EXPORT_SYMBOL(pcibios_resource_to_bus
);
361 EXPORT_SYMBOL(pcibios_bus_to_resource
);
362 EXPORT_SYMBOL(PCIBIOS_MIN_IO
);
363 EXPORT_SYMBOL(PCIBIOS_MIN_MEM
);