/*
 * Broadcom specific AMBA
 * PCI Host
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */
8 #include "bcma_private.h"
9 #include <linux/slab.h>
10 #include <linux/bcma/bcma.h>
11 #include <linux/pci.h>
12 #include <linux/module.h>
14 static void bcma_host_pci_switch_core(struct bcma_device
*core
)
16 int win2
= core
->bus
->host_is_pcie2
?
17 BCMA_PCIE2_BAR0_WIN2
: BCMA_PCI_BAR0_WIN2
;
19 pci_write_config_dword(core
->bus
->host_pci
, BCMA_PCI_BAR0_WIN
,
21 pci_write_config_dword(core
->bus
->host_pci
, win2
, core
->wrap
);
22 core
->bus
->mapped_core
= core
;
23 bcma_debug(core
->bus
, "Switched to core: 0x%X\n", core
->id
.id
);
26 /* Provides access to the requested core. Returns base offset that has to be
27 * used. It makes use of fixed windows when possible. */
28 static u16
bcma_host_pci_provide_access_to_core(struct bcma_device
*core
)
30 switch (core
->id
.id
) {
31 case BCMA_CORE_CHIPCOMMON
:
32 return 3 * BCMA_CORE_SIZE
;
34 return 2 * BCMA_CORE_SIZE
;
37 if (core
->bus
->mapped_core
!= core
)
38 bcma_host_pci_switch_core(core
);
42 static u8
bcma_host_pci_read8(struct bcma_device
*core
, u16 offset
)
44 offset
+= bcma_host_pci_provide_access_to_core(core
);
45 return ioread8(core
->bus
->mmio
+ offset
);
48 static u16
bcma_host_pci_read16(struct bcma_device
*core
, u16 offset
)
50 offset
+= bcma_host_pci_provide_access_to_core(core
);
51 return ioread16(core
->bus
->mmio
+ offset
);
54 static u32
bcma_host_pci_read32(struct bcma_device
*core
, u16 offset
)
56 offset
+= bcma_host_pci_provide_access_to_core(core
);
57 return ioread32(core
->bus
->mmio
+ offset
);
60 static void bcma_host_pci_write8(struct bcma_device
*core
, u16 offset
,
63 offset
+= bcma_host_pci_provide_access_to_core(core
);
64 iowrite8(value
, core
->bus
->mmio
+ offset
);
67 static void bcma_host_pci_write16(struct bcma_device
*core
, u16 offset
,
70 offset
+= bcma_host_pci_provide_access_to_core(core
);
71 iowrite16(value
, core
->bus
->mmio
+ offset
);
74 static void bcma_host_pci_write32(struct bcma_device
*core
, u16 offset
,
77 offset
+= bcma_host_pci_provide_access_to_core(core
);
78 iowrite32(value
, core
->bus
->mmio
+ offset
);
81 #ifdef CONFIG_BCMA_BLOCKIO
82 static void bcma_host_pci_block_read(struct bcma_device
*core
, void *buffer
,
83 size_t count
, u16 offset
, u8 reg_width
)
85 void __iomem
*addr
= core
->bus
->mmio
+ offset
;
86 if (core
->bus
->mapped_core
!= core
)
87 bcma_host_pci_switch_core(core
);
90 ioread8_rep(addr
, buffer
, count
);
94 ioread16_rep(addr
, buffer
, count
>> 1);
98 ioread32_rep(addr
, buffer
, count
>> 2);
105 static void bcma_host_pci_block_write(struct bcma_device
*core
,
106 const void *buffer
, size_t count
,
107 u16 offset
, u8 reg_width
)
109 void __iomem
*addr
= core
->bus
->mmio
+ offset
;
110 if (core
->bus
->mapped_core
!= core
)
111 bcma_host_pci_switch_core(core
);
114 iowrite8_rep(addr
, buffer
, count
);
118 iowrite16_rep(addr
, buffer
, count
>> 1);
122 iowrite32_rep(addr
, buffer
, count
>> 2);
130 static u32
bcma_host_pci_aread32(struct bcma_device
*core
, u16 offset
)
132 if (core
->bus
->mapped_core
!= core
)
133 bcma_host_pci_switch_core(core
);
134 return ioread32(core
->bus
->mmio
+ (1 * BCMA_CORE_SIZE
) + offset
);
137 static void bcma_host_pci_awrite32(struct bcma_device
*core
, u16 offset
,
140 if (core
->bus
->mapped_core
!= core
)
141 bcma_host_pci_switch_core(core
);
142 iowrite32(value
, core
->bus
->mmio
+ (1 * BCMA_CORE_SIZE
) + offset
);
145 static const struct bcma_host_ops bcma_host_pci_ops
= {
146 .read8
= bcma_host_pci_read8
,
147 .read16
= bcma_host_pci_read16
,
148 .read32
= bcma_host_pci_read32
,
149 .write8
= bcma_host_pci_write8
,
150 .write16
= bcma_host_pci_write16
,
151 .write32
= bcma_host_pci_write32
,
152 #ifdef CONFIG_BCMA_BLOCKIO
153 .block_read
= bcma_host_pci_block_read
,
154 .block_write
= bcma_host_pci_block_write
,
156 .aread32
= bcma_host_pci_aread32
,
157 .awrite32
= bcma_host_pci_awrite32
,
160 static int bcma_host_pci_probe(struct pci_dev
*dev
,
161 const struct pci_device_id
*id
)
163 struct bcma_bus
*bus
;
168 bus
= kzalloc(sizeof(*bus
), GFP_KERNEL
);
172 /* Basic PCI configuration */
173 err
= pci_enable_device(dev
);
177 err
= pci_request_regions(dev
, "bcma-pci-bridge");
179 goto err_pci_disable
;
182 /* Disable the RETRY_TIMEOUT register (0x41) to keep
183 * PCI Tx retries from interfering with C3 CPU state */
184 pci_read_config_dword(dev
, 0x40, &val
);
185 if ((val
& 0x0000ff00) != 0)
186 pci_write_config_dword(dev
, 0x40, val
& 0xffff00ff);
188 /* SSB needed additional powering up, do we have any AMBA PCI cards? */
189 if (!pci_is_pcie(dev
)) {
190 bcma_err(bus
, "PCI card detected, they are not supported.\n");
192 goto err_pci_release_regions
;
195 bus
->dev
= &dev
->dev
;
199 bus
->mmio
= pci_iomap(dev
, 0, ~0UL);
201 goto err_pci_release_regions
;
205 bus
->hosttype
= BCMA_HOSTTYPE_PCI
;
206 bus
->ops
= &bcma_host_pci_ops
;
208 bus
->boardinfo
.vendor
= bus
->host_pci
->subsystem_vendor
;
209 bus
->boardinfo
.type
= bus
->host_pci
->subsystem_device
;
211 /* Initialize struct, detect chip */
214 /* Scan bus to find out generation of PCIe core */
215 err
= bcma_bus_scan(bus
);
217 goto err_pci_unmap_mmio
;
219 if (bcma_find_core(bus
, BCMA_CORE_PCIE2
))
220 bus
->host_is_pcie2
= true;
223 err
= bcma_bus_register(bus
);
225 goto err_unregister_cores
;
227 pci_set_drvdata(dev
, bus
);
232 err_unregister_cores
:
233 bcma_unregister_cores(bus
);
235 pci_iounmap(dev
, bus
->mmio
);
236 err_pci_release_regions
:
237 pci_release_regions(dev
);
239 pci_disable_device(dev
);
245 static void bcma_host_pci_remove(struct pci_dev
*dev
)
247 struct bcma_bus
*bus
= pci_get_drvdata(dev
);
249 bcma_bus_unregister(bus
);
250 pci_iounmap(dev
, bus
->mmio
);
251 pci_release_regions(dev
);
252 pci_disable_device(dev
);
256 #ifdef CONFIG_PM_SLEEP
257 static int bcma_host_pci_suspend(struct device
*dev
)
259 struct bcma_bus
*bus
= dev_get_drvdata(dev
);
261 bus
->mapped_core
= NULL
;
263 return bcma_bus_suspend(bus
);
/* System-sleep resume hook: delegate to the generic bcma bus resume. */
static int bcma_host_pci_resume(struct device *dev)
{
	struct bcma_bus *bus = dev_get_drvdata(dev);

	return bcma_bus_resume(bus);
}
/* Wire the suspend/resume hooks into a dev_pm_ops instance. */
static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
			 bcma_host_pci_resume);
#define BCMA_PM_OPS	(&bcma_pm_ops)
277 #else /* CONFIG_PM_SLEEP */
279 #define BCMA_PM_OPS NULL
281 #endif /* CONFIG_PM_SLEEP */
283 static const struct pci_device_id bcma_pci_bridge_tbl
[] = {
284 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, 0x0576) },
285 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, 0x4313) },
286 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, 43224) }, /* 0xa8d8 */
287 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, 0x4331) },
288 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, 0x4353) },
289 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, 0x4357) },
290 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, 0x4358) },
291 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, 0x4359) },
292 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, 0x4360) },
293 { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM
, 0x4365, PCI_VENDOR_ID_DELL
, 0x0016) },
294 { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM
, 0x4365, PCI_VENDOR_ID_DELL
, 0x0018) },
295 { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM
, 0x4365, PCI_VENDOR_ID_FOXCONN
, 0xe092) },
296 { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM
, 0x4365, PCI_VENDOR_ID_HP
, 0x804a) },
297 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, 0x43a0) },
298 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, 0x43a9) },
299 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, 0x43aa) },
300 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, 0x43b1) },
301 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, 0x4727) },
302 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, 43227) }, /* 0xa8db, BCM43217 (sic!) */
303 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM
, 43228) }, /* 0xa8dc */
306 MODULE_DEVICE_TABLE(pci
, bcma_pci_bridge_tbl
);
308 static struct pci_driver bcma_pci_bridge_driver
= {
309 .name
= "bcma-pci-bridge",
310 .id_table
= bcma_pci_bridge_tbl
,
311 .probe
= bcma_host_pci_probe
,
312 .remove
= bcma_host_pci_remove
,
313 .driver
.pm
= BCMA_PM_OPS
,
316 int __init
bcma_host_pci_init(void)
318 return pci_register_driver(&bcma_pci_bridge_driver
);
321 void __exit
bcma_host_pci_exit(void)
323 pci_unregister_driver(&bcma_pci_bridge_driver
);
326 /**************************************************
327 * Runtime ops for drivers.
328 **************************************************/
330 /* See also pcicore_up */
331 void bcma_host_pci_up(struct bcma_bus
*bus
)
333 if (bus
->hosttype
!= BCMA_HOSTTYPE_PCI
)
336 if (bus
->host_is_pcie2
)
337 bcma_core_pcie2_up(&bus
->drv_pcie2
);
339 bcma_core_pci_up(&bus
->drv_pci
[0]);
341 EXPORT_SYMBOL_GPL(bcma_host_pci_up
);
343 /* See also pcicore_down */
344 void bcma_host_pci_down(struct bcma_bus
*bus
)
346 if (bus
->hosttype
!= BCMA_HOSTTYPE_PCI
)
349 if (!bus
->host_is_pcie2
)
350 bcma_core_pci_down(&bus
->drv_pci
[0]);
352 EXPORT_SYMBOL_GPL(bcma_host_pci_down
);
354 /* See also si_pci_setup */
355 int bcma_host_pci_irq_ctl(struct bcma_bus
*bus
, struct bcma_device
*core
,
358 struct pci_dev
*pdev
;
362 if (bus
->hosttype
!= BCMA_HOSTTYPE_PCI
) {
363 /* This bcma device is not on a PCI host-bus. So the IRQs are
364 * not routed through the PCI core.
365 * So we must not enable routing through the PCI core. */
369 pdev
= bus
->host_pci
;
371 err
= pci_read_config_dword(pdev
, BCMA_PCI_IRQMASK
, &tmp
);
375 coremask
= BIT(core
->core_index
) << 8;
381 err
= pci_write_config_dword(pdev
, BCMA_PCI_IRQMASK
, tmp
);
386 EXPORT_SYMBOL_GPL(bcma_host_pci_irq_ctl
);