arch/powerpc/platforms/powernv/pci.c
/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Currently supports only P5IOC2
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/abs_addr.h>

#include "powernv.h"
#include "pci.h"

/* Delay in usec */
#define PCI_RESET_DELAY_US	3000000

#define cfg_dbg(fmt...)	do { } while(0)
//#define cfg_dbg(fmt...)	printk(fmt)

#ifdef CONFIG_PCI_MSI
static int pnv_msi_check_device(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;

	return (phb && phb->msi_map) ? 0 : -ENODEV;
}
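
/*
 * Hardware MSIs are tracked in the phb->msi_map bitmap, protected by
 * phb->lock. The search starts at msi_next and wraps around to 0 once;
 * the returned interrupt number is offset by msi_base, and 0 means no
 * free MSI was available.
 */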
static unsigned int pnv_get_one_msi(struct pnv_phb *phb)
{
	unsigned int id;

	spin_lock(&phb->lock);
	id = find_next_zero_bit(phb->msi_map, phb->msi_count, phb->msi_next);
	if (id >= phb->msi_count && phb->msi_next)
		id = find_next_zero_bit(phb->msi_map, phb->msi_count, 0);
	if (id >= phb->msi_count) {
		spin_unlock(&phb->lock);
		return 0;
	}
	__set_bit(id, phb->msi_map);
	spin_unlock(&phb->lock);
	return id + phb->msi_base;
}

static void pnv_put_msi(struct pnv_phb *phb, unsigned int hwirq)
{
	unsigned int id;

	if (WARN_ON(hwirq < phb->msi_base ||
		    hwirq >= (phb->msi_base + phb->msi_count)))
		return;
	id = hwirq - phb->msi_base;
	spin_lock(&phb->lock);
	__clear_bit(id, phb->msi_map);
	spin_unlock(&phb->lock);
}
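
/*
 * For each MSI descriptor of the device: reserve a hardware MSI, map it
 * to a Linux virtual irq, have the PHB-specific msi_setup hook compose
 * the MSI message, then bind the descriptor and write the message to
 * the device. Any failure unwinds the irq mapping and the hardware MSI.
 */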
static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct msi_msg msg;
	unsigned int hwirq, virq;
	int rc;

	if (WARN_ON(!phb))
		return -ENODEV;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}
		hwirq = pnv_get_one_msi(phb);
		if (!hwirq) {
			pr_warn("%s: Failed to find a free MSI\n",
				pci_name(pdev));
			return -ENOSPC;
		}
		virq = irq_create_mapping(NULL, hwirq);
		if (virq == NO_IRQ) {
			pr_warn("%s: Failed to map MSI to linux irq\n",
				pci_name(pdev));
			pnv_put_msi(phb, hwirq);
			return -ENOMEM;
		}
		rc = phb->msi_setup(phb, pdev, hwirq, entry->msi_attrib.is_64,
				    &msg);
		if (rc) {
			pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			pnv_put_msi(phb, hwirq);
			return rc;
		}
		irq_set_msi_desc(virq, entry);
		write_msi_msg(virq, &msg);
	}
	return 0;
}

static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;

	if (WARN_ON(!phb))
		return;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;
		irq_set_msi_desc(entry->irq, NULL);
		pnv_put_msi(phb, virq_to_hw(entry->irq));
		irq_dispose_mapping(entry->irq);
	}
}
#endif /* CONFIG_PCI_MSI */
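
/*
 * After a config access, check whether the target PE was frozen by EEH
 * (a frozen PE stops responding and reads return all ones). Query the
 * freeze state through OPAL and, if set, clear it again.
 */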
static void pnv_pci_config_check_eeh(struct pnv_phb *phb, struct pci_bus *bus,
				     u32 bdfn)
{
	s64 rc;
	u8 fstate;
	u16 pcierr;
	u32 pe_no;

	/* Get PE# if we support IODA */
	pe_no = phb->bdfn_to_pe ? phb->bdfn_to_pe(phb, bus, bdfn & 0xff) : 0;

	/* Read freeze status */
	rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no, &fstate, &pcierr,
					NULL);
	if (rc) {
		pr_warning("PCI %d: Failed to read EEH status for PE#%d,"
			   " err %lld\n", phb->hose->global_number, pe_no, rc);
		return;
	}
	cfg_dbg(" -> EEH check, bdfn=%04x PE%d fstate=%x\n",
		bdfn, pe_no, fstate);
	if (fstate != 0) {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
					       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warning("PCI %d: Failed to clear EEH freeze state"
				   " for PE#%d, err %lld\n",
				   phb->hose->global_number, pe_no, rc);
		}
	}
}
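
/*
 * Config space accessors. OPAL addresses a config target by "bdfn":
 * the bus number in bits 15:8 and devfn (device/function) in bits 7:0.
 */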
static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	u32 bdfn = (((uint64_t)bus->number) << 8) | devfn;
	s64 rc;

	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;

	switch (size) {
	case 1: {
		u8 v8;
		rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
		*val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
		break;
	}
	case 2: {
		u16 v16;
		rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
						    &v16);
		*val = (rc == OPAL_SUCCESS) ? v16 : 0xffff;
		break;
	}
	case 4: {
		u32 v32;
		rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
		*val = (rc == OPAL_SUCCESS) ? v32 : 0xffffffff;
		break;
	}
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}
	cfg_dbg("pnv_pci_read_config bus: %x devfn: %x +%x/%x -> %08x\n",
		bus->number, devfn, where, size, *val);

	/* Check if the PHB got frozen due to an error (no response) */
	pnv_pci_config_check_eeh(phb, bus, bdfn);

	return PCIBIOS_SUCCESSFUL;
}

static int pnv_pci_write_config(struct pci_bus *bus,
				unsigned int devfn,
				int where, int size, u32 val)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	u32 bdfn = (((uint64_t)bus->number) << 8) | devfn;

	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;

	cfg_dbg("pnv_pci_write_config bus: %x devfn: %x +%x/%x -> %08x\n",
		bus->number, devfn, where, size, val);
	switch (size) {
	case 1:
		opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
		break;
	case 2:
		opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
		break;
	case 4:
		opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	/* Check if the PHB got frozen due to an error (no response) */
	pnv_pci_config_check_eeh(phb, bus, bdfn);

	return PCIBIOS_SUCCESSFUL;
}

struct pci_ops pnv_pci_ops = {
	.read  = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};
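
/*
 * TCE (DMA translation) helpers. Each 64-bit TCE entry holds the real
 * page number of the target page plus permission bits; DMA_TO_DEVICE
 * mappings get TCE_PCI_READ only, so the device cannot write through
 * them.
 */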
static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
			 unsigned long uaddr, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	u64 proto_tce;
	u64 *tcep;
	u64 rpn;

	proto_tce = TCE_PCI_READ; /* Read allowed */

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tcep = ((u64 *)tbl->it_base) + index;

	while (npages--) {
		/* can't move this out since we might cross LMB boundary */
		rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
		*tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;

		uaddr += TCE_PAGE_SIZE;
		tcep++;
	}
	return 0;
}

static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
{
	u64 *tcep = ((u64 *)tbl->it_base) + index;

	while (npages--)
		*(tcep++) = 0;
}
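
/*
 * Fill in the generic iommu_table: it_base points at the TCE memory,
 * it_offset places the start of the DMA window in IOMMU pages, and
 * it_size is the number of 8-byte entries, hence tce_size / 8.
 */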
void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
			       void *tce_mem, u64 tce_size,
			       u64 dma_offset)
{
	tbl->it_blocksize = 16;
	tbl->it_base = (unsigned long)tce_mem;
	tbl->it_offset = dma_offset >> IOMMU_PAGE_SHIFT;
	tbl->it_index = 0;
	tbl->it_size = tce_size >> 3;
	tbl->it_busno = 0;
	tbl->it_type = TCE_PCI;
}
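
/*
 * The BML fallback reuses a TCE table described by the "linux,tce-base"
 * and "linux,tce-size" properties of the PHB node and wraps it in an
 * iommu_table.
 */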
static struct iommu_table * __devinit
pnv_pci_setup_bml_iommu(struct pci_controller *hose)
{
	struct iommu_table *tbl;
	const __be64 *basep;
	const __be32 *sizep;

	basep = of_get_property(hose->dn, "linux,tce-base", NULL);
	sizep = of_get_property(hose->dn, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		pr_err("PCI: %s has missing tce entries !\n",
		       hose->dn->full_name);
		return NULL;
	}
	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, hose->node);
	if (WARN_ON(!tbl))
		return NULL;
	pnv_pci_setup_iommu_table(tbl, __va(be64_to_cpup(basep)),
				  be32_to_cpup(sizep), 0);
	iommu_init_table(tbl, hose->node);
	return tbl;
}
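
/*
 * Fallback DMA setup for host bridges that have no pnv_phb structure
 * (RTAS-discovered PHBs, for example): lazily build one iommu_table for
 * the PHB from the device tree and point every device at it.
 */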
static void __devinit pnv_pci_dma_fallback_setup(struct pci_controller *hose,
						 struct pci_dev *pdev)
{
	struct device_node *np = pci_bus_to_OF_node(hose->bus);
	struct pci_dn *pdn;

	if (np == NULL)
		return;
	pdn = PCI_DN(np);
	if (!pdn->iommu_table)
		pdn->iommu_table = pnv_pci_setup_bml_iommu(hose);
	if (!pdn->iommu_table)
		return;
	set_iommu_table_base(&pdev->dev, pdn->iommu_table);
}

static void __devinit pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;

	/* If we have no phb structure, try to setup a fallback based on
	 * the device-tree (RTAS PCI for example)
	 */
	if (phb && phb->dma_dev_setup)
		phb->dma_dev_setup(phb, pdev);
	else
		pnv_pci_dma_fallback_setup(hose, pdev);
}

static int pnv_pci_probe_mode(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	const __be64 *tstamp;
	u64 now, target;

	/* We hijack this as a way to ensure we have waited long
	 * enough since the reset was lifted on the PCI bus
	 */
	if (bus != hose->bus)
		return PCI_PROBE_NORMAL;
	tstamp = of_get_property(hose->dn, "reset-clear-timestamp", NULL);
	if (!tstamp || !*tstamp)
		return PCI_PROBE_NORMAL;

	now = mftb() / tb_ticks_per_usec;
	target = (be64_to_cpup(tstamp) / tb_ticks_per_usec)
		+ PCI_RESET_DELAY_US;

	pr_devel("pci %04d: Reset target: 0x%llx now: 0x%llx\n",
		 hose->global_number, target, now);

	if (now < target)
		msleep((target - now + 999) / 1000);

	return PCI_PROBE_NORMAL;
}
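
/*
 * Platform PCI initialization: discover PHBs (p5ioc2 IO hubs through
 * OPAL, or RTAS detection when OPAL is absent), link OF nodes to the
 * PHBs, then install the DMA/TCE, probe-mode and MSI callbacks.
 */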
void __init pnv_pci_init(void)
{
	struct device_node *np;

	pci_set_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* We do not want to just probe */
	pci_probe_only = 0;

	/* OPAL absent, try POPAL first then RTAS detection of PHBs */
	if (!firmware_has_feature(FW_FEATURE_OPAL)) {
#ifdef CONFIG_PPC_POWERNV_RTAS
		init_pci_config_tokens();
		find_and_init_phbs();
#endif /* CONFIG_PPC_POWERNV_RTAS */
	} else {
		/* OPAL is here, do our normal stuff */

		/* Look for p5ioc2 IO-Hubs */
		for_each_compatible_node(np, NULL, "ibm,p5ioc2")
			pnv_pci_init_p5ioc2_hub(np);
	}

	/* Setup the linkage between OF nodes and PHBs */
	pci_devs_phb_init();

	/* Configure IOMMU DMA hooks */
	ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup;
	ppc_md.tce_build = pnv_tce_build;
	ppc_md.tce_free = pnv_tce_free;
	ppc_md.pci_probe_mode = pnv_pci_probe_mode;
	set_pci_dma_ops(&dma_iommu_ops);

	/* Configure MSIs */
#ifdef CONFIG_PCI_MSI
	ppc_md.msi_check_device = pnv_msi_check_device;
	ppc_md.setup_msi_irqs = pnv_setup_msi_irqs;
	ppc_md.teardown_msi_irqs = pnv_teardown_msi_irqs;
#endif
}