/*
 * arch/ppc64/kernel/pSeries_iommu.c
 *
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup:
 *
 * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/ppcdebug.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include <asm/plpar_wrappers.h>
#include "pci.h"
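
/*
 * tce_build_pSeries(): non-LPAR path.  The TCE table lives in system memory
 * at tbl->it_base, so entries are written directly.  Each entry maps one
 * PAGE_SIZE page of memory for DMA; te_rdwr allows device reads and te_pciwr
 * additionally allows device writes when the mapping is not DMA_TO_DEVICE.
 */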
static void tce_build_pSeries(struct iommu_table *tbl, long index,
                              long npages, unsigned long uaddr,
                              enum dma_data_direction direction)
{
        union tce_entry t;
        union tce_entry *tp;

        t.te_word = 0;
        t.te_rdwr = 1; /* Read allowed */

        if (direction != DMA_TO_DEVICE)
                t.te_pciwr = 1;

        tp = ((union tce_entry *)tbl->it_base) + index;

        while (npages--) {
                /* can't move this out since we might cross LMB boundary */
                t.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT;

                tp->te_word = t.te_word;

                uaddr += PAGE_SIZE;
                tp++;
        }
}
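
/*
 * tce_free_pSeries(): clear npages consecutive entries, starting at index,
 * by storing zeroed TCE words back into the table.
 */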
static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
        union tce_entry t;
        union tce_entry *tp;

        t.te_word = 0;
        tp = ((union tce_entry *)tbl->it_base) + index;

        while (npages--) {
                tp->te_word = t.te_word;

                tp++;
        }
}
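
/*
 * tce_build_pSeriesLP(): LPAR path.  The hypervisor owns the TCE table, so
 * each entry is installed with a separate plpar_tce_put() call (the H_PUT_TCE
 * hcall).  The I/O address passed to the hypervisor is tcenum << 12, i.e.
 * TCEs are indexed in 4K units.  Failures are reported, rate-limited, with a
 * stack trace.
 */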
static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
                                long npages, unsigned long uaddr,
                                enum dma_data_direction direction)
{
        u64 rc;
        union tce_entry tce;

        tce.te_word = 0;
        tce.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT;
        tce.te_rdwr = 1;
        if (direction != DMA_TO_DEVICE)
                tce.te_pciwr = 1;

        while (npages--) {
                rc = plpar_tce_put((u64)tbl->it_index,
                                   (u64)tcenum << 12,
                                   tce.te_word);

                if (rc && printk_ratelimit()) {
                        printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
                        printk("\tindex   = 0x%lx\n", (u64)tbl->it_index);
                        printk("\ttcenum  = 0x%lx\n", (u64)tcenum);
                        printk("\ttce val = 0x%lx\n", tce.te_word);
                        show_stack(current, (unsigned long *)__get_SP());
                }

                tcenum++;
                tce.te_rpn++;
        }
}
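
/*
 * For multi-page mappings, batch the hcalls: build up to one page worth of
 * TCEs in a per-cpu scratch page and hand them to the hypervisor with
 * plpar_tce_put_indirect() (H_PUT_TCE_INDIRECT).  The scratch page is
 * allocated lazily with GFP_ATOMIC; if that fails, fall back to the
 * one-call-per-TCE routine above.
 */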
DEFINE_PER_CPU(void *, tce_page) = NULL;

static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
                                     long npages, unsigned long uaddr,
                                     enum dma_data_direction direction)
{
        u64 rc;
        union tce_entry tce, *tcep;
        long l, limit;

        if (npages == 1)
                return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
                                           direction);

        tcep = __get_cpu_var(tce_page);

        /* This is safe to do since interrupts are off when we're called
         * from iommu_alloc{,_sg}()
         */
        if (!tcep) {
                tcep = (void *)__get_free_page(GFP_ATOMIC);
                /* If allocation fails, fall back to the loop implementation */
                if (!tcep)
                        return tce_build_pSeriesLP(tbl, tcenum, npages,
                                                   uaddr, direction);
                __get_cpu_var(tce_page) = tcep;
        }

        tce.te_word = 0;
        tce.te_rpn = (virt_to_abs(uaddr)) >> PAGE_SHIFT;
        tce.te_rdwr = 1;
        if (direction != DMA_TO_DEVICE)
                tce.te_pciwr = 1;

        /* We can map max one pageful of TCEs at a time */
        do {
                /*
                 * Set up the page with TCE data, looping through and setting
                 * the values.
                 */
                limit = min_t(long, npages, PAGE_SIZE/sizeof(union tce_entry));

                for (l = 0; l < limit; l++) {
                        tcep[l] = tce;
                        tce.te_rpn++;
                }

                rc = plpar_tce_put_indirect((u64)tbl->it_index,
                                            (u64)tcenum << 12,
                                            (u64)virt_to_abs(tcep),
                                            limit);

                npages -= limit;
                tcenum += limit;
        } while (npages > 0 && !rc);

        if (rc && printk_ratelimit()) {
                printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
                printk("\tindex   = 0x%lx\n", (u64)tbl->it_index);
                printk("\tnpages  = 0x%lx\n", (u64)npages);
                printk("\ttce[0] val = 0x%lx\n", tcep[0].te_word);
                show_stack(current, (unsigned long *)__get_SP());
        }
}
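
/*
 * tce_free_pSeriesLP(): clear entries one at a time by putting a zero TCE
 * word for each I/O page.
 */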
static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
        u64 rc;
        union tce_entry tce;

        tce.te_word = 0;

        while (npages--) {
                rc = plpar_tce_put((u64)tbl->it_index,
                                   (u64)tcenum << 12,
                                   tce.te_word);

                if (rc && printk_ratelimit()) {
                        printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%ld\n", rc);
                        printk("\tindex   = 0x%lx\n", (u64)tbl->it_index);
                        printk("\ttcenum  = 0x%lx\n", (u64)tcenum);
                        printk("\ttce val = 0x%lx\n", tce.te_word);
                        show_stack(current, (unsigned long *)__get_SP());
                }

                tcenum++;
        }
}
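
/*
 * tce_freemulti_pSeriesLP(): clear a whole range with a single
 * plpar_tce_stuff() call (H_STUFF_TCE), writing the same zero TCE value
 * npages times.
 */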
static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
        u64 rc;
        union tce_entry tce;

        tce.te_word = 0;

        rc = plpar_tce_stuff((u64)tbl->it_index,
                             (u64)tcenum << 12,
                             tce.te_word,
                             npages);

        if (rc && printk_ratelimit()) {
                printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
                printk("\trc      = %ld\n", rc);
                printk("\tindex   = 0x%lx\n", (u64)tbl->it_index);
                printk("\tnpages  = 0x%lx\n", (u64)npages);
                printk("\ttce val = 0x%lx\n", tce.te_word);
                show_stack(current, (unsigned long *)__get_SP());
        }
}
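
/*
 * iommu_buses_init(): non-LPAR setup.  Split each PHB's TCE-mapped DMA space
 * (2GB normally, 1GB on machines with a 3GB IO hole) evenly among its child
 * slots, rounding the slot count up to a power of two, then initialize an
 * iommu table for every child device node.
 */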
static void iommu_buses_init(void)
{
        struct pci_controller *phb, *tmp;
        struct device_node *dn, *first_dn;
        int num_slots, num_slots_ilog2;
        int first_phb = 1;
        unsigned long tcetable_ilog2;

        /*
         * We default to a TCE table that maps 2GB (4MB table, 22 bits),
         * however some machines have a 3GB IO hole and for these we
         * create a table that maps 1GB (2MB table, 21 bits)
         */
        if (io_hole_start < 0x80000000UL)
                tcetable_ilog2 = 21;
        else
                tcetable_ilog2 = 22;

        /* XXX Should we be using pci_root_buses instead? -ojn */

        list_for_each_entry_safe(phb, tmp, &hose_list, list_node) {
                first_dn = ((struct device_node *)phb->arch_data)->child;

                /* Carve 2GB into the largest dma_window_size possible */
                for (dn = first_dn, num_slots = 0; dn != NULL; dn = dn->sibling)
                        num_slots++;
                num_slots_ilog2 = __ilog2(num_slots);

                if ((1 << num_slots_ilog2) != num_slots)
                        num_slots_ilog2++;

                phb->dma_window_size = 1 << (tcetable_ilog2 - num_slots_ilog2);

                /* Reserve 16MB of DMA space on the first PHB.
                 * We should probably be more careful and use firmware props.
                 * In reality this space is remapped, not lost.  But we don't
                 * want to get that smart to handle it -- too much work.
                 */
                phb->dma_window_base_cur = first_phb ? (1 << 12) : 0;
                first_phb = 0;

                for (dn = first_dn; dn != NULL; dn = dn->sibling)
                        iommu_devnode_init(dn);
        }
}
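
/*
 * iommu_buses_init_lpar(): LPAR setup.  Walk the bus list recursively and set
 * up an iommu table for every bus that carries an "ibm,dma-window" property,
 * including bridges below a PHB that already had a window of its own.
 */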
static void iommu_buses_init_lpar(struct list_head *bus_list)
{
        struct list_head *ln;
        struct pci_bus *bus;
        struct device_node *busdn;
        unsigned int *dma_window;

        for (ln = bus_list->next; ln != bus_list; ln = ln->next) {
                bus = pci_bus_b(ln);
                busdn = PCI_GET_DN(bus);

                dma_window = (unsigned int *)get_property(busdn, "ibm,dma-window", NULL);
                if (dma_window) {
                        /* Bussubno hasn't been copied yet.
                         * Do it now because iommu_table_setparms_lpar needs it.
                         */
                        busdn->bussubno = bus->number;
                        iommu_devnode_init(busdn);
                }

                /* look for a window on a bridge even if the PHB had one */
                iommu_buses_init_lpar(&bus->children);
        }
}
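
/*
 * iommu_table_setparms(): fill in the iommu_table for one IOA on a non-LPAR
 * system, using the linux,tce-base and linux,tce-size properties found on the
 * PHB node and the per-PHB dma_window_size computed in iommu_buses_init().
 */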
static void iommu_table_setparms(struct pci_controller *phb,
                                 struct device_node *dn,
                                 struct iommu_table *tbl)
{
        struct device_node *node;
        unsigned long *basep;
        unsigned int *sizep;

        node = (struct device_node *)phb->arch_data;

        if (get_property(node, "linux,has-tce-table", NULL) == NULL) {
                printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %s has no tce table !\n",
                       dn->full_name);
                return;
        }

        basep = (unsigned long *)get_property(node, "linux,tce-base", NULL);
        sizep = (unsigned int *)get_property(node, "linux,tce-size", NULL);
        if (basep == NULL || sizep == NULL) {
                printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %s has missing tce"
                       " entries !\n", dn->full_name);
                return;
        }

        memset((void *)(*basep), 0, *sizep);

        tbl->it_busno = phb->bus->number;

        /* Units of tce entries */
        tbl->it_offset = phb->dma_window_base_cur;

        /* Adjust the current table offset to the next
         * region.  Measured in TCE entries.  Force an
         * alignment to the size allotted per IOA.  This
         * makes it easier to remove the 1st 16MB.
         */
        phb->dma_window_base_cur += (phb->dma_window_size >> 3);
        phb->dma_window_base_cur &=
                ~((phb->dma_window_size >> 3) - 1);

        /* Set the tce table size - measured in pages */
        tbl->it_size = ((phb->dma_window_base_cur -
                         tbl->it_offset) << 3) >> PAGE_SHIFT;

        /* Test if we are going over 2GB of DMA space */
        if (phb->dma_window_base_cur > (1 << 19))
                panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");

        tbl->it_base = *basep;
        tbl->it_index = 0;
        tbl->it_entrysize = sizeof(union tce_entry);
        tbl->it_blocksize = 16;
        tbl->it_type = TCE_PCI;
}

/*
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
 *
 * ToDo: properly interpret the ibm,dma-window property.  The definition is:
 *	logical-bus-number	(1 word)
 *	phys-address		(#address-cells words)
 *	size			(#cell-size words)
 *
 * Currently we hard code these sizes (more or less).
 */
static void iommu_table_setparms_lpar(struct pci_controller *phb,
                                      struct device_node *dn,
                                      struct iommu_table *tbl)
{
        unsigned int *dma_window;

        dma_window = (unsigned int *)get_property(dn, "ibm,dma-window", NULL);

        if (!dma_window)
                panic("iommu_table_setparms_lpar: device %s has no"
                      " ibm,dma-window property!\n", dn->full_name);

        tbl->it_busno = dn->bussubno;
        tbl->it_size = (((((unsigned long)dma_window[4] << 32) |
                          (unsigned long)dma_window[5]) >> PAGE_SHIFT) << 3) >> PAGE_SHIFT;
        tbl->it_offset = ((((unsigned long)dma_window[2] << 32) |
                           (unsigned long)dma_window[3]) >> 12);
        tbl->it_base = 0;
        tbl->it_index = dma_window[0];
        tbl->it_entrysize = sizeof(union tce_entry);
        tbl->it_blocksize = 16;
        tbl->it_type = TCE_PCI;
}
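
/*
 * iommu_devnode_init(): allocate and fill an iommu_table for one device node,
 * picking the LPAR or non-LPAR parameter routine, and register it with the
 * generic iommu code.  Note the kmalloc() result is used unchecked here.
 */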
void iommu_devnode_init(struct device_node *dn)
{
        struct iommu_table *tbl;

        tbl = (struct iommu_table *)kmalloc(sizeof(struct iommu_table),
                                            GFP_KERNEL);

        if (systemcfg->platform == PLATFORM_PSERIES_LPAR)
                iommu_table_setparms_lpar(dn->phb, dn, tbl);
        else
                iommu_table_setparms(dn->phb, dn, tbl);

        dn->iommu_table = iommu_init_table(tbl);
}
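
/*
 * iommu_setup_pSeries(): run the per-bus setup, then propagate each bus's
 * iommu_table pointer down to every PCI device node so later lookups don't
 * have to walk up the device tree.
 */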
void iommu_setup_pSeries(void)
{
        struct pci_dev *dev = NULL;
        struct device_node *dn, *mydn;

        if (systemcfg->platform == PLATFORM_PSERIES_LPAR)
                iommu_buses_init_lpar(&pci_root_buses);
        else
                iommu_buses_init();

        /* Now copy the iommu_table ptr from the bus devices down to every
         * pci device_node.  This means get_iommu_table() won't need to search
         * up the device tree to find it.
         */
        while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
                mydn = dn = PCI_GET_DN(dev);

                while (dn && dn->iommu_table == NULL)
                        dn = dn->parent;
                if (dn)
                        mydn->iommu_table = dn->iommu_table;
        }
}

/* These are called very early. */
void tce_init_pSeries(void)
{
        if (!(systemcfg->platform & PLATFORM_LPAR)) {
                ppc_md.tce_build = tce_build_pSeries;
                ppc_md.tce_free  = tce_free_pSeries;
        } else if (cur_cpu_spec->firmware_features & FW_FEATURE_MULTITCE) {
                ppc_md.tce_build = tce_buildmulti_pSeriesLP;
                ppc_md.tce_free  = tce_freemulti_pSeriesLP;
        } else {
                ppc_md.tce_build = tce_build_pSeriesLP;
                ppc_md.tce_free  = tce_free_pSeriesLP;
        }

        pci_iommu_init();
}