/*
 * IOMMU implementation for Cell Broadband Processor Architecture
 *
 * (C) Copyright IBM Corporation 2006-2008
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>

#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/udbg.h>
#include <asm/lmb.h>
#include <asm/firmware.h>
#include <asm/cell-regs.h>

#include "interrupt.h"
/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages
 * instead of leaving them mapped to some dummy page. This can be
 * enabled once the appropriate workarounds for spider bugs have
 * been applied.
 */
#define CELL_IOMMU_REAL_UNMAP
/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of
 * IO PTEs based on the transfer direction. That can be enabled
 * once spider-net has been fixed to pass the correct direction
 * to the DMA mapping functions.
 */
#define CELL_IOMMU_STRICT_PROTECTION

#define NR_IOMMUS	2
/* IOC mmap registers */
#define IOC_Reg_Size			0x2000

#define IOC_IOPT_CacheInvd		0x908
#define IOC_IOPT_CacheInvd_NE_Mask	0xffe0000000000000ul
#define IOC_IOPT_CacheInvd_IOPTE_Mask	0x000003fffffffff8ul
#define IOC_IOPT_CacheInvd_Busy		0x0000000000000001ul

#define IOC_IOST_Origin			0x918
#define IOC_IOST_Origin_E		0x8000000000000000ul
#define IOC_IOST_Origin_HW		0x0000000000000800ul
#define IOC_IOST_Origin_HL		0x0000000000000400ul

#define IOC_IO_ExcpStat			0x920
#define IOC_IO_ExcpStat_V		0x8000000000000000ul
#define IOC_IO_ExcpStat_SPF_Mask	0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_S		0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_P		0x4000000000000000ul
#define IOC_IO_ExcpStat_ADDR_Mask	0x00000007fffff000ul
#define IOC_IO_ExcpStat_RW_Mask		0x0000000000000800ul
#define IOC_IO_ExcpStat_IOID_Mask	0x00000000000007fful

#define IOC_IO_ExcpMask			0x928
#define IOC_IO_ExcpMask_SFE		0x4000000000000000ul
#define IOC_IO_ExcpMask_PFE		0x2000000000000000ul

#define IOC_IOCmd_Offset		0x1000

#define IOC_IOCmd_Cfg			0xc00
#define IOC_IOCmd_Cfg_TE		0x0000800000000000ul
/* Segment table entries */
#define IOSTE_V			0x8000000000000000ul /* valid */
#define IOSTE_H			0x4000000000000000ul /* cache hint */
#define IOSTE_PT_Base_RPN_Mask	0x3ffffffffffff000ul /* base RPN of IOPT */
#define IOSTE_NPPT_Mask		0x0000000000000fe0ul /* no. pages in IOPT */
#define IOSTE_PS_Mask		0x0000000000000007ul /* page size */
#define IOSTE_PS_4K		0x0000000000000001ul /*   - 4kB  */
#define IOSTE_PS_64K		0x0000000000000003ul /*   - 64kB */
#define IOSTE_PS_1M		0x0000000000000005ul /*   - 1MB  */
#define IOSTE_PS_16M		0x0000000000000007ul /*   - 16MB */
/* Page table entries */
#define IOPTE_PP_W		0x8000000000000000ul /* protection: write */
#define IOPTE_PP_R		0x4000000000000000ul /* protection: read */
#define IOPTE_M			0x2000000000000000ul /* coherency required */
#define IOPTE_SO_R		0x1000000000000000ul /* ordering: writes */
#define IOPTE_SO_RW		0x1800000000000000ul /* ordering: r & w */
#define IOPTE_RPN_Mask		0x07fffffffffff000ul /* RPN */
#define IOPTE_H			0x0000000000000800ul /* cache hint */
#define IOPTE_IOID_Mask		0x00000000000007fful /* ioid */
/* IOMMU sizing */
#define IO_SEGMENT_SHIFT	28
#define IO_PAGENO_BITS		(IO_SEGMENT_SHIFT - IOMMU_PAGE_SHIFT)

/* The high bit needs to be set on every DMA address */
#define SPIDER_DMA_OFFSET	0x80000000ul
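/*
 * Worked example of the sizing above (illustrative, assuming the usual
 * 4kB IOMMU page, i.e. IOMMU_PAGE_SHIFT == 12): each I/O segment spans
 * 1ul << 28 = 256MB and holds 1ul << (28 - 12) = 65536 IOPTEs, so one
 * segment needs 65536 * 8 = 512kB of page table.
 */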
struct iommu_window {
	struct list_head list;
	struct cbe_iommu *iommu;
	unsigned long offset;
	unsigned long size;
	unsigned int ioid;
	struct iommu_table table;
};
#define NAMESIZE 8
struct cbe_iommu {
	int nid;
	char name[NAMESIZE];
	void __iomem *xlate_regs;
	void __iomem *cmd_regs;
	unsigned long *stab;
	unsigned long *ptab;
	void *pad_page;
	struct list_head windows;
};
/* Static array of iommus, one per node
 *   each contains a list of windows, keyed from dma_window property
 *   - on bus setup, look for a matching window, or create one
 *   - on dev setup, assign iommu_table ptr
 */
static struct cbe_iommu iommus[NR_IOMMUS];
static int cbe_nr_iommus;
static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
		long n_ptes)
{
	unsigned long __iomem *reg;
	unsigned long val;
	long n;

	reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;

	while (n_ptes > 0) {
		/* we can invalidate up to 1 << 11 PTEs at once */
		n = min(n_ptes, 1l << 11);
		val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask)
			| (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask)
			| IOC_IOPT_CacheInvd_Busy;

		out_be64(reg, val);
		while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
			;

		n_ptes -= n;
		pte += n;
	}
}
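/*
 * Illustrative cost of the loop above: the NE field allows up to 2048
 * IOPTEs per IOC_IOPT_CacheInvd write, so invalidating a 16MB mapping
 * of 4kB pages (4096 IOPTEs) takes two register writes, each followed
 * by a poll of the Busy bit.
 */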
static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction)
{
	int i;
	unsigned long *io_pte, base_pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	/* implementing proper protection causes problems with the spidernet
	 * driver - check mapping directions later, but allow read & write by
	 * default for now. */
#ifdef CELL_IOMMU_STRICT_PROTECTION
	/* to avoid referencing a global, we use a trick here to set up the
	 * protection bits. "prot" is set up as 3 fields of 4 bits appended
	 * together, one for each of the 3 supported direction values. It is
	 * then shifted left so that the field matching the desired direction
	 * lands on the appropriate bits, and other bits are masked out.
	 */
	const unsigned long prot = 0xc48;
	base_pte =
		((prot << (52 + 4 * direction)) & (IOPTE_PP_W | IOPTE_PP_R))
		| IOPTE_M | IOPTE_SO_RW | (window->ioid & IOPTE_IOID_Mask);
#else
	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW |
		(window->ioid & IOPTE_IOID_Mask);
#endif

	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

	for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
		io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);

	pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
		 index, npages, direction, base_pte);
}
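/*
 * Worked example of the "prot" trick in tce_build_cell() above
 * (illustrative): prot = 0xc48 = 0b1100_0100_1000, three 4-bit fields.
 * After the shift by (52 + 4 * direction), the IOPTE_PP_W/IOPTE_PP_R
 * mask (bits 63/62) selects:
 *   DMA_BIDIRECTIONAL (0): shift by 52, top field 0b1100 -> W | R
 *   DMA_TO_DEVICE     (1): shift by 56, mid field 0b0100 -> R only
 *   DMA_FROM_DEVICE   (2): shift by 60, low field 0b1000 -> W only
 */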
static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
{
	int i;
	unsigned long *io_pte, pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages);

#ifdef CELL_IOMMU_REAL_UNMAP
	pte = 0;
#else
	/* spider bridge does PCI reads after freeing - insert a mapping
	 * to a scratch page instead of an invalid entry */
	pte = IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW | __pa(window->iommu->pad_page)
		| (window->ioid & IOPTE_IOID_Mask);
#endif

	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

	for (i = 0; i < npages; i++)
		io_pte[i] = pte;

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);
}
static irqreturn_t ioc_interrupt(int irq, void *data)
{
	unsigned long stat;
	struct cbe_iommu *iommu = data;

	stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);

	/* Might want to rate limit it */
	printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
	printk(KERN_ERR "  V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n",
	       !!(stat & IOC_IO_ExcpStat_V),
	       (stat & IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ',
	       (stat & IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ',
	       (stat & IOC_IO_ExcpStat_RW_Mask) ? "Read" : "Write",
	       (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask));
	printk(KERN_ERR "  page=0x%016lx\n",
	       stat & IOC_IO_ExcpStat_ADDR_Mask);

	/* clear interrupt */
	stat &= ~IOC_IO_ExcpStat_V;
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);

	return IRQ_HANDLED;
}
static int cell_iommu_find_ioc(int nid, unsigned long *base)
{
	struct device_node *np;
	struct resource r;

	*base = 0;

	/* First look for new style /be nodes */
	for_each_node_by_name(np, "ioc") {
		if (of_node_to_nid(np) != nid)
			continue;
		if (of_address_to_resource(np, 0, &r)) {
			printk(KERN_ERR "iommu: can't get address for %s\n",
			       np->full_name);
			continue;
		}
		*base = r.start;
		of_node_put(np);
		return 0;
	}

	/* Ok, let's try the old way */
	for_each_node_by_type(np, "cpu") {
		const unsigned int *nidp;
		const unsigned long *tmp;

		nidp = of_get_property(np, "node-id", NULL);
		if (nidp && *nidp == nid) {
			tmp = of_get_property(np, "ioc-translation", NULL);
			if (tmp) {
				*base = *tmp;
				of_node_put(np);
				return 0;
			}
		}
	}

	return -ENODEV;
}
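/*
 * The two lookups above correspond to two firmware layouts (shapes
 * illustrative): new-style trees expose an "ioc" node per BE whose
 * first "reg" entry is the IOC register block, while old-style trees
 * hang an "ioc-translation" property holding the base address off the
 * "cpu" node carrying the matching "node-id".
 */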
static void cell_iommu_setup_page_tables(struct cbe_iommu *iommu,
			unsigned long dbase, unsigned long dsize,
			unsigned long fbase, unsigned long fsize)
{
	struct page *page;
	int i;
	unsigned long reg, segments, pages_per_segment, ptab_size, stab_size,
		      n_pte_pages, base;

	base = dbase;
	if (fsize != 0)
		base = min(fbase, dbase);

	segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;
	pages_per_segment = 1ull << IO_PAGENO_BITS;

	pr_debug("%s: iommu[%d]: segments: %lu, pages per segment: %lu\n",
			__FUNCTION__, iommu->nid, segments, pages_per_segment);

	/* set up the segment table */
	stab_size = segments * sizeof(unsigned long);
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
	BUG_ON(!page);
	iommu->stab = page_address(page);
	clear_page(iommu->stab);

	/* ... and the page tables. Since these are contiguous, we can treat
	 * the page tables as one array of ptes, like pSeries does.
	 */
	ptab_size = segments * pages_per_segment * sizeof(unsigned long);
	pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__,
			iommu->nid, ptab_size, get_order(ptab_size));
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
	BUG_ON(!page);

	iommu->ptab = page_address(page);
	memset(iommu->ptab, 0, ptab_size);

	/* number of pages needed for a page table */
	n_pte_pages = (pages_per_segment *
		       sizeof(unsigned long)) >> IOMMU_PAGE_SHIFT;

	pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
			__FUNCTION__, iommu->nid, iommu->stab, iommu->ptab,
			n_pte_pages);

	/* initialise the STEs */
	reg = IOSTE_V | ((n_pte_pages - 1) << 5);

	if (IOMMU_PAGE_SIZE == 0x1000)
		reg |= IOSTE_PS_4K;
	else if (IOMMU_PAGE_SIZE == 0x10000)
		reg |= IOSTE_PS_64K;
	else {
		extern void __unknown_page_size_error(void);
		__unknown_page_size_error();
	}

	pr_debug("Setting up IOMMU stab:\n");
	for (i = base >> IO_SEGMENT_SHIFT; i < segments; i++) {
		iommu->stab[i] = reg |
			(__pa(iommu->ptab) + n_pte_pages * IOMMU_PAGE_SIZE * i);
		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
	}
}
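/*
 * Sizing example for the function above (illustrative): a single 2GB
 * dynamic window with 4kB IOMMU pages gives segments = 2GB >> 28 = 8,
 * stab_size = 8 * 8 = 64 bytes, and ptab_size = 8 * 65536 * 8 = 4MB of
 * contiguous IOPTEs. n_pte_pages = (65536 * 8) >> 12 = 128, so each
 * successive STE points 128 * 4kB further into the flat page table.
 */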
static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
{
	int ret;
	unsigned long reg, xlate_base;
	unsigned int virq;

	if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
		panic("%s: missing IOC register mappings for node %d\n",
		      __FUNCTION__, iommu->nid);

	iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
	iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;

	/* ensure that the STEs have updated */
	mb();

	/* setup interrupts for the iommu. */
	reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
			reg & ~IOC_IO_ExcpStat_V);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
			IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE);

	virq = irq_create_mapping(NULL,
			IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
	BUG_ON(virq == NO_IRQ);

	ret = request_irq(virq, ioc_interrupt, IRQF_DISABLED,
			iommu->name, iommu);
	BUG_ON(ret);

	/* set the IOC segment table origin register (and turn on the iommu) */
	reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
	out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
	in_be64(iommu->xlate_regs + IOC_IOST_Origin);

	/* turn on IO translation */
	reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
	out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
}
static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
	unsigned long base, unsigned long size)
{
	cell_iommu_setup_page_tables(iommu, base, size, 0, 0);
	cell_iommu_enable_hardware(iommu);
}
#if 0 /* Unused for now */
static struct iommu_window *find_window(struct cbe_iommu *iommu,
		unsigned long offset, unsigned long size)
{
	struct iommu_window *window;

	/* todo: check for overlapping (but not equal) windows */

	list_for_each_entry(window, &(iommu->windows), list) {
		if (window->offset == offset && window->size == size)
			return window;
	}

	return NULL;
}
#endif
static inline u32 cell_iommu_get_ioid(struct device_node *np)
{
	const u32 *ioid;

	ioid = of_get_property(np, "ioid", NULL);
	if (ioid == NULL) {
		printk(KERN_WARNING "iommu: missing ioid for %s, using 0\n",
		       np->full_name);
		return 0;
	}

	return *ioid;
}
static struct iommu_window * __init
cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
			unsigned long offset, unsigned long size,
			unsigned long pte_offset)
{
	struct iommu_window *window;
	struct page *page;
	u32 ioid;

	ioid = cell_iommu_get_ioid(np);

	window = kmalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
	BUG_ON(window == NULL);

	window->offset = offset;
	window->size = size;
	window->ioid = ioid;
	window->iommu = iommu;

	window->table.it_blocksize = 16;
	window->table.it_base = (unsigned long)iommu->ptab;
	window->table.it_index = iommu->nid;
	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;
	window->table.it_size = size >> IOMMU_PAGE_SHIFT;

	iommu_init_table(&window->table, iommu->nid);

	pr_debug("\tioid      %d\n", window->ioid);
	pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
	pr_debug("\tbase      0x%016lx\n", window->table.it_base);
	pr_debug("\toffset    0x%lx\n", window->table.it_offset);
	pr_debug("\tsize      %ld\n", window->table.it_size);

	list_add(&window->list, &iommu->windows);

	if (offset != 0)
		return window;

	/* We need to map and reserve the first IOMMU page since it's used
	 * by the spider workaround. In theory, we only need to do that when
	 * running on spider but it doesn't really matter.
	 *
	 * This code also assumes that we have a window that starts at 0,
	 * which is the case on all spider based blades.
	 */
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
	BUG_ON(!page);
	iommu->pad_page = page_address(page);
	clear_page(iommu->pad_page);

	__set_bit(0, window->table.it_map);
	tce_build_cell(&window->table, window->table.it_offset, 1,
		       (unsigned long)iommu->pad_page, DMA_TO_DEVICE);
	window->table.it_hint = window->table.it_blocksize;

	return window;
}
static struct cbe_iommu *cell_iommu_for_node(int nid)
{
	int i;

	for (i = 0; i < cbe_nr_iommus; i++)
		if (iommus[i].nid == nid)
			return &iommus[i];
	return NULL;
}
static unsigned long cell_dma_direct_offset;

static unsigned long dma_iommu_fixed_base;
struct dma_mapping_ops dma_iommu_fixed_ops;
static void cell_dma_dev_setup_iommu(struct device *dev)
{
	struct iommu_window *window;
	struct cbe_iommu *iommu;
	struct dev_archdata *archdata = &dev->archdata;

	/* Current implementation uses the first window available in that
	 * node's iommu. We -might- do something smarter later though it may
	 * never be necessary
	 */
	iommu = cell_iommu_for_node(archdata->numa_node);
	if (iommu == NULL || list_empty(&iommu->windows)) {
		printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
		       archdata->of_node ? archdata->of_node->full_name : "?",
		       archdata->numa_node);
		return;
	}
	window = list_entry(iommu->windows.next, struct iommu_window, list);

	archdata->dma_data = &window->table;
}
static void cell_dma_dev_setup_fixed(struct device *dev);

static void cell_dma_dev_setup(struct device *dev)
{
	struct dev_archdata *archdata = &dev->archdata;

	/* Order is important here, these are not mutually exclusive */
	if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
		cell_dma_dev_setup_fixed(dev);
	else if (get_pci_dma_ops() == &dma_iommu_ops)
		cell_dma_dev_setup_iommu(dev);
	else if (get_pci_dma_ops() == &dma_direct_ops)
		archdata->dma_data = (void *)cell_dma_direct_offset;
	else
		BUG();
}

static void cell_pci_dma_dev_setup(struct pci_dev *dev)
{
	cell_dma_dev_setup(&dev->dev);
}
static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct device *dev = data;

	/* We are only interested in device addition */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	/* We use the PCI DMA ops */
	dev->archdata.dma_ops = get_pci_dma_ops();

	cell_dma_dev_setup(dev);

	return 0;
}

static struct notifier_block cell_of_bus_notifier = {
	.notifier_call = cell_of_bus_notify
};
static int __init cell_iommu_get_window(struct device_node *np,
					unsigned long *base,
					unsigned long *size)
{
	const void *dma_window;
	unsigned long index;

	/* Use ibm,dma-window if available, else, hard code ! */
	dma_window = of_get_property(np, "ibm,dma-window", NULL);
	if (dma_window == NULL) {
		*base = 0;
		*size = 0x80000000u;
		return -ENODEV;
	}

	of_parse_dma_window(np, dma_window, &index, base, size);
	return 0;
}
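/*
 * Note on the property format (a summary, not a spec): "ibm,dma-window"
 * is the usual (index, address, size) triple, with cell counts taken
 * from the bus's ibm,#dma-address-cells/ibm,#dma-size-cells properties
 * by of_parse_dma_window(). The 2GB hard-coded fallback presumably
 * matches the window older firmware sets up.
 */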
static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np)
{
	struct cbe_iommu *iommu;
	int nid, i;

	/* Get node ID */
	nid = of_node_to_nid(np);
	if (nid < 0) {
		printk(KERN_ERR "iommu: failed to get node for %s\n",
		       np->full_name);
		return NULL;
	}
	pr_debug("iommu: setting up iommu for node %d (%s)\n",
		 nid, np->full_name);

	/* XXX todo: If we can have multiple windows on the same IOMMU, which
	 * isn't the case today, we probably want here to check whether the
	 * iommu for that node is already setup.
	 * However, there might be issues with getting the size right so let's
	 * ignore that for now. We might want to completely get rid of the
	 * multiple window support since the cell iommu supports per-page ioids
	 */

	if (cbe_nr_iommus >= NR_IOMMUS) {
		printk(KERN_ERR "iommu: too many IOMMUs detected ! (%s)\n",
		       np->full_name);
		return NULL;
	}

	/* Init base fields */
	i = cbe_nr_iommus++;
	iommu = &iommus[i];
	iommu->stab = NULL;
	iommu->nid = nid;
	snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
	INIT_LIST_HEAD(&iommu->windows);

	return iommu;
}
static void __init cell_iommu_init_one(struct device_node *np,
				       unsigned long offset)
{
	struct cbe_iommu *iommu;
	unsigned long base, size;

	iommu = cell_iommu_alloc(np);
	if (!iommu)
		return;

	/* Obtain a window for it */
	cell_iommu_get_window(np, &base, &size);

	pr_debug("\ttranslating window 0x%lx...0x%lx\n",
		 base, base + size - 1);

	/* Initialize the hardware */
	cell_iommu_setup_hardware(iommu, base, size);

	/* Setup the iommu_table */
	cell_iommu_setup_window(iommu, np, base, size,
				offset >> IOMMU_PAGE_SHIFT);
}
static void __init cell_disable_iommus(void)
{
	int node;
	unsigned long base, val;
	void __iomem *xregs, *cregs;

	/* Make sure IOC translation is disabled on all nodes */
	for_each_online_node(node) {
		if (cell_iommu_find_ioc(node, &base))
			continue;
		xregs = ioremap(base, IOC_Reg_Size);
		if (xregs == NULL)
			continue;
		cregs = xregs + IOC_IOCmd_Offset;

		pr_debug("iommu: cleaning up iommu on node %d\n", node);

		out_be64(xregs + IOC_IOST_Origin, 0);
		(void)in_be64(xregs + IOC_IOST_Origin);
		val = in_be64(cregs + IOC_IOCmd_Cfg);
		val &= ~IOC_IOCmd_Cfg_TE;
		out_be64(cregs + IOC_IOCmd_Cfg, val);
		(void)in_be64(cregs + IOC_IOCmd_Cfg);

		iounmap(xregs);
	}
}
static int __init cell_iommu_init_disabled(void)
{
	struct device_node *np = NULL;
	unsigned long base = 0, size;

	/* When no iommu is present, we use direct DMA ops */
	set_pci_dma_ops(&dma_direct_ops);

	/* First make sure all IOC translation is turned off */
	cell_disable_iommus();

	/* If we have no Axon, we set up the spider DMA magic offset */
	if (of_find_node_by_name(NULL, "axon") == NULL)
		cell_dma_direct_offset = SPIDER_DMA_OFFSET;

	/* Now we need to check to see where the memory is mapped
	 * in PCI space. We assume that all busses use the same dma
	 * window which is always the case so far on Cell, thus we
	 * pick up the first pci-internal node we can find and check
	 * the DMA window from there.
	 */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		if (cell_iommu_get_window(np, &base, &size) == 0)
			break;
	}

	if (np == NULL) {
		for_each_node_by_name(np, "pci-internal") {
			if (np->parent == NULL || np->parent->parent != NULL)
				continue;
			if (cell_iommu_get_window(np, &base, &size) == 0)
				break;
		}
	}
	of_node_put(np);

	/* If we found a DMA window, we check if it's big enough to enclose
	 * all of physical memory. If not, we force enable IOMMU
	 */
	if (np && size < lmb_end_of_DRAM()) {
		printk(KERN_WARNING "iommu: force-enabled, dma window"
		       " (%ldMB) smaller than total memory (%ldMB)\n",
		       size >> 20, lmb_end_of_DRAM() >> 20);
		return -ENODEV;
	}

	cell_dma_direct_offset += base;

	if (cell_dma_direct_offset != 0)
		ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;

	printk("iommu: disabled, direct DMA offset is 0x%lx\n",
	       cell_dma_direct_offset);

	return 0;
}
/*
 *  Fixed IOMMU mapping support
 *
 *  This code adds support for setting up a fixed IOMMU mapping on certain
 *  cell machines. For 64-bit devices this avoids the performance overhead of
 *  mapping and unmapping pages at runtime. 32-bit devices are unable to use
 *  the fixed mapping.
 *
 *  The fixed mapping is established at boot, and maps all of physical memory
 *  1:1 into device space at some offset. On machines with < 30 GB of memory
 *  we setup the fixed mapping immediately above the normal IOMMU window.
 *
 *  For example a machine with 4GB of memory would end up with the normal
 *  IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In
 *  this case a 64-bit device wishing to DMA to 1GB would be told to DMA to
 *  3GB, plus any offset required by firmware. The firmware offset is encoded
 *  in the "dma-ranges" property.
 *
 *  On machines with 30GB or more of memory, we are unable to place the fixed
 *  mapping above the normal IOMMU window as we would run out of address space.
 *  Instead we move the normal IOMMU window to coincide with the hash page
 *  table, this region does not need to be part of the fixed mapping as no
 *  device should ever be DMA'ing to it. We then setup the fixed mapping
 *  from 1GB to 80GB.
 */
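/*
 * Address arithmetic for the 4GB example above (illustrative): the
 * device's archdata.dma_data ends up as pci_addr + dma_iommu_fixed_base
 * (2GB here), so a driver handing the DMA API a buffer at physical 1GB
 * gets back bus address 3GB plus the firmware's pci_addr offset.
 */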
static u64 cell_iommu_get_fixed_address(struct device *dev)
{
	u64 cpu_addr, size, best_size, pci_addr = OF_BAD_ADDR;
	struct device_node *np;
	const u32 *ranges = NULL;
	int i, len, best;

	np = of_node_get(dev->archdata.of_node);
	while (np) {
		ranges = of_get_property(np, "dma-ranges", &len);
		if (ranges)
			break;
		np = of_get_next_parent(np);
	}

	if (!ranges) {
		dev_dbg(dev, "iommu: no dma-ranges found\n");
		goto out;
	}

	len /= sizeof(u32);

	/* dma-ranges format:
	 * 1 cell:  pci space
	 * 2 cells: pci address
	 * 2 cells: parent address
	 * 2 cells: size
	 */
	for (i = 0, best = -1, best_size = 0; i < len; i += 7) {
		cpu_addr = of_translate_dma_address(np, ranges + i + 3);
		size = of_read_number(ranges + i + 5, 2);

		if (cpu_addr == 0 && size > best_size) {
			best = i;
			best_size = size;
		}
	}

	if (best >= 0)
		pci_addr = of_read_number(ranges + best + 1, 2);
	else
		dev_dbg(dev, "iommu: no suitable range found!\n");

out:
	of_node_put(np);

	return pci_addr;
}
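/*
 * Example decode for the loop above (values illustrative): the 7-cell
 * entry <0x02000000 0x0 0x80000000 0x0 0x0 0x1 0x0> reads as pci space
 * 0x02000000, pci address 0x80000000, parent address 0 and size 4GB;
 * cpu_addr == 0 makes it a candidate, and if it wins, pci_addr comes
 * back as 0x80000000.
 */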
static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	if (dma_mask == DMA_BIT_MASK(64) &&
	    cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR) {
		dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
		set_dma_ops(dev, &dma_iommu_fixed_ops);
	} else {
		dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
		set_dma_ops(dev, get_pci_dma_ops());
	}

	cell_dma_dev_setup(dev);

	*dev->dma_mask = dma_mask;

	return 0;
}
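/*
 * Usage sketch (illustrative): a driver calling
 *	pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
 * lands here via the set_dma_mask hook and is switched to
 * dma_iommu_fixed_ops if its dma-ranges offer a usable 64-bit range;
 * requesting DMA_BIT_MASK(32) keeps it on the dynamic window ops.
 */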
static void cell_dma_dev_setup_fixed(struct device *dev)
{
	struct dev_archdata *archdata = &dev->archdata;
	u64 addr;

	addr = cell_iommu_get_fixed_address(dev) + dma_iommu_fixed_base;
	archdata->dma_data = (void *)addr;

	dev_dbg(dev, "iommu: fixed addr = %lx\n", addr);
}
static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
	struct device_node *np, unsigned long dbase, unsigned long dsize,
	unsigned long fbase, unsigned long fsize)
{
	unsigned long base_pte, uaddr, *io_pte;
	int i;

	dma_iommu_fixed_base = fbase;

	/* convert from bytes into page table indices */
	dbase = dbase >> IOMMU_PAGE_SHIFT;
	dsize = dsize >> IOMMU_PAGE_SHIFT;
	fbase = fbase >> IOMMU_PAGE_SHIFT;
	fsize = fsize >> IOMMU_PAGE_SHIFT;

	pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);

	io_pte = iommu->ptab;
	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW
		    | (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask);

	uaddr = 0;
	for (i = fbase; i < fbase + fsize; i++, uaddr += IOMMU_PAGE_SIZE) {
		/* Don't touch the dynamic region */
		if (i >= dbase && i < (dbase + dsize)) {
			pr_debug("iommu: fixed/dynamic overlap, skipping\n");
			continue;
		}
		io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
	}

	mb();
}
static int __init cell_iommu_fixed_mapping_init(void)
{
	unsigned long dbase, dsize, fbase, fsize, hbase, hend;
	struct cbe_iommu *iommu;
	struct device_node *np;

	/* The fixed mapping is only supported on axon machines */
	np = of_find_node_by_name(NULL, "axon");
	if (!np) {
		pr_debug("iommu: fixed mapping disabled, no axons found\n");
		return -1;
	}

	/* We must have dma-ranges properties for fixed mapping to work */
	for (np = NULL; (np = of_find_all_nodes(np));) {
		if (of_find_property(np, "dma-ranges", NULL))
			break;
	}
	of_node_put(np);

	if (!np) {
		pr_debug("iommu: no dma-ranges found, no fixed mapping\n");
		return -1;
	}

	/* The default setup is to have the fixed mapping sit after the
	 * dynamic region, so find the top of the largest IOMMU window
	 * on any axon, then add the size of RAM and that's our max value.
	 * If that is > 32GB we have to do other shenanigans.
	 */
	fbase = 0;
	for_each_node_by_name(np, "axon") {
		cell_iommu_get_window(np, &dbase, &dsize);
		fbase = max(fbase, dbase + dsize);
	}

	fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
	fsize = lmb_phys_mem_size();

	if ((fbase + fsize) <= 0x800000000)
		hbase = 0; /* use the device tree window */
	else {
		/* If we're over 32 GB we need to cheat. We can't map all of
		 * RAM with the fixed mapping, and also fit the dynamic
		 * region. So try to place the dynamic region where the hash
		 * table sits, drivers never need to DMA to it, we don't
		 * need a fixed mapping for that area.
		 */
		if (!htab_address) {
			pr_debug("iommu: htab is NULL, on LPAR? Huh?\n");
			return -1;
		}
		hbase = __pa(htab_address);
		hend  = hbase + htab_size_bytes;

		/* The window must start and end on a segment boundary */
		if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) ||
		    (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) {
			pr_debug("iommu: hash window not segment aligned\n");
			return -1;
		}

		/* Check the hash window fits inside the real DMA window */
		for_each_node_by_name(np, "axon") {
			cell_iommu_get_window(np, &dbase, &dsize);

			if (hbase < dbase || (hend > (dbase + dsize))) {
				pr_debug("iommu: hash window doesn't fit in "
					 "real DMA window\n");
				return -1;
			}
		}

		fbase = 0;
	}

	/* Setup the dynamic regions */
	for_each_node_by_name(np, "axon") {
		iommu = cell_iommu_alloc(np);
		BUG_ON(!iommu);

		if (hbase == 0)
			cell_iommu_get_window(np, &dbase, &dsize);
		else {
			dbase = hbase;
			dsize = htab_size_bytes;
		}

		printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx "
			"fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
			dbase + dsize, fbase, fbase + fsize);

		cell_iommu_setup_page_tables(iommu, dbase, dsize, fbase, fsize);
		cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
					    fbase, fsize);
		cell_iommu_enable_hardware(iommu);
		cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
	}

	dma_iommu_fixed_ops = dma_direct_ops;
	dma_iommu_fixed_ops.set_dma_mask = dma_set_mask_and_switch;

	dma_iommu_ops.set_dma_mask = dma_set_mask_and_switch;
	set_pci_dma_ops(&dma_iommu_ops);

	return 0;
}
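/*
 * Design note: dma_iommu_fixed_ops starts as a copy of dma_direct_ops
 * (the fixed window is linear, so the direct ops work once
 * archdata->dma_data carries the per-device offset), with only
 * set_dma_mask overridden so devices can switch between the fixed and
 * dynamic mappings.
 */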
static int iommu_fixed_disabled;

static int __init setup_iommu_fixed(char *str)
{
	if (strcmp(str, "off") == 0)
		iommu_fixed_disabled = 1;

	return 1;
}
__setup("iommu_fixed=", setup_iommu_fixed);
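/*
 * Boot-time usage: passing "iommu_fixed=off" on the kernel command
 * line keeps every device on the dynamically-mapped IOMMU window.
 */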
static int __init cell_iommu_init(void)
{
	struct device_node *np;

	/* If IOMMU is disabled or we have little enough RAM to not need
	 * to enable it, we setup a direct mapping.
	 *
	 * Note: should we make sure we have the IOMMU actually disabled ?
	 */
	if (iommu_is_off ||
	    (!iommu_force_on && lmb_end_of_DRAM() <= 0x80000000ull))
		if (cell_iommu_init_disabled() == 0)
			goto bail;

	/* Setup various ppc_md. callbacks */
	ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
	ppc_md.tce_build = tce_build_cell;
	ppc_md.tce_free = tce_free_cell;

	if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
		goto bail;

	/* Create an iommu for each /axon node. */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, 0);
	}

	/* Create an iommu for each toplevel /pci-internal node for
	 * old hardware/firmware
	 */
	for_each_node_by_name(np, "pci-internal") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
	}

	/* Setup default PCI iommu ops */
	set_pci_dma_ops(&dma_iommu_ops);

 bail:
	/* Register callbacks on OF platform device addition/removal
	 * to handle linking them to the right DMA operations
	 */
	bus_register_notifier(&of_platform_bus_type, &cell_of_bus_notifier);

	return 0;
}
machine_arch_initcall(cell, cell_iommu_init);
machine_arch_initcall(celleb_native, cell_iommu_init);