2 * Derived from arch/powerpc/kernel/iommu.c
4 * Copyright IBM Corporation, 2006-2007
5 * Copyright (C) 2006 Jon Mason <jdmason@kudzu.us>
7 * Author: Jon Mason <jdmason@kudzu.us>
8 * Author: Muli Ben-Yehuda <muli@il.ibm.com>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <linux/kernel.h>
26 #include <linux/init.h>
27 #include <linux/types.h>
28 #include <linux/slab.h>
30 #include <linux/spinlock.h>
31 #include <linux/string.h>
32 #include <linux/crash_dump.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitmap.h>
35 #include <linux/pci_ids.h>
36 #include <linux/pci.h>
37 #include <linux/delay.h>
38 #include <linux/scatterlist.h>
39 #include <linux/iommu-helper.h>
41 #include <asm/iommu.h>
42 #include <asm/calgary.h>
44 #include <asm/pci-direct.h>
47 #include <asm/bios_ebda.h>
48 #include <asm/x86_init.h>
49 #include <asm/iommu_table.h>
51 #ifdef CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT
52 int use_calgary __read_mostly
= 1;
54 int use_calgary __read_mostly
= 0;
#endif /* CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT */
57 #define PCI_DEVICE_ID_IBM_CALGARY 0x02a1
58 #define PCI_DEVICE_ID_IBM_CALIOC2 0x0308
60 /* register offsets inside the host bridge space */
61 #define CALGARY_CONFIG_REG 0x0108
62 #define PHB_CSR_OFFSET 0x0110 /* Channel Status */
63 #define PHB_PLSSR_OFFSET 0x0120
64 #define PHB_CONFIG_RW_OFFSET 0x0160
65 #define PHB_IOBASE_BAR_LOW 0x0170
66 #define PHB_IOBASE_BAR_HIGH 0x0180
67 #define PHB_MEM_1_LOW 0x0190
68 #define PHB_MEM_1_HIGH 0x01A0
69 #define PHB_IO_ADDR_SIZE 0x01B0
70 #define PHB_MEM_1_SIZE 0x01C0
71 #define PHB_MEM_ST_OFFSET 0x01D0
72 #define PHB_AER_OFFSET 0x0200
73 #define PHB_CONFIG_0_HIGH 0x0220
74 #define PHB_CONFIG_0_LOW 0x0230
75 #define PHB_CONFIG_0_END 0x0240
76 #define PHB_MEM_2_LOW 0x02B0
77 #define PHB_MEM_2_HIGH 0x02C0
78 #define PHB_MEM_2_SIZE_HIGH 0x02D0
79 #define PHB_MEM_2_SIZE_LOW 0x02E0
80 #define PHB_DOSHOLE_OFFSET 0x08E0
82 /* CalIOC2 specific */
83 #define PHB_SAVIOR_L2 0x0DB0
84 #define PHB_PAGE_MIG_CTRL 0x0DA8
85 #define PHB_PAGE_MIG_DEBUG 0x0DA0
86 #define PHB_ROOT_COMPLEX_STATUS 0x0CB0
89 #define PHB_TCE_ENABLE 0x20000000
90 #define PHB_SLOT_DISABLE 0x1C000000
91 #define PHB_DAC_DISABLE 0x01000000
92 #define PHB_MEM2_ENABLE 0x00400000
93 #define PHB_MCSR_ENABLE 0x00100000
94 /* TAR (Table Address Register) */
95 #define TAR_SW_BITS 0x0000ffffffff800fUL
96 #define TAR_VALID 0x0000000000000008UL
97 /* CSR (Channel/DMA Status Register) */
98 #define CSR_AGENT_MASK 0xffe0ffff
99 /* CCR (Calgary Configuration Register) */
100 #define CCR_2SEC_TIMEOUT 0x000000000000000EUL
/* PMCR/PMDR (Page Migration Control/Debug Registers) */
102 #define PMR_SOFTSTOP 0x80000000
103 #define PMR_SOFTSTOPFAULT 0x40000000
104 #define PMR_HARDSTOP 0x20000000
107 * The maximum PHB bus number.
108 * x3950M2 (rare): 8 chassis, 48 PHBs per chassis = 384
109 * x3950M2: 4 chassis, 48 PHBs per chassis = 192
110 * x3950 (PCIE): 8 chassis, 32 PHBs per chassis = 256
111 * x3950 (PCIX): 8 chassis, 16 PHBs per chassis = 128
113 #define MAX_PHB_BUS_NUM 256
115 #define PHBS_PER_CALGARY 4
117 /* register offsets in Calgary's internal register space */
118 static const unsigned long tar_offsets
[] = {
125 static const unsigned long split_queue_offsets
[] = {
126 0x4870 /* SPLIT QUEUE 0 */,
127 0x5870 /* SPLIT QUEUE 1 */,
128 0x6870 /* SPLIT QUEUE 2 */,
129 0x7870 /* SPLIT QUEUE 3 */
132 static const unsigned long phb_offsets
[] = {
139 /* PHB debug registers */
141 static const unsigned long phb_debug_offsets
[] = {
142 0x4000 /* PHB 0 DEBUG */,
143 0x5000 /* PHB 1 DEBUG */,
144 0x6000 /* PHB 2 DEBUG */,
145 0x7000 /* PHB 3 DEBUG */
149 * STUFF register for each debug PHB,
150 * byte 1 = start bus number, byte 2 = end bus number
153 #define PHB_DEBUG_STUFF_OFFSET 0x0020
155 #define EMERGENCY_PAGES 32 /* = 128KB */
157 unsigned int specified_table_size
= TCE_TABLE_SIZE_UNSPECIFIED
;
158 static int translate_empty_slots __read_mostly
= 0;
159 static int calgary_detected __read_mostly
= 0;
161 static struct rio_table_hdr
*rio_table_hdr __initdata
;
162 static struct scal_detail
*scal_devs
[MAX_NUMNODES
] __initdata
;
163 static struct rio_detail
*rio_devs
[MAX_NUMNODES
* 4] __initdata
;
165 struct calgary_bus_info
{
167 unsigned char translation_disabled
;
172 static void calgary_handle_quirks(struct iommu_table
*tbl
, struct pci_dev
*dev
);
173 static void calgary_tce_cache_blast(struct iommu_table
*tbl
);
174 static void calgary_dump_error_regs(struct iommu_table
*tbl
);
175 static void calioc2_handle_quirks(struct iommu_table
*tbl
, struct pci_dev
*dev
);
176 static void calioc2_tce_cache_blast(struct iommu_table
*tbl
);
177 static void calioc2_dump_error_regs(struct iommu_table
*tbl
);
178 static void calgary_init_bitmap_from_tce_table(struct iommu_table
*tbl
);
179 static void get_tce_space_from_tar(void);
181 static struct cal_chipset_ops calgary_chip_ops
= {
182 .handle_quirks
= calgary_handle_quirks
,
183 .tce_cache_blast
= calgary_tce_cache_blast
,
184 .dump_error_regs
= calgary_dump_error_regs
187 static struct cal_chipset_ops calioc2_chip_ops
= {
188 .handle_quirks
= calioc2_handle_quirks
,
189 .tce_cache_blast
= calioc2_tce_cache_blast
,
190 .dump_error_regs
= calioc2_dump_error_regs
193 static struct calgary_bus_info bus_info
[MAX_PHB_BUS_NUM
] = { { NULL
, 0, 0 }, };
195 static inline int translation_enabled(struct iommu_table
*tbl
)
197 /* only PHBs with translation enabled have an IOMMU table */
198 return (tbl
!= NULL
);
201 static void iommu_range_reserve(struct iommu_table
*tbl
,
202 unsigned long start_addr
, unsigned int npages
)
208 index
= start_addr
>> PAGE_SHIFT
;
210 /* bail out if we're asked to reserve a region we don't cover */
211 if (index
>= tbl
->it_size
)
214 end
= index
+ npages
;
215 if (end
> tbl
->it_size
) /* don't go off the table */
218 spin_lock_irqsave(&tbl
->it_lock
, flags
);
220 bitmap_set(tbl
->it_map
, index
, npages
);
222 spin_unlock_irqrestore(&tbl
->it_lock
, flags
);
225 static unsigned long iommu_range_alloc(struct device
*dev
,
226 struct iommu_table
*tbl
,
230 unsigned long offset
;
231 unsigned long boundary_size
;
233 boundary_size
= ALIGN(dma_get_seg_boundary(dev
) + 1,
234 PAGE_SIZE
) >> PAGE_SHIFT
;
238 spin_lock_irqsave(&tbl
->it_lock
, flags
);
240 offset
= iommu_area_alloc(tbl
->it_map
, tbl
->it_size
, tbl
->it_hint
,
241 npages
, 0, boundary_size
, 0);
242 if (offset
== ~0UL) {
243 tbl
->chip_ops
->tce_cache_blast(tbl
);
245 offset
= iommu_area_alloc(tbl
->it_map
, tbl
->it_size
, 0,
246 npages
, 0, boundary_size
, 0);
247 if (offset
== ~0UL) {
248 printk(KERN_WARNING
"Calgary: IOMMU full.\n");
249 spin_unlock_irqrestore(&tbl
->it_lock
, flags
);
250 if (panic_on_overflow
)
251 panic("Calgary: fix the allocator.\n");
253 return DMA_ERROR_CODE
;
257 tbl
->it_hint
= offset
+ npages
;
258 BUG_ON(tbl
->it_hint
> tbl
->it_size
);
260 spin_unlock_irqrestore(&tbl
->it_lock
, flags
);
265 static dma_addr_t
iommu_alloc(struct device
*dev
, struct iommu_table
*tbl
,
266 void *vaddr
, unsigned int npages
, int direction
)
271 entry
= iommu_range_alloc(dev
, tbl
, npages
);
273 if (unlikely(entry
== DMA_ERROR_CODE
)) {
274 printk(KERN_WARNING
"Calgary: failed to allocate %u pages in "
275 "iommu %p\n", npages
, tbl
);
276 return DMA_ERROR_CODE
;
279 /* set the return dma address */
280 ret
= (entry
<< PAGE_SHIFT
) | ((unsigned long)vaddr
& ~PAGE_MASK
);
282 /* put the TCEs in the HW table */
283 tce_build(tbl
, entry
, npages
, (unsigned long)vaddr
& PAGE_MASK
,
288 static void iommu_free(struct iommu_table
*tbl
, dma_addr_t dma_addr
,
292 unsigned long badend
;
295 /* were we called with bad_dma_address? */
296 badend
= DMA_ERROR_CODE
+ (EMERGENCY_PAGES
* PAGE_SIZE
);
297 if (unlikely((dma_addr
>= DMA_ERROR_CODE
) && (dma_addr
< badend
))) {
298 WARN(1, KERN_ERR
"Calgary: driver tried unmapping bad DMA "
299 "address 0x%Lx\n", dma_addr
);
303 entry
= dma_addr
>> PAGE_SHIFT
;
305 BUG_ON(entry
+ npages
> tbl
->it_size
);
307 tce_free(tbl
, entry
, npages
);
309 spin_lock_irqsave(&tbl
->it_lock
, flags
);
311 bitmap_clear(tbl
->it_map
, entry
, npages
);
313 spin_unlock_irqrestore(&tbl
->it_lock
, flags
);
316 static inline struct iommu_table
*find_iommu_table(struct device
*dev
)
318 struct pci_dev
*pdev
;
319 struct pci_bus
*pbus
;
320 struct iommu_table
*tbl
;
322 pdev
= to_pci_dev(dev
);
324 /* search up the device tree for an iommu */
327 tbl
= pci_iommu(pbus
);
328 if (tbl
&& tbl
->it_busno
== pbus
->number
)
334 BUG_ON(tbl
&& (tbl
->it_busno
!= pbus
->number
));
339 static void calgary_unmap_sg(struct device
*dev
, struct scatterlist
*sglist
,
340 int nelems
,enum dma_data_direction dir
,
341 struct dma_attrs
*attrs
)
343 struct iommu_table
*tbl
= find_iommu_table(dev
);
344 struct scatterlist
*s
;
347 if (!translation_enabled(tbl
))
350 for_each_sg(sglist
, s
, nelems
, i
) {
352 dma_addr_t dma
= s
->dma_address
;
353 unsigned int dmalen
= s
->dma_length
;
358 npages
= iommu_num_pages(dma
, dmalen
, PAGE_SIZE
);
359 iommu_free(tbl
, dma
, npages
);
363 static int calgary_map_sg(struct device
*dev
, struct scatterlist
*sg
,
364 int nelems
, enum dma_data_direction dir
,
365 struct dma_attrs
*attrs
)
367 struct iommu_table
*tbl
= find_iommu_table(dev
);
368 struct scatterlist
*s
;
374 for_each_sg(sg
, s
, nelems
, i
) {
377 vaddr
= (unsigned long) sg_virt(s
);
378 npages
= iommu_num_pages(vaddr
, s
->length
, PAGE_SIZE
);
380 entry
= iommu_range_alloc(dev
, tbl
, npages
);
381 if (entry
== DMA_ERROR_CODE
) {
382 /* makes sure unmap knows to stop */
387 s
->dma_address
= (entry
<< PAGE_SHIFT
) | s
->offset
;
389 /* insert into HW table */
390 tce_build(tbl
, entry
, npages
, vaddr
& PAGE_MASK
, dir
);
392 s
->dma_length
= s
->length
;
397 calgary_unmap_sg(dev
, sg
, nelems
, dir
, NULL
);
398 for_each_sg(sg
, s
, nelems
, i
) {
399 sg
->dma_address
= DMA_ERROR_CODE
;
405 static dma_addr_t
calgary_map_page(struct device
*dev
, struct page
*page
,
406 unsigned long offset
, size_t size
,
407 enum dma_data_direction dir
,
408 struct dma_attrs
*attrs
)
410 void *vaddr
= page_address(page
) + offset
;
413 struct iommu_table
*tbl
= find_iommu_table(dev
);
415 uaddr
= (unsigned long)vaddr
;
416 npages
= iommu_num_pages(uaddr
, size
, PAGE_SIZE
);
418 return iommu_alloc(dev
, tbl
, vaddr
, npages
, dir
);
421 static void calgary_unmap_page(struct device
*dev
, dma_addr_t dma_addr
,
422 size_t size
, enum dma_data_direction dir
,
423 struct dma_attrs
*attrs
)
425 struct iommu_table
*tbl
= find_iommu_table(dev
);
428 npages
= iommu_num_pages(dma_addr
, size
, PAGE_SIZE
);
429 iommu_free(tbl
, dma_addr
, npages
);
432 static void* calgary_alloc_coherent(struct device
*dev
, size_t size
,
433 dma_addr_t
*dma_handle
, gfp_t flag
, struct dma_attrs
*attrs
)
437 unsigned int npages
, order
;
438 struct iommu_table
*tbl
= find_iommu_table(dev
);
440 size
= PAGE_ALIGN(size
); /* size rounded up to full pages */
441 npages
= size
>> PAGE_SHIFT
;
442 order
= get_order(size
);
444 flag
&= ~(__GFP_DMA
| __GFP_HIGHMEM
| __GFP_DMA32
);
446 /* alloc enough pages (and possibly more) */
447 ret
= (void *)__get_free_pages(flag
, order
);
450 memset(ret
, 0, size
);
452 /* set up tces to cover the allocated range */
453 mapping
= iommu_alloc(dev
, tbl
, ret
, npages
, DMA_BIDIRECTIONAL
);
454 if (mapping
== DMA_ERROR_CODE
)
456 *dma_handle
= mapping
;
459 free_pages((unsigned long)ret
, get_order(size
));
465 static void calgary_free_coherent(struct device
*dev
, size_t size
,
466 void *vaddr
, dma_addr_t dma_handle
,
467 struct dma_attrs
*attrs
)
470 struct iommu_table
*tbl
= find_iommu_table(dev
);
472 size
= PAGE_ALIGN(size
);
473 npages
= size
>> PAGE_SHIFT
;
475 iommu_free(tbl
, dma_handle
, npages
);
476 free_pages((unsigned long)vaddr
, get_order(size
));
479 static struct dma_map_ops calgary_dma_ops
= {
480 .alloc
= calgary_alloc_coherent
,
481 .free
= calgary_free_coherent
,
482 .map_sg
= calgary_map_sg
,
483 .unmap_sg
= calgary_unmap_sg
,
484 .map_page
= calgary_map_page
,
485 .unmap_page
= calgary_unmap_page
,
488 static inline void __iomem
* busno_to_bbar(unsigned char num
)
490 return bus_info
[num
].bbar
;
493 static inline int busno_to_phbid(unsigned char num
)
495 return bus_info
[num
].phbid
;
498 static inline unsigned long split_queue_offset(unsigned char num
)
500 size_t idx
= busno_to_phbid(num
);
502 return split_queue_offsets
[idx
];
505 static inline unsigned long tar_offset(unsigned char num
)
507 size_t idx
= busno_to_phbid(num
);
509 return tar_offsets
[idx
];
512 static inline unsigned long phb_offset(unsigned char num
)
514 size_t idx
= busno_to_phbid(num
);
516 return phb_offsets
[idx
];
519 static inline void __iomem
* calgary_reg(void __iomem
*bar
, unsigned long offset
)
521 unsigned long target
= ((unsigned long)bar
) | offset
;
522 return (void __iomem
*)target
;
525 static inline int is_calioc2(unsigned short device
)
527 return (device
== PCI_DEVICE_ID_IBM_CALIOC2
);
530 static inline int is_calgary(unsigned short device
)
532 return (device
== PCI_DEVICE_ID_IBM_CALGARY
);
/* True when @device is any Calgary-family chip (Calgary or CalIOC2). */
static inline int is_cal_pci_dev(unsigned short device)
{
	if (is_calgary(device))
		return 1;

	return is_calioc2(device);
}
540 static void calgary_tce_cache_blast(struct iommu_table
*tbl
)
545 void __iomem
*bbar
= tbl
->bbar
;
546 void __iomem
*target
;
548 /* disable arbitration on the bus */
549 target
= calgary_reg(bbar
, phb_offset(tbl
->it_busno
) | PHB_AER_OFFSET
);
553 /* read plssr to ensure it got there */
554 target
= calgary_reg(bbar
, phb_offset(tbl
->it_busno
) | PHB_PLSSR_OFFSET
);
557 /* poll split queues until all DMA activity is done */
558 target
= calgary_reg(bbar
, split_queue_offset(tbl
->it_busno
));
562 } while ((val
& 0xff) != 0xff && i
< 100);
564 printk(KERN_WARNING
"Calgary: PCI bus not quiesced, "
565 "continuing anyway\n");
567 /* invalidate TCE cache */
568 target
= calgary_reg(bbar
, tar_offset(tbl
->it_busno
));
569 writeq(tbl
->tar_val
, target
);
571 /* enable arbitration */
572 target
= calgary_reg(bbar
, phb_offset(tbl
->it_busno
) | PHB_AER_OFFSET
);
574 (void)readl(target
); /* flush */
577 static void calioc2_tce_cache_blast(struct iommu_table
*tbl
)
579 void __iomem
*bbar
= tbl
->bbar
;
580 void __iomem
*target
;
585 unsigned char bus
= tbl
->it_busno
;
588 printk(KERN_DEBUG
"Calgary: CalIOC2 bus 0x%x entering tce cache blast "
589 "sequence - count %d\n", bus
, count
);
591 /* 1. using the Page Migration Control reg set SoftStop */
592 target
= calgary_reg(bbar
, phb_offset(bus
) | PHB_PAGE_MIG_CTRL
);
593 val
= be32_to_cpu(readl(target
));
594 printk(KERN_DEBUG
"1a. read 0x%x [LE] from %p\n", val
, target
);
596 printk(KERN_DEBUG
"1b. writing 0x%x [LE] to %p\n", val
, target
);
597 writel(cpu_to_be32(val
), target
);
599 /* 2. poll split queues until all DMA activity is done */
600 printk(KERN_DEBUG
"2a. starting to poll split queues\n");
601 target
= calgary_reg(bbar
, split_queue_offset(bus
));
603 val64
= readq(target
);
605 } while ((val64
& 0xff) != 0xff && i
< 100);
607 printk(KERN_WARNING
"CalIOC2: PCI bus not quiesced, "
608 "continuing anyway\n");
610 /* 3. poll Page Migration DEBUG for SoftStopFault */
611 target
= calgary_reg(bbar
, phb_offset(bus
) | PHB_PAGE_MIG_DEBUG
);
612 val
= be32_to_cpu(readl(target
));
613 printk(KERN_DEBUG
"3. read 0x%x [LE] from %p\n", val
, target
);
615 /* 4. if SoftStopFault - goto (1) */
616 if (val
& PMR_SOFTSTOPFAULT
) {
620 printk(KERN_WARNING
"CalIOC2: too many SoftStopFaults, "
621 "aborting TCE cache flush sequence!\n");
622 return; /* pray for the best */
626 /* 5. Slam into HardStop by reading PHB_PAGE_MIG_CTRL */
627 target
= calgary_reg(bbar
, phb_offset(bus
) | PHB_PAGE_MIG_CTRL
);
628 printk(KERN_DEBUG
"5a. slamming into HardStop by reading %p\n", target
);
629 val
= be32_to_cpu(readl(target
));
630 printk(KERN_DEBUG
"5b. read 0x%x [LE] from %p\n", val
, target
);
631 target
= calgary_reg(bbar
, phb_offset(bus
) | PHB_PAGE_MIG_DEBUG
);
632 val
= be32_to_cpu(readl(target
));
633 printk(KERN_DEBUG
"5c. read 0x%x [LE] from %p (debug)\n", val
, target
);
635 /* 6. invalidate TCE cache */
636 printk(KERN_DEBUG
"6. invalidating TCE cache\n");
637 target
= calgary_reg(bbar
, tar_offset(bus
));
638 writeq(tbl
->tar_val
, target
);
640 /* 7. Re-read PMCR */
641 printk(KERN_DEBUG
"7a. Re-reading PMCR\n");
642 target
= calgary_reg(bbar
, phb_offset(bus
) | PHB_PAGE_MIG_CTRL
);
643 val
= be32_to_cpu(readl(target
));
644 printk(KERN_DEBUG
"7b. read 0x%x [LE] from %p\n", val
, target
);
646 /* 8. Remove HardStop */
647 printk(KERN_DEBUG
"8a. removing HardStop from PMCR\n");
648 target
= calgary_reg(bbar
, phb_offset(bus
) | PHB_PAGE_MIG_CTRL
);
650 printk(KERN_DEBUG
"8b. writing 0x%x [LE] to %p\n", val
, target
);
651 writel(cpu_to_be32(val
), target
);
652 val
= be32_to_cpu(readl(target
));
653 printk(KERN_DEBUG
"8c. read 0x%x [LE] from %p\n", val
, target
);
656 static void __init
calgary_reserve_mem_region(struct pci_dev
*dev
, u64 start
,
659 unsigned int numpages
;
661 limit
= limit
| 0xfffff;
664 numpages
= ((limit
- start
) >> PAGE_SHIFT
);
665 iommu_range_reserve(pci_iommu(dev
->bus
), start
, numpages
);
668 static void __init
calgary_reserve_peripheral_mem_1(struct pci_dev
*dev
)
670 void __iomem
*target
;
671 u64 low
, high
, sizelow
;
673 struct iommu_table
*tbl
= pci_iommu(dev
->bus
);
674 unsigned char busnum
= dev
->bus
->number
;
675 void __iomem
*bbar
= tbl
->bbar
;
677 /* peripheral MEM_1 region */
678 target
= calgary_reg(bbar
, phb_offset(busnum
) | PHB_MEM_1_LOW
);
679 low
= be32_to_cpu(readl(target
));
680 target
= calgary_reg(bbar
, phb_offset(busnum
) | PHB_MEM_1_HIGH
);
681 high
= be32_to_cpu(readl(target
));
682 target
= calgary_reg(bbar
, phb_offset(busnum
) | PHB_MEM_1_SIZE
);
683 sizelow
= be32_to_cpu(readl(target
));
685 start
= (high
<< 32) | low
;
688 calgary_reserve_mem_region(dev
, start
, limit
);
691 static void __init
calgary_reserve_peripheral_mem_2(struct pci_dev
*dev
)
693 void __iomem
*target
;
695 u64 low
, high
, sizelow
, sizehigh
;
697 struct iommu_table
*tbl
= pci_iommu(dev
->bus
);
698 unsigned char busnum
= dev
->bus
->number
;
699 void __iomem
*bbar
= tbl
->bbar
;
702 target
= calgary_reg(bbar
, phb_offset(busnum
) | PHB_CONFIG_RW_OFFSET
);
703 val32
= be32_to_cpu(readl(target
));
704 if (!(val32
& PHB_MEM2_ENABLE
))
707 target
= calgary_reg(bbar
, phb_offset(busnum
) | PHB_MEM_2_LOW
);
708 low
= be32_to_cpu(readl(target
));
709 target
= calgary_reg(bbar
, phb_offset(busnum
) | PHB_MEM_2_HIGH
);
710 high
= be32_to_cpu(readl(target
));
711 target
= calgary_reg(bbar
, phb_offset(busnum
) | PHB_MEM_2_SIZE_LOW
);
712 sizelow
= be32_to_cpu(readl(target
));
713 target
= calgary_reg(bbar
, phb_offset(busnum
) | PHB_MEM_2_SIZE_HIGH
);
714 sizehigh
= be32_to_cpu(readl(target
));
716 start
= (high
<< 32) | low
;
717 limit
= (sizehigh
<< 32) | sizelow
;
719 calgary_reserve_mem_region(dev
, start
, limit
);
723 * some regions of the IO address space do not get translated, so we
724 * must not give devices IO addresses in those regions. The regions
725 * are the 640KB-1MB region and the two PCI peripheral memory holes.
726 * Reserve all of them in the IOMMU bitmap to avoid giving them out
729 static void __init
calgary_reserve_regions(struct pci_dev
*dev
)
733 struct iommu_table
*tbl
= pci_iommu(dev
->bus
);
735 /* reserve EMERGENCY_PAGES from bad_dma_address and up */
736 iommu_range_reserve(tbl
, DMA_ERROR_CODE
, EMERGENCY_PAGES
);
738 /* avoid the BIOS/VGA first 640KB-1MB region */
739 /* for CalIOC2 - avoid the entire first MB */
740 if (is_calgary(dev
->device
)) {
741 start
= (640 * 1024);
742 npages
= ((1024 - 640) * 1024) >> PAGE_SHIFT
;
743 } else { /* calioc2 */
745 npages
= (1 * 1024 * 1024) >> PAGE_SHIFT
;
747 iommu_range_reserve(tbl
, start
, npages
);
749 /* reserve the two PCI peripheral memory regions in IO space */
750 calgary_reserve_peripheral_mem_1(dev
);
751 calgary_reserve_peripheral_mem_2(dev
);
754 static int __init
calgary_setup_tar(struct pci_dev
*dev
, void __iomem
*bbar
)
758 void __iomem
*target
;
760 struct iommu_table
*tbl
;
762 /* build TCE tables for each PHB */
763 ret
= build_tce_table(dev
, bbar
);
767 tbl
= pci_iommu(dev
->bus
);
768 tbl
->it_base
= (unsigned long)bus_info
[dev
->bus
->number
].tce_space
;
770 if (is_kdump_kernel())
771 calgary_init_bitmap_from_tce_table(tbl
);
773 tce_free(tbl
, 0, tbl
->it_size
);
775 if (is_calgary(dev
->device
))
776 tbl
->chip_ops
= &calgary_chip_ops
;
777 else if (is_calioc2(dev
->device
))
778 tbl
->chip_ops
= &calioc2_chip_ops
;
782 calgary_reserve_regions(dev
);
784 /* set TARs for each PHB */
785 target
= calgary_reg(bbar
, tar_offset(dev
->bus
->number
));
786 val64
= be64_to_cpu(readq(target
));
788 /* zero out all TAR bits under sw control */
789 val64
&= ~TAR_SW_BITS
;
790 table_phys
= (u64
)__pa(tbl
->it_base
);
794 BUG_ON(specified_table_size
> TCE_TABLE_SIZE_8M
);
795 val64
|= (u64
) specified_table_size
;
797 tbl
->tar_val
= cpu_to_be64(val64
);
799 writeq(tbl
->tar_val
, target
);
800 readq(target
); /* flush */
805 static void __init
calgary_free_bus(struct pci_dev
*dev
)
808 struct iommu_table
*tbl
= pci_iommu(dev
->bus
);
809 void __iomem
*target
;
810 unsigned int bitmapsz
;
812 target
= calgary_reg(tbl
->bbar
, tar_offset(dev
->bus
->number
));
813 val64
= be64_to_cpu(readq(target
));
814 val64
&= ~TAR_SW_BITS
;
815 writeq(cpu_to_be64(val64
), target
);
816 readq(target
); /* flush */
818 bitmapsz
= tbl
->it_size
/ BITS_PER_BYTE
;
819 free_pages((unsigned long)tbl
->it_map
, get_order(bitmapsz
));
824 set_pci_iommu(dev
->bus
, NULL
);
826 /* Can't free bootmem allocated memory after system is up :-( */
827 bus_info
[dev
->bus
->number
].tce_space
= NULL
;
830 static void calgary_dump_error_regs(struct iommu_table
*tbl
)
832 void __iomem
*bbar
= tbl
->bbar
;
833 void __iomem
*target
;
836 target
= calgary_reg(bbar
, phb_offset(tbl
->it_busno
) | PHB_CSR_OFFSET
);
837 csr
= be32_to_cpu(readl(target
));
839 target
= calgary_reg(bbar
, phb_offset(tbl
->it_busno
) | PHB_PLSSR_OFFSET
);
840 plssr
= be32_to_cpu(readl(target
));
842 /* If no error, the agent ID in the CSR is not valid */
843 printk(KERN_EMERG
"Calgary: DMA error on Calgary PHB 0x%x, "
844 "0x%08x@CSR 0x%08x@PLSSR\n", tbl
->it_busno
, csr
, plssr
);
847 static void calioc2_dump_error_regs(struct iommu_table
*tbl
)
849 void __iomem
*bbar
= tbl
->bbar
;
850 u32 csr
, csmr
, plssr
, mck
, rcstat
;
851 void __iomem
*target
;
852 unsigned long phboff
= phb_offset(tbl
->it_busno
);
853 unsigned long erroff
;
858 target
= calgary_reg(bbar
, phboff
| PHB_CSR_OFFSET
);
859 csr
= be32_to_cpu(readl(target
));
861 target
= calgary_reg(bbar
, phboff
| PHB_PLSSR_OFFSET
);
862 plssr
= be32_to_cpu(readl(target
));
864 target
= calgary_reg(bbar
, phboff
| 0x290);
865 csmr
= be32_to_cpu(readl(target
));
867 target
= calgary_reg(bbar
, phboff
| 0x800);
868 mck
= be32_to_cpu(readl(target
));
870 printk(KERN_EMERG
"Calgary: DMA error on CalIOC2 PHB 0x%x\n",
873 printk(KERN_EMERG
"Calgary: 0x%08x@CSR 0x%08x@PLSSR 0x%08x@CSMR 0x%08x@MCK\n",
874 csr
, plssr
, csmr
, mck
);
876 /* dump rest of error regs */
877 printk(KERN_EMERG
"Calgary: ");
878 for (i
= 0; i
< ARRAY_SIZE(errregs
); i
++) {
879 /* err regs are at 0x810 - 0x870 */
880 erroff
= (0x810 + (i
* 0x10));
881 target
= calgary_reg(bbar
, phboff
| erroff
);
882 errregs
[i
] = be32_to_cpu(readl(target
));
883 printk("0x%08x@0x%lx ", errregs
[i
], erroff
);
887 /* root complex status */
888 target
= calgary_reg(bbar
, phboff
| PHB_ROOT_COMPLEX_STATUS
);
889 rcstat
= be32_to_cpu(readl(target
));
890 printk(KERN_EMERG
"Calgary: 0x%08x@0x%x\n", rcstat
,
891 PHB_ROOT_COMPLEX_STATUS
);
894 static void calgary_watchdog(unsigned long data
)
896 struct pci_dev
*dev
= (struct pci_dev
*)data
;
897 struct iommu_table
*tbl
= pci_iommu(dev
->bus
);
898 void __iomem
*bbar
= tbl
->bbar
;
900 void __iomem
*target
;
902 target
= calgary_reg(bbar
, phb_offset(tbl
->it_busno
) | PHB_CSR_OFFSET
);
903 val32
= be32_to_cpu(readl(target
));
905 /* If no error, the agent ID in the CSR is not valid */
906 if (val32
& CSR_AGENT_MASK
) {
907 tbl
->chip_ops
->dump_error_regs(tbl
);
912 /* Disable bus that caused the error */
913 target
= calgary_reg(bbar
, phb_offset(tbl
->it_busno
) |
914 PHB_CONFIG_RW_OFFSET
);
915 val32
= be32_to_cpu(readl(target
));
916 val32
|= PHB_SLOT_DISABLE
;
917 writel(cpu_to_be32(val32
), target
);
918 readl(target
); /* flush */
920 /* Reset the timer */
921 mod_timer(&tbl
->watchdog_timer
, jiffies
+ 2 * HZ
);
925 static void __init
calgary_set_split_completion_timeout(void __iomem
*bbar
,
926 unsigned char busnum
, unsigned long timeout
)
929 void __iomem
*target
;
930 unsigned int phb_shift
= ~0; /* silence gcc */
933 switch (busno_to_phbid(busnum
)) {
934 case 0: phb_shift
= (63 - 19);
936 case 1: phb_shift
= (63 - 23);
938 case 2: phb_shift
= (63 - 27);
940 case 3: phb_shift
= (63 - 35);
943 BUG_ON(busno_to_phbid(busnum
));
946 target
= calgary_reg(bbar
, CALGARY_CONFIG_REG
);
947 val64
= be64_to_cpu(readq(target
));
949 /* zero out this PHB's timer bits */
950 mask
= ~(0xFUL
<< phb_shift
);
952 val64
|= (timeout
<< phb_shift
);
953 writeq(cpu_to_be64(val64
), target
);
954 readq(target
); /* flush */
957 static void __init
calioc2_handle_quirks(struct iommu_table
*tbl
, struct pci_dev
*dev
)
959 unsigned char busnum
= dev
->bus
->number
;
960 void __iomem
*bbar
= tbl
->bbar
;
961 void __iomem
*target
;
965 * CalIOC2 designers recommend setting bit 8 in 0xnDB0 to 1
967 target
= calgary_reg(bbar
, phb_offset(busnum
) | PHB_SAVIOR_L2
);
968 val
= cpu_to_be32(readl(target
));
970 writel(cpu_to_be32(val
), target
);
973 static void __init
calgary_handle_quirks(struct iommu_table
*tbl
, struct pci_dev
*dev
)
975 unsigned char busnum
= dev
->bus
->number
;
978 * Give split completion a longer timeout on bus 1 for aic94xx
979 * http://bugzilla.kernel.org/show_bug.cgi?id=7180
981 if (is_calgary(dev
->device
) && (busnum
== 1))
982 calgary_set_split_completion_timeout(tbl
->bbar
, busnum
,
986 static void __init
calgary_enable_translation(struct pci_dev
*dev
)
989 unsigned char busnum
;
990 void __iomem
*target
;
992 struct iommu_table
*tbl
;
994 busnum
= dev
->bus
->number
;
995 tbl
= pci_iommu(dev
->bus
);
998 /* enable TCE in PHB Config Register */
999 target
= calgary_reg(bbar
, phb_offset(busnum
) | PHB_CONFIG_RW_OFFSET
);
1000 val32
= be32_to_cpu(readl(target
));
1001 val32
|= PHB_TCE_ENABLE
| PHB_DAC_DISABLE
| PHB_MCSR_ENABLE
;
1003 printk(KERN_INFO
"Calgary: enabling translation on %s PHB %#x\n",
1004 (dev
->device
== PCI_DEVICE_ID_IBM_CALGARY
) ?
1005 "Calgary" : "CalIOC2", busnum
);
1006 printk(KERN_INFO
"Calgary: errant DMAs will now be prevented on this "
1009 writel(cpu_to_be32(val32
), target
);
1010 readl(target
); /* flush */
1012 init_timer(&tbl
->watchdog_timer
);
1013 tbl
->watchdog_timer
.function
= &calgary_watchdog
;
1014 tbl
->watchdog_timer
.data
= (unsigned long)dev
;
1015 mod_timer(&tbl
->watchdog_timer
, jiffies
);
1018 static void __init
calgary_disable_translation(struct pci_dev
*dev
)
1021 unsigned char busnum
;
1022 void __iomem
*target
;
1024 struct iommu_table
*tbl
;
1026 busnum
= dev
->bus
->number
;
1027 tbl
= pci_iommu(dev
->bus
);
1030 /* disable TCE in PHB Config Register */
1031 target
= calgary_reg(bbar
, phb_offset(busnum
) | PHB_CONFIG_RW_OFFSET
);
1032 val32
= be32_to_cpu(readl(target
));
1033 val32
&= ~(PHB_TCE_ENABLE
| PHB_DAC_DISABLE
| PHB_MCSR_ENABLE
);
1035 printk(KERN_INFO
"Calgary: disabling translation on PHB %#x!\n", busnum
);
1036 writel(cpu_to_be32(val32
), target
);
1037 readl(target
); /* flush */
1039 del_timer_sync(&tbl
->watchdog_timer
);
1042 static void __init
calgary_init_one_nontraslated(struct pci_dev
*dev
)
1045 set_pci_iommu(dev
->bus
, NULL
);
1047 /* is the device behind a bridge? */
1048 if (dev
->bus
->parent
)
1049 dev
->bus
->parent
->self
= dev
;
1051 dev
->bus
->self
= dev
;
1054 static int __init
calgary_init_one(struct pci_dev
*dev
)
1057 struct iommu_table
*tbl
;
1060 bbar
= busno_to_bbar(dev
->bus
->number
);
1061 ret
= calgary_setup_tar(dev
, bbar
);
1067 if (dev
->bus
->parent
) {
1068 if (dev
->bus
->parent
->self
)
1069 printk(KERN_WARNING
"Calgary: IEEEE, dev %p has "
1070 "bus->parent->self!\n", dev
);
1071 dev
->bus
->parent
->self
= dev
;
1073 dev
->bus
->self
= dev
;
1075 tbl
= pci_iommu(dev
->bus
);
1076 tbl
->chip_ops
->handle_quirks(tbl
, dev
);
1078 calgary_enable_translation(dev
);
1086 static int __init
calgary_locate_bbars(void)
1089 int rioidx
, phb
, bus
;
1091 void __iomem
*target
;
1092 unsigned long offset
;
1093 u8 start_bus
, end_bus
;
1097 for (rioidx
= 0; rioidx
< rio_table_hdr
->num_rio_dev
; rioidx
++) {
1098 struct rio_detail
*rio
= rio_devs
[rioidx
];
1100 if ((rio
->type
!= COMPAT_CALGARY
) && (rio
->type
!= ALT_CALGARY
))
1103 /* map entire 1MB of Calgary config space */
1104 bbar
= ioremap_nocache(rio
->BBAR
, 1024 * 1024);
1108 for (phb
= 0; phb
< PHBS_PER_CALGARY
; phb
++) {
1109 offset
= phb_debug_offsets
[phb
] | PHB_DEBUG_STUFF_OFFSET
;
1110 target
= calgary_reg(bbar
, offset
);
1112 val
= be32_to_cpu(readl(target
));
1114 start_bus
= (u8
)((val
& 0x00FF0000) >> 16);
1115 end_bus
= (u8
)((val
& 0x0000FF00) >> 8);
1118 for (bus
= start_bus
; bus
<= end_bus
; bus
++) {
1119 bus_info
[bus
].bbar
= bbar
;
1120 bus_info
[bus
].phbid
= phb
;
1123 bus_info
[start_bus
].bbar
= bbar
;
1124 bus_info
[start_bus
].phbid
= phb
;
1132 /* scan bus_info and iounmap any bbars we previously ioremap'd */
1133 for (bus
= 0; bus
< ARRAY_SIZE(bus_info
); bus
++)
1134 if (bus_info
[bus
].bbar
)
1135 iounmap(bus_info
[bus
].bbar
);
1140 static int __init
calgary_init(void)
1143 struct pci_dev
*dev
= NULL
;
1144 struct calgary_bus_info
*info
;
1146 ret
= calgary_locate_bbars();
1150 /* Purely for kdump kernel case */
1151 if (is_kdump_kernel())
1152 get_tce_space_from_tar();
1155 dev
= pci_get_device(PCI_VENDOR_ID_IBM
, PCI_ANY_ID
, dev
);
1158 if (!is_cal_pci_dev(dev
->device
))
1161 info
= &bus_info
[dev
->bus
->number
];
1162 if (info
->translation_disabled
) {
1163 calgary_init_one_nontraslated(dev
);
1167 if (!info
->tce_space
&& !translate_empty_slots
)
1170 ret
= calgary_init_one(dev
);
1176 for_each_pci_dev(dev
) {
1177 struct iommu_table
*tbl
;
1179 tbl
= find_iommu_table(&dev
->dev
);
1181 if (translation_enabled(tbl
))
1182 dev
->dev
.archdata
.dma_ops
= &calgary_dma_ops
;
1189 dev
= pci_get_device(PCI_VENDOR_ID_IBM
, PCI_ANY_ID
, dev
);
1192 if (!is_cal_pci_dev(dev
->device
))
1195 info
= &bus_info
[dev
->bus
->number
];
1196 if (info
->translation_disabled
) {
1200 if (!info
->tce_space
&& !translate_empty_slots
)
1203 calgary_disable_translation(dev
);
1204 calgary_free_bus(dev
);
1205 pci_dev_put(dev
); /* Undo calgary_init_one()'s pci_dev_get() */
1206 dev
->dev
.archdata
.dma_ops
= NULL
;
1212 static inline int __init
determine_tce_table_size(u64 ram
)
1216 if (specified_table_size
!= TCE_TABLE_SIZE_UNSPECIFIED
)
1217 return specified_table_size
;
1220 * Table sizes are from 0 to 7 (TCE_TABLE_SIZE_64K to
1221 * TCE_TABLE_SIZE_8M). Table size 0 has 8K entries and each
1222 * larger table size has twice as many entries, so shift the
1223 * max ram address by 13 to divide by 8K and then look at the
1224 * order of the result to choose between 0-7.
1226 ret
= get_order(ram
>> 13);
1227 if (ret
> TCE_TABLE_SIZE_8M
)
1228 ret
= TCE_TABLE_SIZE_8M
;
1233 static int __init
build_detail_arrays(void)
1236 unsigned numnodes
, i
;
1237 int scal_detail_size
, rio_detail_size
;
1239 numnodes
= rio_table_hdr
->num_scal_dev
;
1240 if (numnodes
> MAX_NUMNODES
){
1242 "Calgary: MAX_NUMNODES too low! Defined as %d, "
1243 "but system has %d nodes.\n",
1244 MAX_NUMNODES
, numnodes
);
1248 switch (rio_table_hdr
->version
){
1250 scal_detail_size
= 11;
1251 rio_detail_size
= 13;
1254 scal_detail_size
= 12;
1255 rio_detail_size
= 15;
1259 "Calgary: Invalid Rio Grande Table Version: %d\n",
1260 rio_table_hdr
->version
);
1264 ptr
= ((unsigned long)rio_table_hdr
) + 3;
1265 for (i
= 0; i
< numnodes
; i
++, ptr
+= scal_detail_size
)
1266 scal_devs
[i
] = (struct scal_detail
*)ptr
;
1268 for (i
= 0; i
< rio_table_hdr
->num_rio_dev
;
1269 i
++, ptr
+= rio_detail_size
)
1270 rio_devs
[i
] = (struct rio_detail
*)ptr
;
1275 static int __init
calgary_bus_has_devices(int bus
, unsigned short pci_dev
)
1280 if (pci_dev
== PCI_DEVICE_ID_IBM_CALIOC2
) {
1282 * FIXME: properly scan for devices across the
1283 * PCI-to-PCI bridge on every CalIOC2 port.
1288 for (dev
= 1; dev
< 8; dev
++) {
1289 val
= read_pci_config(bus
, dev
, 0, 0);
1290 if (val
!= 0xffffffff)
1293 return (val
!= 0xffffffff);
1297 * calgary_init_bitmap_from_tce_table():
1298 * Function for kdump case. In the second/kdump kernel initialize
1299 * the bitmap based on the tce table entries obtained from first kernel
1301 static void calgary_init_bitmap_from_tce_table(struct iommu_table
*tbl
)
1305 tp
= ((u64
*)tbl
->it_base
);
1306 for (index
= 0 ; index
< tbl
->it_size
; index
++) {
1308 set_bit(index
, tbl
->it_map
);
1314 * get_tce_space_from_tar():
1315 * Function for kdump case. Get the tce tables from first kernel
1316 * by reading the contents of the base address register of calgary iommu
1318 static void __init
get_tce_space_from_tar(void)
1321 void __iomem
*target
;
1322 unsigned long tce_space
;
1324 for (bus
= 0; bus
< MAX_PHB_BUS_NUM
; bus
++) {
1325 struct calgary_bus_info
*info
= &bus_info
[bus
];
1326 unsigned short pci_device
;
1329 val
= read_pci_config(bus
, 0, 0, 0);
1330 pci_device
= (val
& 0xFFFF0000) >> 16;
1332 if (!is_cal_pci_dev(pci_device
))
1334 if (info
->translation_disabled
)
1337 if (calgary_bus_has_devices(bus
, pci_device
) ||
1338 translate_empty_slots
) {
1339 target
= calgary_reg(bus_info
[bus
].bbar
,
1341 tce_space
= be64_to_cpu(readq(target
));
1342 tce_space
= tce_space
& TAR_SW_BITS
;
1344 tce_space
= tce_space
& (~specified_table_size
);
1345 info
->tce_space
= (u64
*)__va(tce_space
);
1351 static int __init
calgary_iommu_init(void)
1355 /* ok, we're trying to use Calgary - let's roll */
1356 printk(KERN_INFO
"PCI-DMA: Using Calgary IOMMU\n");
1358 ret
= calgary_init();
1360 printk(KERN_ERR
"PCI-DMA: Calgary init failed %d, "
1361 "falling back to no_iommu\n", ret
);
1368 int __init
detect_calgary(void)
1372 int calgary_found
= 0;
1374 unsigned int offset
, prev_offset
;
1378 * if the user specified iommu=off or iommu=soft or we found
1379 * another HW IOMMU already, bail out.
1381 if (no_iommu
|| iommu_detected
)
1387 if (!early_pci_allowed())
1390 printk(KERN_DEBUG
"Calgary: detecting Calgary via BIOS EBDA area\n");
1392 ptr
= (unsigned long)phys_to_virt(get_bios_ebda());
1394 rio_table_hdr
= NULL
;
1398 * The next offset is stored in the 1st word.
1399 * Only parse up until the offset increases:
1401 while (offset
> prev_offset
) {
1402 /* The block id is stored in the 2nd word */
1403 if (*((unsigned short *)(ptr
+ offset
+ 2)) == 0x4752){
1404 /* set the pointer past the offset & block id */
1405 rio_table_hdr
= (struct rio_table_hdr
*)(ptr
+ offset
+ 4);
1408 prev_offset
= offset
;
1409 offset
= *((unsigned short *)(ptr
+ offset
));
1411 if (!rio_table_hdr
) {
1412 printk(KERN_DEBUG
"Calgary: Unable to locate Rio Grande table "
1413 "in EBDA - bailing!\n");
1417 ret
= build_detail_arrays();
1419 printk(KERN_DEBUG
"Calgary: build_detail_arrays ret %d\n", ret
);
1423 specified_table_size
= determine_tce_table_size((is_kdump_kernel() ?
1424 saved_max_pfn
: max_pfn
) * PAGE_SIZE
);
1426 for (bus
= 0; bus
< MAX_PHB_BUS_NUM
; bus
++) {
1427 struct calgary_bus_info
*info
= &bus_info
[bus
];
1428 unsigned short pci_device
;
1431 val
= read_pci_config(bus
, 0, 0, 0);
1432 pci_device
= (val
& 0xFFFF0000) >> 16;
1434 if (!is_cal_pci_dev(pci_device
))
1437 if (info
->translation_disabled
)
1440 if (calgary_bus_has_devices(bus
, pci_device
) ||
1441 translate_empty_slots
) {
1443 * If it is kdump kernel, find and use tce tables
1444 * from first kernel, else allocate tce tables here
1446 if (!is_kdump_kernel()) {
1447 tbl
= alloc_tce_table();
1450 info
->tce_space
= tbl
;
1456 printk(KERN_DEBUG
"Calgary: finished detection, Calgary %s\n",
1457 calgary_found
? "found" : "not found");
1459 if (calgary_found
) {
1461 calgary_detected
= 1;
1462 printk(KERN_INFO
"PCI-DMA: Calgary IOMMU detected.\n");
1463 printk(KERN_INFO
"PCI-DMA: Calgary TCE table spec is %d\n",
1464 specified_table_size
);
1466 x86_init
.iommu
.iommu_init
= calgary_iommu_init
;
1468 return calgary_found
;
1471 for (--bus
; bus
>= 0; --bus
) {
1472 struct calgary_bus_info
*info
= &bus_info
[bus
];
1474 if (info
->tce_space
)
1475 free_tce_table(info
->tce_space
);
1480 static int __init
calgary_parse_options(char *p
)
1482 unsigned int bridge
;
1488 if (!strncmp(p
, "64k", 3))
1489 specified_table_size
= TCE_TABLE_SIZE_64K
;
1490 else if (!strncmp(p
, "128k", 4))
1491 specified_table_size
= TCE_TABLE_SIZE_128K
;
1492 else if (!strncmp(p
, "256k", 4))
1493 specified_table_size
= TCE_TABLE_SIZE_256K
;
1494 else if (!strncmp(p
, "512k", 4))
1495 specified_table_size
= TCE_TABLE_SIZE_512K
;
1496 else if (!strncmp(p
, "1M", 2))
1497 specified_table_size
= TCE_TABLE_SIZE_1M
;
1498 else if (!strncmp(p
, "2M", 2))
1499 specified_table_size
= TCE_TABLE_SIZE_2M
;
1500 else if (!strncmp(p
, "4M", 2))
1501 specified_table_size
= TCE_TABLE_SIZE_4M
;
1502 else if (!strncmp(p
, "8M", 2))
1503 specified_table_size
= TCE_TABLE_SIZE_8M
;
1505 len
= strlen("translate_empty_slots");
1506 if (!strncmp(p
, "translate_empty_slots", len
))
1507 translate_empty_slots
= 1;
1509 len
= strlen("disable");
1510 if (!strncmp(p
, "disable", len
)) {
1516 ret
= kstrtoul(p
, 0, &val
);
1521 if (bridge
< MAX_PHB_BUS_NUM
) {
1522 printk(KERN_INFO
"Calgary: disabling "
1523 "translation for PHB %#x\n", bridge
);
1524 bus_info
[bridge
].translation_disabled
= 1;
1528 p
= strpbrk(p
, ",");
1536 __setup("calgary=", calgary_parse_options
);
1538 static void __init
calgary_fixup_one_tce_space(struct pci_dev
*dev
)
1540 struct iommu_table
*tbl
;
1541 unsigned int npages
;
1544 tbl
= pci_iommu(dev
->bus
);
1546 for (i
= 0; i
< 4; i
++) {
1547 struct resource
*r
= &dev
->resource
[PCI_BRIDGE_RESOURCES
+ i
];
1549 /* Don't give out TCEs that map MEM resources */
1550 if (!(r
->flags
& IORESOURCE_MEM
))
1553 /* 0-based? we reserve the whole 1st MB anyway */
1557 /* cover the whole region */
1558 npages
= resource_size(r
) >> PAGE_SHIFT
;
1561 iommu_range_reserve(tbl
, r
->start
, npages
);
1565 static int __init
calgary_fixup_tce_spaces(void)
1567 struct pci_dev
*dev
= NULL
;
1568 struct calgary_bus_info
*info
;
1570 if (no_iommu
|| swiotlb
|| !calgary_detected
)
1573 printk(KERN_DEBUG
"Calgary: fixing up tce spaces\n");
1576 dev
= pci_get_device(PCI_VENDOR_ID_IBM
, PCI_ANY_ID
, dev
);
1579 if (!is_cal_pci_dev(dev
->device
))
1582 info
= &bus_info
[dev
->bus
->number
];
1583 if (info
->translation_disabled
)
1586 if (!info
->tce_space
)
1589 calgary_fixup_one_tce_space(dev
);
1597 * We need to be call after pcibios_assign_resources (fs_initcall level)
1598 * and before device_initcall.
1600 rootfs_initcall(calgary_fixup_tce_spaces
);
1602 IOMMU_INIT_POST(detect_calgary
);