/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows the use of PCI devices that only support 32-bit addresses on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <asm/atomic.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/k8.h>		/* k8_northbridges, cache_k8_northbridges() */

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

static int no_agp;			/* AGP driver unusable or disabled */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;
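
/*
 * GART page-table entry layout, as implemented by GPTE_ENCODE/GPTE_DECODE
 * below: bits 31:12 keep the low part of the page's physical address in
 * place, bits 11:4 hold physical address bits 39:32, and bits 1:0 mark the
 * entry valid and cache-coherent.
 */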
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#define to_pages(addr, size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)

#define EMERGENCY_PAGES 32	/* = 128KB */

#define AGPEXTERN extern

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */
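
/*
 * Reserve a run of @size contiguous pages in the GART remapping area.
 * The search starts at next_bit and retries once from the beginning of
 * the bitmap; wrapping (or iommu_fullflush) marks the GART as needing a
 * TLB flush. Returns the first page index, or -1 if no free run exists.
 */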
static unsigned long alloc_iommu(int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = find_next_zero_string(iommu_gart_bitmap, next_bit,
				       iommu_pages, size);
	if (offset == -1) {
		need_flush = 1;
		offset = find_next_zero_string(iommu_gart_bitmap, 0,
					       iommu_pages, size);
	}
	if (offset != -1) {
		set_bit_string(iommu_gart_bitmap, offset, size);
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}
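
/*
 * Release a range of GART pages previously reserved by alloc_iommu().
 */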
static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	__clear_bit_string(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x) \
	do { \
		if (iommu_leak_tab) \
			iommu_leak_tab[x] = __builtin_return_address(0);\
	} while (0)

#define CLEAR_LEAK(x) \
	do { \
		if (iommu_leak_tab) \
			iommu_leak_tab[x] = NULL; \
	} while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	static int dump;
	int i;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);

	/* Very crude. Dump some entries from the end of the table too. */
	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
	       iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0);
		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non-mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
		"PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
		size, dev->bus_id);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic(KERN_ERR
				"PCI-DMA: Random memory would be DMAed\n");
	}

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace)
		dump_leak();
#endif
}
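
/*
 * need_iommu()/nonforced_iommu(): decide whether a buffer has to go
 * through the GART, i.e. whether it ends above the device's DMA mask
 * (or, for need_iommu(), whether the IOMMU is forced on for everything).
 */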
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	if (force_iommu)
		mmu = 1;

	return mmu;
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	return mmu;
}

/*
 * Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
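
/*
 * Map a single buffer through the GART unconditionally (no need_iommu()
 * check) and flush the GART before handing back the bus address.
 */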
static dma_addr_t
gart_map_simple(struct device *dev, char *buf, size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);

	flush_gart();

	return map;
}

/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
	unsigned long phys_mem, bus;

	phys_mem = virt_to_phys(addr);
	if (!need_iommu(dev, phys_mem, size))
		return phys_mem;

	bus = gart_map_simple(dev, addr, size, dir);

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

/* Map multiple scatterlist entries contiguously into the first entry. */
static int __dma_map_cont(struct scatterlist *start, int nelems,
			  struct scatterlist *sout, unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(pages);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = to_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}
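
/*
 * dma_map_cont(): when no remapping is needed a single entry is passed
 * through untouched; otherwise the run of entries is mapped contiguously
 * via __dma_map_cont().
 */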
static inline int
dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
	     unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page-aligned sizes into a contiguous mapping.
 */
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;

	if (nents == 0)
		return 0;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(start_sg, i - start, sgmap,
						 pages, need) < 0)
					goto error;
				out++;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		need = nextneed;
		pages += to_pages(s->offset, s->length);
		ps = s;
	}
	if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}
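
/*
 * Work out how much of the aperture to use as the IOMMU remapping area:
 * default to the aperture size when no size was given on the command
 * line, adjust it against the 2MB (PMD) alignment of the area's end, and
 * warn when less than 64MB is left for the IOMMU.
 */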
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		printk(KERN_WARNING
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
				iommu_size >> 20);
	}

	return iommu_size;
}
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, 0x94, &aper_base_32);
	pci_read_config_dword(dev, 0x90, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 gatt_reg;
		u32 ctl;

		dev = k8_northbridges[i];
		gatt_reg = __pa(gatt) >> 12;
		gatt_reg <<= 4;
		pci_write_config_dword(dev, 0x98, gatt_reg);
		pci_read_config_dword(dev, 0x90, &ctl);

		/* Enable GART translation for this northbridge */
		ctl |= 1;
		ctl &= ~((1<<4) | (1<<5));

		pci_write_config_dword(dev, 0x90, ctl);
	}
	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size >> 10);

	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
	return -1;
}

extern int agp_amd64_init(void);
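
/*
 * DMA operations exported to the generic layer. The sync hooks are left
 * NULL: GART entries are installed with GPTE_COHERENT, so streaming
 * mappings need no per-transfer synchronization here.
 */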
static const struct dma_mapping_ops gart_dma_ops = {
	.mapping_error			= NULL,
	.map_single			= gart_map_single,
	.map_simple			= gart_map_simple,
	.unmap_single			= gart_unmap_single,
	.sync_single_for_cpu		= NULL,
	.sync_single_for_device		= NULL,
	.sync_single_range_for_cpu	= NULL,
	.sync_single_range_for_device	= NULL,
	.sync_sg_for_cpu		= NULL,
	.sync_sg_for_device		= NULL,
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
};
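
/*
 * Turn off GART translation in every northbridge on shutdown so that a
 * subsequently booted kernel does not start with stale GART mappings
 * still enabled.
 */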
void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, 0x90, &ctl);

		/* Clear the GART enable bit */
		ctl &= ~1;

		pci_write_config_dword(dev, 0x90, ctl);
	}
}
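
/*
 * Set up the GART IOMMU: find the northbridges, obtain a GATT either from
 * the AGP driver or via init_k8_gatt(), reserve the upper part of the
 * aperture as the remapping area, and install gart_dma_ops as the DMA
 * implementation.
 */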
void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_size;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !gart_iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (end_pfn > MAX_DMA32_PFN) {
			printk(KERN_ERR "WARNING more than 4GB of memory "
			       "but GART IOMMU not available.\n"
			       KERN_ERR "WARNING 32bit PCI may malfunction.\n");
		}
		return;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * 8);
		else
			printk(KERN_DEBUG
			       "PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);

	/*
	 * Try to workaround a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then.
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}
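
/*
 * Parse the GART-specific suboptions of the iommu= boot parameter handled
 * here: a numeric remap-area size, "leak" tracing, "fullflush"/"nofullflush",
 * "noagp", "noaperture", and the aperture options "force", "allowed" and
 * "memaper" shared with pci-dma.c.
 */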
void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=')
			++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 8))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;
		}
	}
}