/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32-bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <asm/atomic.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
static unsigned long iommu_bus_base;    /* GART remapping area (physical) */
static unsigned long iommu_size;        /* size of remapping area in bytes */
static unsigned long iommu_pages;       /* .. and in pages */

static u32 *iommu_gatt_base;            /* Remapping table */
/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. When it is set, the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;
/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
        (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#define to_pages(addr, size) \
        (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
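/*
 * Illustrative example only (the address is chosen arbitrarily, not taken
 * from this file): encoding the 40-bit physical address 0x1234567000 keeps
 * bits [31:12] in place and folds bits [39:32] into GPTE bits [11:4]:
 *
 *      GPTE_ENCODE(0x1234567000)
 *              = 0x34567000 | (0x12 << 4) | GPTE_VALID | GPTE_COHERENT
 *              = 0x34567123
 *      GPTE_DECODE(0x34567123)
 *              = 0x34567000 | (0x120 << 28)
 *              = 0x1234567000
 */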
#define EMERGENCY_PAGES 32 /* = 128KB */

#define AGPEXTERN extern

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
static int need_flush;          /* global flush state. set for each gart wrap */
static unsigned long alloc_iommu(struct device *dev, int size)
{
        unsigned long offset, flags;
        unsigned long boundary_size;
        unsigned long base_index;

        base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
                           PAGE_SIZE) >> PAGE_SHIFT;
        boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                              PAGE_SIZE) >> PAGE_SHIFT;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
                                  size, base_index, boundary_size, 0);
        if (offset == -1) {
                need_flush = 1;
                offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
                                          size, base_index, boundary_size, 0);
        }
        if (offset != -1) {
                next_bit = offset+size;
                if (next_bit >= iommu_pages) {
                        next_bit = 0;
                        need_flush = 1;
                }
        }
        if (iommu_fullflush)
                need_flush = 1;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

        return offset;
}
static void free_iommu(unsigned long offset, int size)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        iommu_area_free(iommu_gart_bitmap, offset, size);
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        if (need_flush) {
                k8_flush_garts();
                need_flush = 0;
        }
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)                                                     \
        do {                                                            \
                if (iommu_leak_tab)                                     \
                        iommu_leak_tab[x] = __builtin_return_address(0);\
        } while (0)

#define CLEAR_LEAK(x)                                                   \
        do {                                                            \
                if (iommu_leak_tab)                                     \
                        iommu_leak_tab[x] = NULL;                       \
        } while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
        int i;
        static int dump;

        if (dump || !iommu_leak_tab)
                return;
        dump = 1;
        show_stack(NULL, NULL);

        /* Very crude. dump some from the end of the table too */
        printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
               iommu_leak_pages);
        for (i = 0; i < iommu_leak_pages; i += 2) {
                printk(KERN_DEBUG "%lu: ", iommu_pages-i);
                printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0);
                printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
        }
        printk(KERN_DEBUG "\n");
}

#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif
static void iommu_full(struct device *dev, size_t size, int dir)
{
        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this operation properly.
         * Return some non mapped prereserved space in the aperture and
         * let the Northbridge deal with it. This will result in garbage
         * in the IO operation. When the size exceeds the prereserved space
         * memory corruption will occur or random memory will be DMAed
         * out. Hopefully no network devices use single mappings that big.
         */

        printk(KERN_ERR
                "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
                size, dev->bus_id);

        if (size > PAGE_SIZE*EMERGENCY_PAGES) {
                if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Memory would be corrupted\n");
                if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic(KERN_ERR
                                "PCI-DMA: Random memory would be DMAed\n");
        }

#ifdef CONFIG_IOMMU_LEAK
        dump_leak();
#endif
}
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
        u64 mask = *dev->dma_mask;
        int high = addr + size > mask;
        int mmu = high;

        if (force_iommu)
                mmu = 1;

        return mmu;
}
static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
        u64 mask = *dev->dma_mask;
        int high = addr + size > mask;
        int mmu = high;

        return mmu;
}
/* Map a single continuous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t
dma_map_area(struct device *dev, dma_addr_t phys_mem,
             size_t size, int dir)
{
        unsigned long npages = to_pages(phys_mem, size);
        unsigned long iommu_page = alloc_iommu(dev, npages);
        int i;

        if (iommu_page == -1) {
                if (!nonforced_iommu(dev, phys_mem, size))
                        return phys_mem;
                if (panic_on_overflow)
                        panic("dma_map_area overflow %lu bytes\n", size);
                iommu_full(dev, size, dir);
                return bad_dma_address;
        }

        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
                SET_LEAK(iommu_page + i);
                phys_mem += PAGE_SIZE;
        }
        return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
static dma_addr_t
gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
        dma_addr_t map = dma_map_area(dev, paddr, size, dir);

        flush_gart();

        return map;
}
/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
        unsigned long bus;

        if (!need_iommu(dev, paddr, size))
                return paddr;

        bus = gart_map_simple(dev, paddr, size, dir);

        return bus;
}
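/*
 * For illustration only: a driver never calls gart_map_single() directly;
 * it goes through the generic DMA API, which dispatches here via dma_ops
 * (installed in gart_iommu_init() below). A minimal sketch of the driver
 * side, with "mydev" and "buf" as hypothetical names:
 *
 *      dma_addr_t handle;
 *
 *      handle = dma_map_single(&mydev->dev, buf, len, DMA_TO_DEVICE);
 *      ...device performs DMA against "handle"...
 *      dma_unmap_single(&mydev->dev, handle, len, DMA_TO_DEVICE);
 *
 * On this GART path a failed mapping comes back as bad_dma_address, the
 * reserved emergency area at the start of the remapping window.
 */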
/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
                              size_t size, int direction)
{
        unsigned long iommu_page;
        int npages;
        int i;

        if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
            dma_addr >= iommu_bus_base + iommu_size)
                return;

        iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
        npages = to_pages(dma_addr, size);
        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
                CLEAR_LEAK(iommu_page + i);
        }
        free_iommu(iommu_page, npages);
}
/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (!s->dma_length || !s->length)
                        break;
                gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
        }
}
/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
                               int nents, int dir)
{
        struct scatterlist *s;
        int i;

#ifdef CONFIG_IOMMU_DEBUG
        printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

        for_each_sg(sg, s, nents, i) {
                unsigned long addr = sg_phys(s);

                if (nonforced_iommu(dev, addr, s->length)) {
                        addr = dma_map_area(dev, addr, s->length, dir);
                        if (addr == bad_dma_address) {
                                if (i > 0)
                                        gart_unmap_sg(dev, sg, i, dir);
                                nents = 0;
                                sg[0].dma_length = 0;
                                break;
                        }
                }
                s->dma_address = addr;
                s->dma_length = s->length;
        }
        flush_gart();

        return nents;
}
/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
                          int nelems, struct scatterlist *sout,
                          unsigned long pages)
{
        unsigned long iommu_start = alloc_iommu(dev, pages);
        unsigned long iommu_page = iommu_start;
        struct scatterlist *s;
        int i;

        if (iommu_start == -1)
                return -1;

        for_each_sg(start, s, nelems, i) {
                unsigned long pages, addr;
                unsigned long phys_addr = s->dma_address;

                BUG_ON(s != start && s->offset);
                if (s == start) {
                        sout->dma_address = iommu_bus_base;
                        sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
                        sout->dma_length = s->length;
                } else {
                        sout->dma_length += s->length;
                }

                addr = phys_addr;
                pages = to_pages(s->offset, s->length);
                while (pages--) {
                        iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
                        SET_LEAK(iommu_page);
                        addr += PAGE_SIZE;
                        iommu_page++;
                }
        }
        BUG_ON(iommu_page - iommu_start != pages);

        return 0;
}
static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
             struct scatterlist *sout, unsigned long pages, int need)
{
        if (!need) {
                BUG_ON(nelems != 1);
                sout->dma_address = start->dma_address;
                sout->dma_length = start->length;
                return 0;
        }
        return __dma_map_cont(dev, start, nelems, sout, pages);
}
/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a continuous mapping.
 */
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
        struct scatterlist *s, *ps, *start_sg, *sgmap;
        int need = 0, nextneed, i, out, start;
        unsigned long pages = 0;
        unsigned int seg_size;
        unsigned int max_seg_size;

        if (nents == 0)
                return 0;

        out             = 0;
        start           = 0;
        start_sg        = sgmap = sg;
        seg_size        = 0;
        max_seg_size    = dma_get_max_seg_size(dev);
        ps              = NULL; /* shut up gcc */
        for_each_sg(sg, s, nents, i) {
                dma_addr_t addr = sg_phys(s);

                s->dma_address = addr;
                BUG_ON(s->length == 0);

                nextneed = need_iommu(dev, addr, s->length);

                /* Handle the previous not yet processed entries */
                if (i > start) {
                        /*
                         * Can only merge when the last chunk ends on a
                         * page boundary and the new one doesn't have an
                         * offset.
                         */
                        if (!iommu_merge || !nextneed || !need || s->offset ||
                            (s->length + seg_size > max_seg_size) ||
                            (ps->offset + ps->length) % PAGE_SIZE) {
                                if (dma_map_cont(dev, start_sg, i - start,
                                                 sgmap, pages, need) < 0)
                                        goto error;
                                out++;
                                seg_size = 0;
                                sgmap = sg_next(sgmap);
                                pages = 0;
                                start = i;
                                start_sg = s;
                        }
                }

                seg_size += s->length;
                need = nextneed;
                pages += to_pages(s->offset, s->length);
                ps = s;
        }
        if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
                goto error;
        out++;
        flush_gart();
        if (out < nents) {
                sgmap = sg_next(sgmap);
                sgmap->dma_length = 0;
        }
        return out;

error:
        flush_gart();
        gart_unmap_sg(dev, sg, out, dir);

        /* When it was forced or merged try again in a dumb way */
        if (force_iommu || iommu_merge) {
                out = dma_map_sg_nonforce(dev, sg, nents, dir);
                if (out > 0)
                        return out;
        }
        if (panic_on_overflow)
                panic("dma_map_sg: overflow on %lu pages\n", pages);

        iommu_full(dev, pages << PAGE_SHIFT, dir);
        for_each_sg(sg, s, nents, i)
                s->dma_address = bad_dma_address;
        return 0;
}
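/*
 * For illustration only: drivers reach gart_map_sg() through the generic
 * DMA API. A minimal sketch, with "mydev", "sgl", "nents" and
 * program_hw_descriptor() as hypothetical names for an already
 * initialized scatterlist and a device-specific helper:
 *
 *      int i, mapped;
 *      struct scatterlist *s;
 *
 *      mapped = dma_map_sg(&mydev->dev, sgl, nents, DMA_FROM_DEVICE);
 *      for_each_sg(sgl, s, mapped, i)
 *              program_hw_descriptor(sg_dma_address(s), sg_dma_len(s));
 *      ...
 *      dma_unmap_sg(&mydev->dev, sgl, nents, DMA_FROM_DEVICE);
 *
 * Note that dma_map_sg() may return fewer entries than it was given when
 * the merge path above coalesces page-aligned chunks.
 */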
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
        unsigned long a;

        if (!iommu_size) {
                iommu_size = aper_size;
                if (!no_agp)
                        iommu_size /= 2;
        }

        a = aper + iommu_size;
        iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

        if (iommu_size < 64*1024*1024) {
                printk(KERN_WARNING
                        "PCI-DMA: Warning: Small IOMMU %luMB."
                        " Consider increasing the AGP aperture in BIOS\n",
                                iommu_size >> 20);
        }

        return iommu_size;
}
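/*
 * Illustrative arithmetic only (the numbers are an assumed example, not a
 * requirement): with a 256MB aperture and AGP still in use, half of the
 * aperture (128MB) is handed to the IOMMU, then trimmed so that the
 * base-plus-size address lands on a 2MB (PMD_PAGE_SIZE) boundary. Anything
 * below 64MB triggers the warning above.
 */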
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
        unsigned aper_size = 0, aper_base_32, aper_order;
        u64 aper_base;

        pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
        pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
        aper_order = (aper_order >> 1) & 7;

        aper_base = aper_base_32 & 0x7fff;
        aper_base <<= 25;

        aper_size = (32 * 1024 * 1024) << aper_order;
        if (aper_base + aper_size > 0x100000000UL || !aper_size)
                aper_base = 0;

        *size = aper_size;
        return aper_base;
}
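/*
 * Illustrative decode only (register values are an assumed example): an
 * aperture control value with bits [3:1] = 2 gives
 * aper_size = 32MB << 2 = 128MB, and a base register value of 0x40 gives
 * aper_base = 0x40 << 25 = 2GB. The result is rejected if the aperture
 * would reach beyond the 4GB boundary.
 */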
static void enable_gart_translations(void)
{
        int i;

        for (i = 0; i < num_k8_northbridges; i++) {
                struct pci_dev *dev = k8_northbridges[i];

                enable_gart_translation(dev, __pa(agp_gatt_table));
        }
}
/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
        fix_up_north_bridges = true;
        aperture_order = aper_order;
        aperture_alloc = aper_alloc;
}
static int gart_resume(struct sys_device *dev)
{
        printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");

        if (fix_up_north_bridges) {
                int i;

                printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");

                for (i = 0; i < num_k8_northbridges; i++) {
                        struct pci_dev *dev = k8_northbridges[i];

                        /*
                         * Don't enable translations just yet. That is the next
                         * step. Restore the pre-suspend aperture settings.
                         */
                        pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
                                                aperture_order << 1);
                        pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
                                                aperture_alloc >> 25);
                }
        }

        enable_gart_translations();

        return 0;
}
static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
        return 0;
}

static struct sysdev_class gart_sysdev_class = {
        .name           = "gart",
        .suspend        = gart_suspend,
        .resume         = gart_resume,
};

static struct sys_device device_gart = {
        .id             = 0,
        .cls            = &gart_sysdev_class,
};
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
        unsigned aper_size, gatt_size, new_aper_size;
        unsigned aper_base, new_aper_base;
        struct pci_dev *dev;
        void *gatt;
        int i, error;
        unsigned long start_pfn, end_pfn;

        printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
        aper_size = aper_base = info->aper_size = 0;
        dev = NULL;
        for (i = 0; i < num_k8_northbridges; i++) {
                dev = k8_northbridges[i];
                new_aper_base = read_aperture(dev, &new_aper_size);
                if (!new_aper_base)
                        goto nommu;

                if (!aper_base) {
                        aper_size = new_aper_size;
                        aper_base = new_aper_base;
                }
                if (aper_size != new_aper_size || aper_base != new_aper_base)
                        goto nommu;
        }
        if (!aper_base)
                goto nommu;
        info->aper_base = aper_base;
        info->aper_size = aper_size >> 20;

        gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
        gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
        if (!gatt)
                panic("Cannot allocate GATT table");
        if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
                panic("Could not set GART PTEs to uncacheable pages");

        memset(gatt, 0, gatt_size);
        agp_gatt_table = gatt;

        enable_gart_translations();

        error = sysdev_class_register(&gart_sysdev_class);
        if (!error)
                error = sysdev_register(&device_gart);
        if (error)
                panic("Could not register gart_sysdev -- would corrupt data on next suspend");

        flush_gart();

        printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
               aper_base, aper_size>>10);

        /* need to map that range */
        end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
        if (end_pfn > max_low_pfn_mapped) {
                start_pfn = max_low_pfn_mapped;
                max_low_pfn_mapped = init_memory_mapping(start_pfn<<PAGE_SHIFT,
                                                         end_pfn<<PAGE_SHIFT);
                if (max_pfn_mapped < max_low_pfn_mapped)
                        max_pfn_mapped = max_low_pfn_mapped;
        }

        return 0;

 nommu:
        /* Should not happen anymore */
        printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
               KERN_WARNING "falling back to iommu=soft.\n");
        return -1;
}
extern int agp_amd64_init(void);
static const struct dma_mapping_ops gart_dma_ops = {
        .mapping_error                  = NULL,
        .map_single                     = gart_map_single,
        .map_simple                     = gart_map_simple,
        .unmap_single                   = gart_unmap_single,
        .sync_single_for_cpu            = NULL,
        .sync_single_for_device         = NULL,
        .sync_single_range_for_cpu      = NULL,
        .sync_single_range_for_device   = NULL,
        .sync_sg_for_cpu                = NULL,
        .sync_sg_for_device             = NULL,
        .map_sg                         = gart_map_sg,
        .unmap_sg                       = gart_unmap_sg,
};
void gart_iommu_shutdown(void)
{
        struct pci_dev *dev;
        int i;

        if (no_agp && (dma_ops != &gart_dma_ops))
                return;

        for (i = 0; i < num_k8_northbridges; i++) {
                u32 ctl;

                dev = k8_northbridges[i];
                pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

                ctl &= ~GARTEN;

                pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
        }
}
void __init gart_iommu_init(void)
{
        struct agp_kern_info info;
        unsigned long iommu_start;
        unsigned long aper_size;
        unsigned long scratch;
        long i;

        if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
                printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
                return;
        }

#ifndef CONFIG_AGP_AMD64
        no_agp = 1;
#else
        /* Makefile puts PCI initialization via subsys_initcall first. */
        /* Add other K8 AGP bridge drivers here */
        no_agp = no_agp ||
                (agp_amd64_init() < 0) ||
                (agp_copy_info(agp_bridge, &info) < 0);
#endif

        if (swiotlb)
                return;

        /* Did we detect a different HW IOMMU? */
        if (iommu_detected && !gart_iommu_aperture)
                return;

        if (no_iommu ||
            (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
            !gart_iommu_aperture ||
            (no_agp && init_k8_gatt(&info) < 0)) {
                if (max_pfn > MAX_DMA32_PFN) {
                        printk(KERN_WARNING "More than 4GB of memory "
                               "but GART IOMMU not available.\n"
                               KERN_WARNING "falling back to iommu=soft.\n");
                }
                return;
        }

        printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
        aper_size = info.aper_size * 1024 * 1024;
        iommu_size = check_iommu_size(info.aper_base, aper_size);
        iommu_pages = iommu_size >> PAGE_SHIFT;

        iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
                                                      get_order(iommu_pages/8));
        if (!iommu_gart_bitmap)
                panic("Cannot allocate iommu bitmap\n");
        memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
        if (leak_trace) {
                iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
                                  get_order(iommu_pages*sizeof(void *)));
                if (iommu_leak_tab)
                        memset(iommu_leak_tab, 0, iommu_pages * 8);
                else
                        printk(KERN_DEBUG
                               "PCI-DMA: Cannot allocate leak trace area\n");
        }
#endif

        /*
         * Out of IOMMU space handling.
         * Reserve some invalid pages at the beginning of the GART.
         */
        set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

        agp_memory_reserved = iommu_size;
        printk(KERN_INFO
               "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
               iommu_size >> 20);

        iommu_start = aper_size - iommu_size;
        iommu_bus_base = info.aper_base + iommu_start;
        bad_dma_address = iommu_bus_base;
        iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

        /*
         * Unmap the IOMMU part of the GART. The alias of the page is
         * always mapped with cache enabled and there is no full cache
         * coherency across the GART remapping. The unmapping avoids
         * automatic prefetches from the CPU allocating cache lines in
         * there. All CPU accesses are done via the direct mapping to
         * the backing memory. The GART address is only used by PCI
         * devices.
         */
        set_memory_np((unsigned long)__va(iommu_bus_base),
                                iommu_size >> PAGE_SHIFT);
        /*
         * Tricky. The GART table remaps the physical memory range,
         * so the CPU won't notice potential aliases and if the memory
         * is remapped to UC later on, we might surprise the PCI devices
         * with a stray writeout of a cacheline. So play it sure and
         * do an explicit, full-scale wbinvd() _after_ having marked all
         * the pages as Not-Present:
         */
        wbinvd();

        /*
         * Try to workaround a bug (thanks to BenH):
         * Set unmapped entries to a scratch page instead of 0.
         * Any prefetches that hit unmapped entries won't get a bus abort
         * then. (P2P bridge may be prefetching on DMA reads).
         */
        scratch = get_zeroed_page(GFP_KERNEL);
        if (!scratch)
                panic("Cannot allocate iommu scratch page");
        gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
        for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
                iommu_gatt_base[i] = gart_unmapped_entry;

        flush_gart();
        dma_ops = &gart_dma_ops;
}
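/*
 * Resulting aperture layout, for illustration (the absolute addresses
 * depend on where the BIOS or the kernel placed the aperture):
 *
 *      [aper_base .. aper_base+iommu_start)     left to AGP
 *      [iommu_bus_base .. aper_base+aper_size)  IOMMU remapping area
 *
 * The first EMERGENCY_PAGES of the remapping area stay reserved in the
 * allocation bitmap; their start address doubles as bad_dma_address, so a
 * returned mapping error can never collide with a real translation.
 */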
void __init gart_parse_options(char *p)
{
        int arg;

#ifdef CONFIG_IOMMU_LEAK
        if (!strncmp(p, "leak", 4)) {
                leak_trace = 1;
                p += 4;
                if (*p == '=')
                        ++p;
                if (isdigit(*p) && get_option(&p, &arg))
                        iommu_leak_pages = arg;
        }
#endif
        if (isdigit(*p) && get_option(&p, &arg))
                iommu_size = arg;
        if (!strncmp(p, "fullflush", 8))
                iommu_fullflush = 1;
        if (!strncmp(p, "nofullflush", 11))
                iommu_fullflush = 0;
        if (!strncmp(p, "noagp", 5))
                no_agp = 1;
        if (!strncmp(p, "noaperture", 10))
                fix_aperture = 0;
        /* duplicated from pci-dma.c */
        if (!strncmp(p, "force", 5))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "allowed", 7))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "memaper", 7)) {
                fallback_aper_force = 1;
                p += 7;
                if (*p == '=')
                        ++p;
                if (get_option(&p, &arg))
                        fallback_aper_order = arg;
        }
}
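/*
 * For illustration: these options are normally reached through the iommu=
 * kernel command line parameter, which hands the GART-specific words to
 * this parser. Assumed examples, not an exhaustive list:
 *
 *      iommu=fullflush         flush the GART on every mapping
 *      iommu=memaper=3         force a 256MB (32MB << 3) fallback aperture
 *      iommu=leak,noaperture   (with CONFIG_IOMMU_LEAK) trace leaks and
 *                              don't touch the BIOS aperture
 */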