/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows the use of PCI devices that only support 32-bit addresses
 * on systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <asm/atomic.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/iommu.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */
/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;
/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
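/*
 * Worked example (illustration only, not part of the driver): for a
 * hypothetical 40-bit physical address 0x1234567000, GPTE_ENCODE()
 * keeps address bits 12-31 in place, folds address bits 32-39 into
 * PTE bits 4-11, and sets the valid and coherent bits:
 *
 *	GPTE_ENCODE(0x1234567000ULL)
 *		== 0x34567000 | (0x12 << 4) | 1 | 2 == 0x34567123
 *
 * GPTE_DECODE() reverses the transformation:
 *
 *	GPTE_DECODE(0x34567123)
 *		== 0x34567000 | (0x120ULL << 28) == 0x1234567000
 */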
#define EMERGENCY_PAGES 32 /* = 128KB */

#define AGPEXTERN extern

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */
static unsigned long alloc_iommu(struct device *dev, int size)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, 0);
	if (offset == -1) {
		/* Wrapped around: flush and retry from the start. */
		need_flush = 1;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size, 0);
	}
	if (offset != -1) {
		next_bit = offset + size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}
static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
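/*
 * Usage sketch (illustration only): the mapping paths below pair these
 * helpers as
 *
 *	iommu_page = alloc_iommu(dev, npages);
 *	... fill iommu_gatt_base[iommu_page .. iommu_page+npages-1] ...
 *	... device does DMA ...
 *	free_iommu(iommu_page, npages);
 *
 * with alloc_iommu() returning -1 when the aperture is exhausted.
 */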
/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}
#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = __builtin_return_address(0);\
	} while (0)

#define CLEAR_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = NULL;			\
	} while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);

	/* Very crude. dump some from the end of the table too */
	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
	       iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i],
			       0);
		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif
static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	dev_err(dev, "PCI-DMA: Out of IOMMU space for %lu bytes\n", size);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic(KERN_ERR
				"PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	if (force_iommu)
		mmu = 1;

	return mmu;
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	return mmu;
}
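/*
 * Example (illustration only): a device with a 32-bit DMA mask
 * (*dev->dma_mask == 0xffffffff) asking to map a buffer at physical
 * address 0x140000000 needs the GART, since addr + size exceeds the
 * mask; the same buffer below 4GB would be passed through untranslated
 * unless force_iommu is set.
 */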
/*
 * Map a single continuous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
			       size_t size, int dir)
{
	unsigned long npages = iommu_num_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(dev, npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}
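/*
 * Worked example (illustration only): with iommu_bus_base at
 * 0xe8000000, mapping a 0x2100-byte buffer at physical 0x123456780
 * needs three GART pages. If alloc_iommu() returns page 5, the three
 * GATT entries cover physical pages 0x123456000, 0x123457000 and
 * 0x123458000, and the returned bus address is
 *
 *	0xe8000000 + 5*PAGE_SIZE + 0x780 == 0xe8005780
 *
 * (the in-page offset 0x780 survives the PAGE_SIZE increments above,
 * since only bits above PAGE_SHIFT change).
 */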
static dma_addr_t
gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, paddr, size, dir);

	flush_gart();

	return map;
}
/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
	unsigned long bus;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = gart_map_simple(dev, paddr, size, dir);

	return bus;
}
/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = iommu_num_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}
/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}
/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}
/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = iommu_num_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}
static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return nelems;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}
/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a continuous mapping.
 */
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;
				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += iommu_num_pages(s->offset, s->length);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}
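/*
 * Merge example (illustration only): three scatterlist entries of
 * 4096 bytes each, all page aligned, satisfy the conditions above
 * (the previous chunk ends on a page boundary and the next one has
 * no offset), so they collapse into a single dma_map_cont() call
 * covering three GART pages and one continuous bus address range.
 * A 512-byte entry in the middle would force a split, since it ends
 * mid-page.
 */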
static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		printk(KERN_WARNING
		       "PCI-DMA: Warning: Small IOMMU %luMB."
		       " Consider increasing the AGP aperture in BIOS\n",
		       iommu_size >> 20);
	}

	return iommu_size;
}
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}
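/*
 * Decode example (illustration only): a GARTAPERTURECTL value with
 * bits [3:1] == 3 gives aper_order == 3 and thus a
 * (32MB << 3) == 256MB aperture; a GARTAPERTUREBASE value of 0x70
 * decodes to a physical base of 0x70 << 25 == 0xe0000000. Apertures
 * reaching above 4GB, or of size zero, are rejected.
 */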
static void enable_gart_translations(void)
{
	int i;

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}
}
/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}
static int gart_resume(struct sys_device *dev)
{
	printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");

	if (fix_up_north_bridges) {
		int i;

		printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");

		for (i = 0; i < num_k8_northbridges; i++) {
			struct pci_dev *dev = k8_northbridges[i];

			/*
			 * Don't enable translations just yet. That is the next
			 * step. Restore the pre-suspend aperture settings.
			 */
			pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
					       aperture_order << 1);
			pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
					       aperture_alloc >> 25);
		}
	}

	enable_gart_translations();

	return 0;
}
static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
	return 0;
}

static struct sysdev_class gart_sysdev_class = {
	.suspend	= gart_suspend,
	.resume		= gart_resume,
};

static struct sys_device device_gart = {
	.cls		= &gart_sysdev_class,
};
/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i, error;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");

	aper_size = aper_base = info->aper_size = 0;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;

	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	error = sysdev_class_register(&gart_sysdev_class);
	if (!error)
		error = sysdev_register(&device_gart);
	if (error)
		panic("Could not register gart_sysdev -- would corrupt data on next suspend");

	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size >> 10);

	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_WARNING "falling back to iommu=soft.\n");
	return -1;
}
extern int agp_amd64_init(void);

static struct dma_mapping_ops gart_dma_ops = {
	.map_single			= gart_map_single,
	.map_simple			= gart_map_simple,
	.unmap_single			= gart_unmap_single,
	.sync_single_for_cpu		= NULL,
	.sync_single_for_device		= NULL,
	.sync_single_range_for_cpu	= NULL,
	.sync_single_range_for_device	= NULL,
	.sync_sg_for_cpu		= NULL,
	.sync_sg_for_device		= NULL,
	.map_sg				= gart_map_sg,
	.unmap_sg			= gart_unmap_sg,
};
void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}
void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_base, aper_size;
	unsigned long start_pfn, end_pfn;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !gart_iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && max_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (max_pfn > MAX_DMA32_PFN) {
			printk(KERN_WARNING "More than 4GB of memory "
			       "but GART IOMMU not available.\n"
			       KERN_WARNING "falling back to iommu=soft.\n");
		}
		return;
	}

	/* need to map that range */
	aper_size = info.aper_size << 20;
	aper_base = info.aper_base;
	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
	if (end_pfn > max_low_pfn_mapped) {
		start_pfn = (aper_base>>PAGE_SHIFT);
		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * 8);
		else
			printk(KERN_DEBUG
			       "PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it sure and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Now all caches are flushed and we can safely enable
	 * GART hardware. Doing it early leaves the possibility
	 * of stale cache entries that can lead to GART PTE
	 * errors.
	 */
	enable_gart_translations();

	/*
	 * Try to workaround a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then. (P2P bridge may be prefetching on DMA reads).
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}
void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 9))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=')
			++p;
		if (get_option(&p, &arg))
			fallback_aper_order = arg;
	}
}
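/*
 * Option examples (illustration only; the iommu= wiring lives in
 * pci-dma.c, which hands each comma-separated token to
 * gart_parse_options()):
 *
 *	iommu=fullflush		flush the GART on every mapping
 *	iommu=noagp		don't use the AGP driver's GATT
 *	iommu=memaper=3		force a fallback aperture of order 3
 *	iommu=leak		(CONFIG_IOMMU_LEAK) trace leaked mappings
 */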