iommu sg merging: x86: make pci-gart iommu respect the segment size limits
arch/x86/kernel/pci-gart_64.c
/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32bit addresses to be used on
 * systems with more than 4GB.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>
static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */
/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;
/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;
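
/*
 * GART page table entry layout (see the macros below): bits 31:12 carry
 * the low part of the physical page address, bits 11:4 carry physical
 * address bits 39:32, and the two low bits are the valid and coherent
 * flags. GPTE_DECODE() reverses the encoding.
 */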
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
        (((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
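
/*
 * Number of GART pages needed to map a buffer of the given size that
 * starts at the given sub-page offset.
 */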
#define to_pages(addr, size) \
        (round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;
static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */
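
/*
 * Allocate a run of 'size' contiguous GART pages. This is a next-fit
 * search over the allocation bitmap starting at next_bit; when the
 * search wraps back to the start of the aperture a GART TLB flush is
 * scheduled so that stale translations are never reused.
 */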
static unsigned long alloc_iommu(int size)
{
        unsigned long offset, flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        offset = find_next_zero_string(iommu_gart_bitmap, next_bit,
                                       iommu_pages, size);
        if (offset == -1) {
                need_flush = 1;
                offset = find_next_zero_string(iommu_gart_bitmap, 0,
                                               iommu_pages, size);
        }
        if (offset != -1) {
                set_bit_string(iommu_gart_bitmap, offset, size);
                next_bit = offset+size;
                if (next_bit >= iommu_pages) {
                        next_bit = 0;
                        need_flush = 1;
                }
        }
        if (iommu_fullflush)
                need_flush = 1;
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

        return offset;
}

static void free_iommu(unsigned long offset, int size)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        __clear_bit_string(iommu_gart_bitmap, offset, size);
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu_bitmap_lock, flags);
        if (need_flush) {
                k8_flush_garts();
                need_flush = 0;
        }
        spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)							\
        do {								\
                if (iommu_leak_tab)					\
                        iommu_leak_tab[x] = __builtin_return_address(0);\
        } while (0)

#define CLEAR_LEAK(x)							\
        do {								\
                if (iommu_leak_tab)					\
                        iommu_leak_tab[x] = NULL;			\
        } while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
        int i;
        static int dump;

        if (dump || !iommu_leak_tab)
                return;
        dump = 1;
        show_stack(NULL, NULL);

        /* Very crude. dump some from the end of the table too */
        printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
               iommu_leak_pages);
        for (i = 0; i < iommu_leak_pages; i += 2) {
                printk(KERN_DEBUG "%lu: ", iommu_pages-i);
                printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0);
                printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
        }
        printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this operation properly.
         * Return some non mapped prereserved space in the aperture and
         * let the Northbridge deal with it. This will result in garbage
         * in the IO operation. When the size exceeds the prereserved space
         * memory corruption will occur or random memory will be DMAed
         * out. Hopefully no network devices use single mappings that big.
         */

        printk(KERN_ERR
                "PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
                size, dev->bus_id);

        if (size > PAGE_SIZE*EMERGENCY_PAGES) {
                if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Memory would be corrupted\n");
                if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
                        panic("PCI-DMA: Random memory would be DMAed\n");
        }
#ifdef CONFIG_IOMMU_LEAK
        dump_leak();
#endif
}
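
/*
 * Decide whether a physical range has to go through the GART: it does
 * when it is not reachable with the device's DMA mask, or when
 * force_iommu is set. nonforced_iommu() is the same test without the
 * force_iommu override and is used by the overflow fallback path.
 */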
static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
        u64 mask = *dev->dma_mask;
        int high = addr + size > mask;
        int mmu = high;

        if (force_iommu)
                mmu = 1;

        return mmu;
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
        u64 mask = *dev->dma_mask;
        int high = addr + size > mask;
        int mmu = high;

        return mmu;
}

/* Map a single contiguous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
                               size_t size, int dir)
{
        unsigned long npages = to_pages(phys_mem, size);
        unsigned long iommu_page = alloc_iommu(npages);
        int i;

        if (iommu_page == -1) {
                if (!nonforced_iommu(dev, phys_mem, size))
                        return phys_mem;
                if (panic_on_overflow)
                        panic("dma_map_area overflow %lu bytes\n", size);
                iommu_full(dev, size, dir);
                return bad_dma_address;
        }

        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
                SET_LEAK(iommu_page + i);
                phys_mem += PAGE_SIZE;
        }
        return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

static dma_addr_t
gart_map_simple(struct device *dev, char *buf, size_t size, int dir)
{
        dma_addr_t map = dma_map_area(dev, virt_to_bus(buf), size, dir);

        flush_gart();

        return map;
}

/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, void *addr, size_t size, int dir)
{
        unsigned long phys_mem, bus;

        if (!dev)
                dev = &fallback_dev;

        phys_mem = virt_to_phys(addr);
        if (!need_iommu(dev, phys_mem, size))
                return phys_mem;

        bus = gart_map_simple(dev, addr, size, dir);

        return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
                              size_t size, int direction)
{
        unsigned long iommu_page;
        int npages;
        int i;

        if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
            dma_addr >= iommu_bus_base + iommu_size)
                return;

        iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
        npages = to_pages(dma_addr, size);
        for (i = 0; i < npages; i++) {
                iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
                CLEAR_LEAK(iommu_page + i);
        }
        free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (!s->dma_length || !s->length)
                        break;
                gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
        }
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
                               int nents, int dir)
{
        struct scatterlist *s;
        int i;

#ifdef CONFIG_IOMMU_DEBUG
        printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

        for_each_sg(sg, s, nents, i) {
                unsigned long addr = sg_phys(s);

                if (nonforced_iommu(dev, addr, s->length)) {
                        addr = dma_map_area(dev, addr, s->length, dir);
                        if (addr == bad_dma_address) {
                                if (i > 0)
                                        gart_unmap_sg(dev, sg, i, dir);
                                nents = 0;
                                sg[0].dma_length = 0;
                                break;
                        }
                }
                s->dma_address = addr;
                s->dma_length = s->length;
        }
        flush_gart();

        return nents;
}

/* Map multiple scatterlist entries contiguously into the first. */
static int __dma_map_cont(struct scatterlist *start, int nelems,
                          struct scatterlist *sout, unsigned long pages)
{
        unsigned long iommu_start = alloc_iommu(pages);
        unsigned long iommu_page = iommu_start;
        struct scatterlist *s;
        int i;

        if (iommu_start == -1)
                return -1;

        for_each_sg(start, s, nelems, i) {
                unsigned long pages, addr;
                unsigned long phys_addr = s->dma_address;

                BUG_ON(s != start && s->offset);
                if (s == start) {
                        sout->dma_address = iommu_bus_base;
                        sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
                        sout->dma_length = s->length;
                } else {
                        sout->dma_length += s->length;
                }

                addr = phys_addr;
                pages = to_pages(s->offset, s->length);
                while (pages--) {
                        iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
                        SET_LEAK(iommu_page);
                        addr += PAGE_SIZE;
                        iommu_page++;
                }
        }
        BUG_ON(iommu_page - iommu_start != pages);

        return 0;
}
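
/*
 * dma_map_cont() is the entry point for mapping one merged run: when no
 * entry in the run actually needs remapping (need == 0) the run must be
 * a single element and its physical address is used directly, otherwise
 * the whole run is mapped into one contiguous GART window.
 */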
static inline int
dma_map_cont(struct scatterlist *start, int nelems, struct scatterlist *sout,
             unsigned long pages, int need)
{
        if (!need) {
                BUG_ON(nelems != 1);
                sout->dma_address = start->dma_address;
                sout->dma_length = start->length;
                return 0;
        }
        return __dma_map_cont(start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a contiguous mapping.
 */
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
        struct scatterlist *s, *ps, *start_sg, *sgmap;
        int need = 0, nextneed, i, out, start;
        unsigned long pages = 0;
        unsigned int seg_size;
        unsigned int max_seg_size;

        if (nents == 0)
                return 0;

        if (!dev)
                dev = &fallback_dev;

        out = 0;
        start = 0;
        start_sg = sgmap = sg;
        seg_size = 0;
        max_seg_size = dma_get_max_seg_size(dev);
        ps = NULL; /* shut up gcc */
        for_each_sg(sg, s, nents, i) {
                dma_addr_t addr = sg_phys(s);

                s->dma_address = addr;
                BUG_ON(s->length == 0);

                nextneed = need_iommu(dev, addr, s->length);

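                /*
                 * Merging is also bounded by the device's maximum
                 * segment size (dma_get_max_seg_size()): once adding
                 * this chunk would push the merged run past that
                 * limit, the current run is mapped out below and a
                 * new one is started.
                 */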
                /* Handle the previous not yet processed entries */
                if (i > start) {
                        /*
                         * Can only merge when the last chunk ends on a
                         * page boundary and the new one doesn't have an
                         * offset.
                         */
                        if (!iommu_merge || !nextneed || !need || s->offset ||
                            (s->length + seg_size > max_seg_size) ||
                            (ps->offset + ps->length) % PAGE_SIZE) {
                                if (dma_map_cont(start_sg, i - start, sgmap,
                                                 pages, need) < 0)
                                        goto error;
                                out++;
                                seg_size = 0;
                                sgmap = sg_next(sgmap);
                                pages = 0;
                                start = i;
                                start_sg = s;
                        }
                }

                seg_size += s->length;
                need = nextneed;
                pages += to_pages(s->offset, s->length);
                ps = s;
        }
        if (dma_map_cont(start_sg, i - start, sgmap, pages, need) < 0)
                goto error;
        out++;
        flush_gart();
        if (out < nents) {
                sgmap = sg_next(sgmap);
                sgmap->dma_length = 0;
        }
        return out;

error:
        flush_gart();
        gart_unmap_sg(dev, sg, out, dir);

        /* When it was forced or merged try again in a dumb way */
        if (force_iommu || iommu_merge) {
                out = dma_map_sg_nonforce(dev, sg, nents, dir);
                if (out > 0)
                        return out;
        }
        if (panic_on_overflow)
                panic("dma_map_sg: overflow on %lu pages\n", pages);

        iommu_full(dev, pages << PAGE_SHIFT, dir);
        for_each_sg(sg, s, nents, i)
                s->dma_address = bad_dma_address;
        return 0;
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
        unsigned long a;

        if (!iommu_size) {
                iommu_size = aper_size;
                if (!no_agp)
                        iommu_size /= 2;
        }

        a = aper + iommu_size;
        iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

        if (iommu_size < 64*1024*1024) {
                printk(KERN_WARNING
                        "PCI-DMA: Warning: Small IOMMU %luMB."
                        " Consider increasing the AGP aperture in BIOS\n",
                        iommu_size >> 20);
        }

        return iommu_size;
}
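
/*
 * Read the GART aperture base and size back from a northbridge: config
 * offset 0x90 holds the aperture order (size = 32MB << order) and
 * offset 0x94 holds the aperture base in 32MB units, i.e. physical
 * address bits 39:25.
 */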
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
        unsigned aper_size = 0, aper_base_32, aper_order;
        u64 aper_base;

        pci_read_config_dword(dev, 0x94, &aper_base_32);
        pci_read_config_dword(dev, 0x90, &aper_order);
        aper_order = (aper_order >> 1) & 7;

        aper_base = aper_base_32 & 0x7fff;
        aper_base <<= 25;

        aper_size = (32 * 1024 * 1024) << aper_order;
        if (aper_base + aper_size > 0x100000000UL || !aper_size)
                aper_base = 0;

        *size = aper_size;
        return aper_base;
}

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
        unsigned aper_size, gatt_size, new_aper_size;
        unsigned aper_base, new_aper_base;
        struct pci_dev *dev;
        void *gatt;
        int i;

        printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
        aper_size = aper_base = info->aper_size = 0;
        dev = NULL;
        for (i = 0; i < num_k8_northbridges; i++) {
                dev = k8_northbridges[i];
                new_aper_base = read_aperture(dev, &new_aper_size);
                if (!new_aper_base)
                        goto nommu;

                if (!aper_base) {
                        aper_size = new_aper_size;
                        aper_base = new_aper_base;
                }
                if (aper_size != new_aper_size || aper_base != new_aper_base)
                        goto nommu;
        }
        if (!aper_base)
                goto nommu;
        info->aper_base = aper_base;
        info->aper_size = aper_size >> 20;

        gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
        gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
        if (!gatt)
                panic("Cannot allocate GATT table");
        if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
                panic("Could not set GART PTEs to uncacheable pages");

        memset(gatt, 0, gatt_size);
        agp_gatt_table = gatt;

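        /*
         * Program every northbridge with the new table: offset 0x98
         * takes the GATT physical address (address bits 39:12 stored
         * in register bits 31:4), and in the aperture control register
         * at 0x90 bit 0 enables GART translation while bits 4 and 5,
         * which would disable the CPU and IO table walks, are cleared.
         */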
        for (i = 0; i < num_k8_northbridges; i++) {
                u32 gatt_reg;
                u32 ctl;

                dev = k8_northbridges[i];
                gatt_reg = __pa(gatt) >> 12;
                gatt_reg <<= 4;
                pci_write_config_dword(dev, 0x98, gatt_reg);
                pci_read_config_dword(dev, 0x90, &ctl);

                ctl |= 1;
                ctl &= ~((1<<4) | (1<<5));

                pci_write_config_dword(dev, 0x90, ctl);
        }
        flush_gart();

        printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
               aper_base, aper_size>>10);
        return 0;

 nommu:
        /* Should not happen anymore */
        printk(KERN_ERR "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
               KERN_ERR "PCI-DMA: 32bit PCI IO may malfunction.\n");
        return -1;
}

extern int agp_amd64_init(void);

static const struct dma_mapping_ops gart_dma_ops = {
        .mapping_error			= NULL,
        .map_single			= gart_map_single,
        .map_simple			= gart_map_simple,
        .unmap_single			= gart_unmap_single,
        .sync_single_for_cpu		= NULL,
        .sync_single_for_device		= NULL,
        .sync_single_range_for_cpu	= NULL,
        .sync_single_range_for_device	= NULL,
        .sync_sg_for_cpu		= NULL,
        .sync_sg_for_device		= NULL,
        .map_sg				= gart_map_sg,
        .unmap_sg			= gart_unmap_sg,
};

void gart_iommu_shutdown(void)
{
        struct pci_dev *dev;
        int i;

        if (no_agp && (dma_ops != &gart_dma_ops))
                return;

        for (i = 0; i < num_k8_northbridges; i++) {
                u32 ctl;

                dev = k8_northbridges[i];
                pci_read_config_dword(dev, 0x90, &ctl);

                ctl &= ~1;

                pci_write_config_dword(dev, 0x90, ctl);
        }
}

void __init gart_iommu_init(void)
{
        struct agp_kern_info info;
        unsigned long iommu_start;
        unsigned long aper_size;
        unsigned long scratch;
        long i;

        if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
                printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
                return;
        }

#ifndef CONFIG_AGP_AMD64
        no_agp = 1;
#else
        /* Makefile puts PCI initialization via subsys_initcall first. */
        /* Add other K8 AGP bridge drivers here */
        no_agp = no_agp ||
                (agp_amd64_init() < 0) ||
                (agp_copy_info(agp_bridge, &info) < 0);
#endif

        if (swiotlb)
                return;

        /* Did we detect a different HW IOMMU? */
        if (iommu_detected && !gart_iommu_aperture)
                return;

        if (no_iommu ||
            (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
            !gart_iommu_aperture ||
            (no_agp && init_k8_gatt(&info) < 0)) {
                if (end_pfn > MAX_DMA32_PFN) {
                        printk(KERN_ERR "WARNING more than 4GB of memory "
                                        "but GART IOMMU not available.\n"
                               KERN_ERR "WARNING 32bit PCI may malfunction.\n");
                }
                return;
        }

        printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
        aper_size = info.aper_size * 1024 * 1024;
        iommu_size = check_iommu_size(info.aper_base, aper_size);
        iommu_pages = iommu_size >> PAGE_SHIFT;

        iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
                                                      get_order(iommu_pages/8));
        if (!iommu_gart_bitmap)
                panic("Cannot allocate iommu bitmap\n");
        memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
        if (leak_trace) {
                iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
                                  get_order(iommu_pages*sizeof(void *)));
                if (iommu_leak_tab)
                        memset(iommu_leak_tab, 0, iommu_pages * 8);
                else
                        printk(KERN_DEBUG
                               "PCI-DMA: Cannot allocate leak trace area\n");
        }
#endif

        /*
         * Out of IOMMU space handling.
         * Reserve some invalid pages at the beginning of the GART.
         */
        set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

        agp_memory_reserved = iommu_size;
        printk(KERN_INFO
               "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
               iommu_size >> 20);

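        /*
         * The remapping window is carved out of the top of the
         * aperture; the part below iommu_start stays available to the
         * AGP driver (agp_memory_reserved tells it how much was taken).
         */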
        iommu_start = aper_size - iommu_size;
        iommu_bus_base = info.aper_base + iommu_start;
        bad_dma_address = iommu_bus_base;
        iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

        /*
         * Unmap the IOMMU part of the GART. The alias of the page is
         * always mapped with cache enabled and there is no full cache
         * coherency across the GART remapping. The unmapping avoids
         * automatic prefetches from the CPU allocating cache lines in
         * there. All CPU accesses are done via the direct mapping to
         * the backing memory. The GART address is only used by PCI
         * devices.
         */
        set_memory_np((unsigned long)__va(iommu_bus_base),
                      iommu_size >> PAGE_SHIFT);

        /*
         * Try to workaround a bug (thanks to BenH):
         * Set unmapped entries to a scratch page instead of 0.
         * Any prefetches that hit unmapped entries won't get a bus abort
         * then.
         */
        scratch = get_zeroed_page(GFP_KERNEL);
        if (!scratch)
                panic("Cannot allocate iommu scratch page");
        gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
        for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
                iommu_gatt_base[i] = gart_unmapped_entry;

        flush_gart();
        dma_ops = &gart_dma_ops;
}

void __init gart_parse_options(char *p)
{
        int arg;

#ifdef CONFIG_IOMMU_LEAK
        if (!strncmp(p, "leak", 4)) {
                leak_trace = 1;
                p += 4;
                if (*p == '=') ++p;
                if (isdigit(*p) && get_option(&p, &arg))
                        iommu_leak_pages = arg;
        }
#endif
        if (isdigit(*p) && get_option(&p, &arg))
                iommu_size = arg;
        if (!strncmp(p, "fullflush", 8))
                iommu_fullflush = 1;
        if (!strncmp(p, "nofullflush", 11))
                iommu_fullflush = 0;
        if (!strncmp(p, "noagp", 5))
                no_agp = 1;
        if (!strncmp(p, "noaperture", 10))
                fix_aperture = 0;
        /* duplicated from pci-dma.c */
        if (!strncmp(p, "force", 5))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "allowed", 7))
                gart_iommu_aperture_allowed = 1;
        if (!strncmp(p, "memaper", 7)) {
                fallback_aper_force = 1;
                p += 7;
                if (*p == '=') {
                        ++p;
                        if (get_option(&p, &arg))
                                fallback_aper_order = arg;
                }
        }
}