x86, gart: add resume handling
[linux-2.6/mini2440.git] / arch/x86/kernel/pci-gart_64.c
/*
 * Dynamic DMA mapping support for AMD Hammer.
 *
 * Use the integrated AGP GART in the Hammer northbridge as an IOMMU for PCI.
 * This allows PCI devices that only support 32-bit addresses to be used on
 * systems with more than 4GB of memory.
 *
 * See Documentation/DMA-mapping.txt for the interface specification.
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Subject to the GNU General Public License v2 only.
 */

#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/agp_backend.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/kdebug.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
#include <linux/sysdev.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mtrr.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/gart.h>
#include <asm/cacheflush.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/k8.h>

static unsigned long iommu_bus_base;	/* GART remapping area (physical) */
static unsigned long iommu_size;	/* size of remapping area in bytes */
static unsigned long iommu_pages;	/* .. and in pages */

static u32 *iommu_gatt_base;		/* Remapping table */

/*
 * If this is disabled the IOMMU will use an optimized flushing strategy
 * of only flushing when a mapping is reused. With it true the GART is
 * flushed for every mapping. Problem is that doing the lazy flush seems
 * to trigger bugs with some popular PCI cards, in particular 3ware (but
 * it has also been seen with Qlogic at least).
 */
int iommu_fullflush = 1;

/* Allocation bitmap for the remapping area: */
static DEFINE_SPINLOCK(iommu_bitmap_lock);
/* Guarded by iommu_bitmap_lock: */
static unsigned long *iommu_gart_bitmap;

static u32 gart_unmapped_entry;

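/*
 * GART PTE layout, as implied by GPTE_ENCODE()/GPTE_DECODE() below: bits
 * 31:12 hold the low part of the physical page address, physical address
 * bits 39:32 are stored in PTE bits 11:4, and bits 1:0 carry the
 * valid/coherent flags.
 */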
#define GPTE_VALID    1
#define GPTE_COHERENT 2
#define GPTE_ENCODE(x) \
	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
#define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))

#define to_pages(addr, size) \
	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)

#define EMERGENCY_PAGES 32 /* = 128KB */

#ifdef CONFIG_AGP
#define AGPEXTERN extern
#else
#define AGPEXTERN
#endif

/* backdoor interface to AGP driver */
AGPEXTERN int agp_memory_reserved;
AGPEXTERN __u32 *agp_gatt_table;

static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
static int need_flush;		/* global flush state. set for each gart wrap */

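/*
 * Allocate a run of 'size' GART pages from the allocation bitmap. The
 * search starts at next_bit (next-fit); when it wraps around to the start
 * of the bitmap, need_flush is set so the GART TLBs are flushed before any
 * freed entries can be reused.
 */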
static unsigned long alloc_iommu(struct device *dev, int size)
{
	unsigned long offset, flags;
	unsigned long boundary_size;
	unsigned long base_index;

	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
				  size, base_index, boundary_size, 0);
	if (offset == -1) {
		need_flush = 1;
		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
					  size, base_index, boundary_size, 0);
	}
	if (offset != -1) {
		set_bit_string(iommu_gart_bitmap, offset, size);
		next_bit = offset+size;
		if (next_bit >= iommu_pages) {
			next_bit = 0;
			need_flush = 1;
		}
	}
	if (iommu_fullflush)
		need_flush = 1;
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);

	return offset;
}

static void free_iommu(unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	iommu_area_free(iommu_gart_bitmap, offset, size);
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

/*
 * Use global flush state to avoid races with multiple flushers.
 */
static void flush_gart(void)
{
	unsigned long flags;

	spin_lock_irqsave(&iommu_bitmap_lock, flags);
	if (need_flush) {
		k8_flush_garts();
		need_flush = 0;
	}
	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
}

#ifdef CONFIG_IOMMU_LEAK

#define SET_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = __builtin_return_address(0);\
	} while (0)

#define CLEAR_LEAK(x)							\
	do {								\
		if (iommu_leak_tab)					\
			iommu_leak_tab[x] = NULL;			\
	} while (0)

/* Debugging aid for drivers that don't free their IOMMU tables */
static void **iommu_leak_tab;
static int leak_trace;
static int iommu_leak_pages = 20;

static void dump_leak(void)
{
	int i;
	static int dump;

	if (dump || !iommu_leak_tab)
		return;
	dump = 1;
	show_stack(NULL, NULL);

	/* Very crude. dump some from the end of the table too */
	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
	       iommu_leak_pages);
	for (i = 0; i < iommu_leak_pages; i += 2) {
		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0);
		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
	}
	printk(KERN_DEBUG "\n");
}
#else
# define SET_LEAK(x)
# define CLEAR_LEAK(x)
#endif

static void iommu_full(struct device *dev, size_t size, int dir)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly.
	 * Return some non mapped prereserved space in the aperture and
	 * let the Northbridge deal with it. This will result in garbage
	 * in the IO operation. When the size exceeds the prereserved space
	 * memory corruption will occur or random memory will be DMAed
	 * out. Hopefully no network devices use single mappings that big.
	 */

	printk(KERN_ERR
		"PCI-DMA: Out of IOMMU space for %lu bytes at device %s\n",
		size, dev->bus_id);

	if (size > PAGE_SIZE*EMERGENCY_PAGES) {
		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic("PCI-DMA: Memory would be corrupted\n");
		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
			panic(KERN_ERR
				"PCI-DMA: Random memory would be DMAed\n");
	}
#ifdef CONFIG_IOMMU_LEAK
	dump_leak();
#endif
}

static inline int
need_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	if (force_iommu)
		mmu = 1;

	return mmu;
}

static inline int
nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
{
	u64 mask = *dev->dma_mask;
	int high = addr + size > mask;
	int mmu = high;

	return mmu;
}

/* Map a single continuous physical area into the IOMMU.
 * Caller needs to check if the iommu is needed and flush.
 */
static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
				size_t size, int dir)
{
	unsigned long npages = to_pages(phys_mem, size);
	unsigned long iommu_page = alloc_iommu(dev, npages);
	int i;

	if (iommu_page == -1) {
		if (!nonforced_iommu(dev, phys_mem, size))
			return phys_mem;
		if (panic_on_overflow)
			panic("dma_map_area overflow %lu bytes\n", size);
		iommu_full(dev, size, dir);
		return bad_dma_address;
	}

	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
		SET_LEAK(iommu_page + i);
		phys_mem += PAGE_SIZE;
	}
	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
}

static dma_addr_t
gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
	dma_addr_t map = dma_map_area(dev, paddr, size, dir);

	flush_gart();

	return map;
}

/* Map a single area into the IOMMU */
static dma_addr_t
gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
{
	unsigned long bus;

	if (!dev)
		dev = &fallback_dev;

	if (!need_iommu(dev, paddr, size))
		return paddr;

	bus = gart_map_simple(dev, paddr, size, dir);

	return bus;
}

/*
 * Free a DMA mapping.
 */
static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
			      size_t size, int direction)
{
	unsigned long iommu_page;
	int npages;
	int i;

	if (dma_addr < iommu_bus_base + EMERGENCY_PAGES*PAGE_SIZE ||
	    dma_addr >= iommu_bus_base + iommu_size)
		return;

	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
	npages = to_pages(dma_addr, size);
	for (i = 0; i < npages; i++) {
		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
		CLEAR_LEAK(iommu_page + i);
	}
	free_iommu(iommu_page, npages);
}

/*
 * Wrapper for pci_unmap_single working with scatterlists.
 */
static void
gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!s->dma_length || !s->length)
			break;
		gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
	}
}

/* Fallback for dma_map_sg in case of overflow */
static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
			       int nents, int dir)
{
	struct scatterlist *s;
	int i;

#ifdef CONFIG_IOMMU_DEBUG
	printk(KERN_DEBUG "dma_map_sg overflow\n");
#endif

	for_each_sg(sg, s, nents, i) {
		unsigned long addr = sg_phys(s);

		if (nonforced_iommu(dev, addr, s->length)) {
			addr = dma_map_area(dev, addr, s->length, dir);
			if (addr == bad_dma_address) {
				if (i > 0)
					gart_unmap_sg(dev, sg, i, dir);
				nents = 0;
				sg[0].dma_length = 0;
				break;
			}
		}
		s->dma_address = addr;
		s->dma_length = s->length;
	}
	flush_gart();

	return nents;
}

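/*
 * __dma_map_cont() below programs one contiguous run of GART entries for a
 * group of scatterlist entries and reports the whole run through the first
 * output entry (sout); dma_map_cont() is a thin wrapper that skips the GART
 * entirely when no remapping is needed.
 */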
/* Map multiple scatterlist entries continuous into the first. */
static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
{
	unsigned long iommu_start = alloc_iommu(dev, pages);
	unsigned long iommu_page = iommu_start;
	struct scatterlist *s;
	int i;

	if (iommu_start == -1)
		return -1;

	for_each_sg(start, s, nelems, i) {
		unsigned long pages, addr;
		unsigned long phys_addr = s->dma_address;

		BUG_ON(s != start && s->offset);
		if (s == start) {
			sout->dma_address = iommu_bus_base;
			sout->dma_address += iommu_page*PAGE_SIZE + s->offset;
			sout->dma_length = s->length;
		} else {
			sout->dma_length += s->length;
		}

		addr = phys_addr;
		pages = to_pages(s->offset, s->length);
		while (pages--) {
			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
			SET_LEAK(iommu_page);
			addr += PAGE_SIZE;
			iommu_page++;
		}
	}
	BUG_ON(iommu_page - iommu_start != pages);

	return 0;
}

static inline int
dma_map_cont(struct device *dev, struct scatterlist *start, int nelems,
	     struct scatterlist *sout, unsigned long pages, int need)
{
	if (!need) {
		BUG_ON(nelems != 1);
		sout->dma_address = start->dma_address;
		sout->dma_length = start->length;
		return 0;
	}
	return __dma_map_cont(dev, start, nelems, sout, pages);
}

/*
 * DMA map all entries in a scatterlist.
 * Merge chunks that have page aligned sizes into a continuous mapping.
 */
static int
gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
{
	struct scatterlist *s, *ps, *start_sg, *sgmap;
	int need = 0, nextneed, i, out, start;
	unsigned long pages = 0;
	unsigned int seg_size;
	unsigned int max_seg_size;

	if (nents == 0)
		return 0;

	if (!dev)
		dev = &fallback_dev;

	out = 0;
	start = 0;
	start_sg = sgmap = sg;
	seg_size = 0;
	max_seg_size = dma_get_max_seg_size(dev);
	ps = NULL; /* shut up gcc */
	for_each_sg(sg, s, nents, i) {
		dma_addr_t addr = sg_phys(s);

		s->dma_address = addr;
		BUG_ON(s->length == 0);

		nextneed = need_iommu(dev, addr, s->length);

		/* Handle the previous not yet processed entries */
		if (i > start) {
			/*
			 * Can only merge when the last chunk ends on a
			 * page boundary and the new one doesn't have an
			 * offset.
			 */
			if (!iommu_merge || !nextneed || !need || s->offset ||
			    (s->length + seg_size > max_seg_size) ||
			    (ps->offset + ps->length) % PAGE_SIZE) {
				if (dma_map_cont(dev, start_sg, i - start,
						 sgmap, pages, need) < 0)
					goto error;
				out++;
				seg_size = 0;
				sgmap = sg_next(sgmap);
				pages = 0;
				start = i;
				start_sg = s;
			}
		}

		seg_size += s->length;
		need = nextneed;
		pages += to_pages(s->offset, s->length);
		ps = s;
	}
	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
		goto error;
	out++;
	flush_gart();
	if (out < nents) {
		sgmap = sg_next(sgmap);
		sgmap->dma_length = 0;
	}
	return out;

error:
	flush_gart();
	gart_unmap_sg(dev, sg, out, dir);

	/* When it was forced or merged try again in a dumb way */
	if (force_iommu || iommu_merge) {
		out = dma_map_sg_nonforce(dev, sg, nents, dir);
		if (out > 0)
			return out;
	}
	if (panic_on_overflow)
		panic("dma_map_sg: overflow on %lu pages\n", pages);

	iommu_full(dev, pages << PAGE_SHIFT, dir);
	for_each_sg(sg, s, nents, i)
		s->dma_address = bad_dma_address;
	return 0;
}

static int no_agp;

static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
{
	unsigned long a;

	if (!iommu_size) {
		iommu_size = aper_size;
		if (!no_agp)
			iommu_size /= 2;
	}

	a = aper + iommu_size;
	iommu_size -= round_up(a, PMD_PAGE_SIZE) - a;

	if (iommu_size < 64*1024*1024) {
		printk(KERN_WARNING
			"PCI-DMA: Warning: Small IOMMU %luMB."
			" Consider increasing the AGP aperture in BIOS\n",
			iommu_size >> 20);
	}

	return iommu_size;
}

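/*
 * Read the aperture base and size from a northbridge. As the shifts below
 * imply, the base register holds physical address bits 39:25 and the
 * aperture size is encoded as 32MB << aper_order.
 */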
static __init unsigned read_aperture(struct pci_dev *dev, u32 *size)
{
	unsigned aper_size = 0, aper_base_32, aper_order;
	u64 aper_base;

	pci_read_config_dword(dev, AMD64_GARTAPERTUREBASE, &aper_base_32);
	pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &aper_order);
	aper_order = (aper_order >> 1) & 7;

	aper_base = aper_base_32 & 0x7fff;
	aper_base <<= 25;

	aper_size = (32 * 1024 * 1024) << aper_order;
	if (aper_base + aper_size > 0x100000000UL || !aper_size)
		aper_base = 0;

	*size = aper_size;
	return aper_base;
}

static void enable_gart_translations(void)
{
	int i;

	for (i = 0; i < num_k8_northbridges; i++) {
		struct pci_dev *dev = k8_northbridges[i];

		enable_gart_translation(dev, __pa(agp_gatt_table));
	}
}

/*
 * If fix_up_north_bridges is set, the north bridges have to be fixed up on
 * resume in the same way as they are handled in gart_iommu_hole_init().
 */
static bool fix_up_north_bridges;
static u32 aperture_order;
static u32 aperture_alloc;

void set_up_gart_resume(u32 aper_order, u32 aper_alloc)
{
	fix_up_north_bridges = true;
	aperture_order = aper_order;
	aperture_alloc = aper_alloc;
}

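/*
 * Restore the aperture order and base saved via set_up_gart_resume() before
 * re-enabling GART translation, mirroring what gart_iommu_hole_init() does
 * at boot.
 */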
static int gart_resume(struct sys_device *dev)
{
	printk(KERN_INFO "PCI-DMA: Resuming GART IOMMU\n");

	if (fix_up_north_bridges) {
		int i;

		printk(KERN_INFO "PCI-DMA: Restoring GART aperture settings\n");

		for (i = 0; i < num_k8_northbridges; i++) {
			struct pci_dev *dev = k8_northbridges[i];

			/*
			 * Don't enable translations just yet. That is the next
			 * step. Restore the pre-suspend aperture settings.
			 */
			pci_write_config_dword(dev, AMD64_GARTAPERTURECTL,
						aperture_order << 1);
			pci_write_config_dword(dev, AMD64_GARTAPERTUREBASE,
						aperture_alloc >> 25);
		}
	}

	enable_gart_translations();

	return 0;
}

static int gart_suspend(struct sys_device *dev, pm_message_t state)
{
	return 0;
}

static struct sysdev_class gart_sysdev_class = {
	.name = "gart",
	.suspend = gart_suspend,
	.resume = gart_resume,
};

static struct sys_device device_gart = {
	.id = 0,
	.cls = &gart_sysdev_class,
};

/*
 * Private Northbridge GATT initialization in case we cannot use the
 * AGP driver for some reason.
 */
static __init int init_k8_gatt(struct agp_kern_info *info)
{
	unsigned aper_size, gatt_size, new_aper_size;
	unsigned aper_base, new_aper_base;
	struct pci_dev *dev;
	void *gatt;
	int i, error;

	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
	aper_size = aper_base = info->aper_size = 0;
	dev = NULL;
	for (i = 0; i < num_k8_northbridges; i++) {
		dev = k8_northbridges[i];
		new_aper_base = read_aperture(dev, &new_aper_size);
		if (!new_aper_base)
			goto nommu;

		if (!aper_base) {
			aper_size = new_aper_size;
			aper_base = new_aper_base;
		}
		if (aper_size != new_aper_size || aper_base != new_aper_base)
			goto nommu;
	}
	if (!aper_base)
		goto nommu;
	info->aper_base = aper_base;
	info->aper_size = aper_size >> 20;

	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
	if (!gatt)
		panic("Cannot allocate GATT table");
	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
		panic("Could not set GART PTEs to uncacheable pages");

	memset(gatt, 0, gatt_size);
	agp_gatt_table = gatt;

	enable_gart_translations();

	error = sysdev_class_register(&gart_sysdev_class);
	if (!error)
		error = sysdev_register(&device_gart);
	if (error)
		panic("Could not register gart_sysdev -- would corrupt data on next suspend");

	flush_gart();

	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
	       aper_base, aper_size>>10);
	return 0;

 nommu:
	/* Should not happen anymore */
	printk(KERN_WARNING "PCI-DMA: More than 4GB of RAM and no IOMMU\n"
	       KERN_WARNING "falling back to iommu=soft.\n");
	return -1;
}

extern int agp_amd64_init(void);

static const struct dma_mapping_ops gart_dma_ops = {
	.mapping_error = NULL,
	.map_single = gart_map_single,
	.map_simple = gart_map_simple,
	.unmap_single = gart_unmap_single,
	.sync_single_for_cpu = NULL,
	.sync_single_for_device = NULL,
	.sync_single_range_for_cpu = NULL,
	.sync_single_range_for_device = NULL,
	.sync_sg_for_cpu = NULL,
	.sync_sg_for_device = NULL,
	.map_sg = gart_map_sg,
	.unmap_sg = gart_unmap_sg,
};

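/*
 * Disable GART address translation at shutdown by clearing the GARTEN bit
 * in each northbridge's aperture control register.
 */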
void gart_iommu_shutdown(void)
{
	struct pci_dev *dev;
	int i;

	if (no_agp && (dma_ops != &gart_dma_ops))
		return;

	for (i = 0; i < num_k8_northbridges; i++) {
		u32 ctl;

		dev = k8_northbridges[i];
		pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &ctl);

		ctl &= ~GARTEN;

		pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, ctl);
	}
}

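/*
 * Set up the GART IOMMU: locate the K8 northbridges, size the remapping
 * area at the top of the AGP aperture, allocate the allocation bitmap,
 * unmap the aperture from the kernel direct mapping, point unused GART
 * entries at a scratch page and finally install gart_dma_ops.
 */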
void __init gart_iommu_init(void)
{
	struct agp_kern_info info;
	unsigned long iommu_start;
	unsigned long aper_size;
	unsigned long scratch;
	long i;

	if (cache_k8_northbridges() < 0 || num_k8_northbridges == 0) {
		printk(KERN_INFO "PCI-GART: No AMD northbridge found.\n");
		return;
	}

#ifndef CONFIG_AGP_AMD64
	no_agp = 1;
#else
	/* Makefile puts PCI initialization via subsys_initcall first. */
	/* Add other K8 AGP bridge drivers here */
	no_agp = no_agp ||
		(agp_amd64_init() < 0) ||
		(agp_copy_info(agp_bridge, &info) < 0);
#endif

	if (swiotlb)
		return;

	/* Did we detect a different HW IOMMU? */
	if (iommu_detected && !gart_iommu_aperture)
		return;

	if (no_iommu ||
	    (!force_iommu && end_pfn <= MAX_DMA32_PFN) ||
	    !gart_iommu_aperture ||
	    (no_agp && init_k8_gatt(&info) < 0)) {
		if (end_pfn > MAX_DMA32_PFN) {
			printk(KERN_WARNING "More than 4GB of memory "
			       "but GART IOMMU not available.\n"
			       KERN_WARNING "falling back to iommu=soft.\n");
		}
		return;
	}

	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
	aper_size = info.aper_size * 1024 * 1024;
	iommu_size = check_iommu_size(info.aper_base, aper_size);
	iommu_pages = iommu_size >> PAGE_SHIFT;

	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
						      get_order(iommu_pages/8));
	if (!iommu_gart_bitmap)
		panic("Cannot allocate iommu bitmap\n");
	memset(iommu_gart_bitmap, 0, iommu_pages/8);

#ifdef CONFIG_IOMMU_LEAK
	if (leak_trace) {
		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
				  get_order(iommu_pages*sizeof(void *)));
		if (iommu_leak_tab)
			memset(iommu_leak_tab, 0, iommu_pages * 8);
		else
			printk(KERN_DEBUG
			       "PCI-DMA: Cannot allocate leak trace area\n");
	}
#endif

	/*
	 * Out of IOMMU space handling.
	 * Reserve some invalid pages at the beginning of the GART.
	 */
	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);

	agp_memory_reserved = iommu_size;
	printk(KERN_INFO
	       "PCI-DMA: Reserving %luMB of IOMMU area in the AGP aperture\n",
	       iommu_size >> 20);

	iommu_start = aper_size - iommu_size;
	iommu_bus_base = info.aper_base + iommu_start;
	bad_dma_address = iommu_bus_base;
	iommu_gatt_base = agp_gatt_table + (iommu_start>>PAGE_SHIFT);

	/*
	 * Unmap the IOMMU part of the GART. The alias of the page is
	 * always mapped with cache enabled and there is no full cache
	 * coherency across the GART remapping. The unmapping avoids
	 * automatic prefetches from the CPU allocating cache lines in
	 * there. All CPU accesses are done via the direct mapping to
	 * the backing memory. The GART address is only used by PCI
	 * devices.
	 */
	set_memory_np((unsigned long)__va(iommu_bus_base),
		      iommu_size >> PAGE_SHIFT);
	/*
	 * Tricky. The GART table remaps the physical memory range,
	 * so the CPU won't notice potential aliases and if the memory
	 * is remapped to UC later on, we might surprise the PCI devices
	 * with a stray writeout of a cacheline. So play it sure and
	 * do an explicit, full-scale wbinvd() _after_ having marked all
	 * the pages as Not-Present:
	 */
	wbinvd();

	/*
	 * Try to workaround a bug (thanks to BenH):
	 * Set unmapped entries to a scratch page instead of 0.
	 * Any prefetches that hit unmapped entries won't get a bus abort
	 * then.
	 */
	scratch = get_zeroed_page(GFP_KERNEL);
	if (!scratch)
		panic("Cannot allocate iommu scratch page");
	gart_unmapped_entry = GPTE_ENCODE(__pa(scratch));
	for (i = EMERGENCY_PAGES; i < iommu_pages; i++)
		iommu_gatt_base[i] = gart_unmapped_entry;

	flush_gart();
	dma_ops = &gart_dma_ops;
}

void __init gart_parse_options(char *p)
{
	int arg;

#ifdef CONFIG_IOMMU_LEAK
	if (!strncmp(p, "leak", 4)) {
		leak_trace = 1;
		p += 4;
		if (*p == '=') ++p;
		if (isdigit(*p) && get_option(&p, &arg))
			iommu_leak_pages = arg;
	}
#endif
	if (isdigit(*p) && get_option(&p, &arg))
		iommu_size = arg;
	if (!strncmp(p, "fullflush", 8))
		iommu_fullflush = 1;
	if (!strncmp(p, "nofullflush", 11))
		iommu_fullflush = 0;
	if (!strncmp(p, "noagp", 5))
		no_agp = 1;
	if (!strncmp(p, "noaperture", 10))
		fix_aperture = 0;
	/* duplicated from pci-dma.c */
	if (!strncmp(p, "force", 5))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "allowed", 7))
		gart_iommu_aperture_allowed = 1;
	if (!strncmp(p, "memaper", 7)) {
		fallback_aper_force = 1;
		p += 7;
		if (*p == '=') {
			++p;
			if (get_option(&p, &arg))
				fallback_aper_order = arg;