[PATCH] x86_64: Tell user to enable GART_IOMMU when needed
arch/x86_64/kernel/pci-nommu.c
/* Fallback functions when the main IOMMU code is not compiled in. This
   code is roughly equivalent to i386. */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <asm/proto.h>
#include <asm/processor.h>
#include <asm/dma.h>

static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
        if (hwdev && bus + size > *hwdev->dma_mask) {
                printk(KERN_ERR
                       "nommu_%s: overflow %Lx+%lu of device mask %Lx\n",
                       name, (long long)bus, size, (long long)*hwdev->dma_mask);
                return 0;
        }
        return 1;
}
static dma_addr_t
nommu_map_single(struct device *hwdev, void *ptr, size_t size,
                 int direction)
{
        dma_addr_t bus = virt_to_bus(ptr);
        if (!check_addr("map_single", hwdev, bus, size))
                return bad_dma_address;
        return bus;
}

void nommu_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                        int direction)
{
}
/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
                 int nents, int direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);
        for (i = 0; i < nents; i++) {
                struct scatterlist *s = &sg[i];
                BUG_ON(!s->page);
                s->dma_address = virt_to_bus(page_address(s->page) + s->offset);
                if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
                        return 0;
                s->dma_length = s->length;
        }
        return nents;
}
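/*
 * Illustrative sketch, not part of the original file: a driver-side view of
 * the scatter-gather interface documented above. The function and variable
 * names (example_sg_xfer, dev, sg, nents) are hypothetical; dma_map_sg(),
 * sg_dma_address(), sg_dma_len() and dma_unmap_sg() are the generic helpers
 * from <linux/dma-mapping.h> that end up in the nommu_* routines when this
 * fallback provides dma_ops.
 */
static void example_sg_xfer(struct device *dev, struct scatterlist *sg,
                            int nents)
{
        int i, mapped;

        /* Tag each scatterlist element with a bus address and length. */
        mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
        if (!mapped)
                return;         /* e.g. an address overflowed the device mask */

        /* Hand each addr/length pair to the hardware. */
        for (i = 0; i < mapped; i++) {
                dma_addr_t bus = sg_dma_address(&sg[i]);
                unsigned int len = sg_dma_len(&sg[i]);
                printk(KERN_DEBUG "sg %d: bus %Lx len %u\n",
                       i, (unsigned long long)bus, len);
        }

        /* Drop the translations once the transfer has completed. */
        dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
}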
/* Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
void nommu_unmap_sg(struct device *dev, struct scatterlist *sg,
                    int nents, int dir)
{
}

struct dma_mapping_ops nommu_dma_ops = {
        .map_single = nommu_map_single,
        .unmap_single = nommu_unmap_single,
        .map_sg = nommu_map_sg,
        .unmap_sg = nommu_unmap_sg,
        .is_phys = 1,
};
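/*
 * Simplified sketch, not part of the original file: how the generic
 * dma_map_single() wrapper of this era reaches the ops table above. The
 * real wrapper lives in include/asm-x86_64/dma-mapping.h and may differ in
 * detail; this only shows the dma_ops indirection.
 */
static inline dma_addr_t example_dma_map_single(struct device *hwdev,
                                                void *ptr, size_t size,
                                                int direction)
{
        /* With no IOMMU compiled in, no_iommu_init() below installs
         * nommu_dma_ops, so this call lands in nommu_map_single(). */
        return dma_ops->map_single(hwdev, ptr, size, direction);
}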
void __init no_iommu_init(void)
{
        if (dma_ops)
                return;
        printk(KERN_INFO "PCI-DMA: Disabling IOMMU.\n");
        dma_ops = &nommu_dma_ops;
        if (end_pfn > MAX_DMA32_PFN) {
                printk(KERN_ERR
                       "WARNING more than 4GB of memory but IOMMU not compiled in.\n"
                       KERN_ERR "WARNING 32bit PCI may malfunction.\n"
                       KERN_ERR "You might want to enable CONFIG_GART_IOMMU\n");
        }
}