/* Fallback functions when the main IOMMU code is not compiled in. This
   code is roughly equivalent to i386. */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/iommu.h>
#include <asm/dma.h>
/*
 * Check that a bus address is reachable by the device's DMA mask.
 * Returns 1 if the address is usable, 0 if it would overflow the mask.
 */
static int
check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size)
{
	if (hwdev && !dma_capable(hwdev, bus, size)) {
		if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
			printk(KERN_ERR
			    "nommu_%s: overflow %Lx+%zu of device mask %Lx\n",
				name, (long long)bus, size,
				(long long)*hwdev->dma_mask);
		return 0;
	}
	return 1;
}
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	/* Without an IOMMU the bus address is just the physical address. */
	dma_addr_t bus = page_to_phys(page) + offset;

	WARN_ON(size == 0);
	if (!check_addr("map_single", dev, bus, size))
		return DMA_ERROR_CODE;
	flush_write_buffers();
	return bus;
}
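
/*
 * Illustrative sketch only (not part of this file): roughly how a
 * driver-side streaming mapping reaches nommu_map_page() once these
 * ops are installed.  The device "dev", the page and the length are
 * hypothetical; error handling is abbreviated.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... hand "handle" to the device, wait for the transfer ...
 *	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 */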
/* Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.  (A usage sketch follows the function below.)
 */
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		BUG_ON(!sg_page(s));
		/* Each segment maps 1:1 to its physical address. */
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;
		s->dma_length = s->length;
	}
	flush_write_buffers();
	return nents;
}
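
/*
 * Illustrative sketch only (not part of this file): typical driver use
 * of the scatter-gather interface described in the comment above.
 * "dev", "sglist", "nents" and program_hw_descriptor() are hypothetical.
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sglist, s, count, i)
 *		program_hw_descriptor(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */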
static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
				dma_addr_t dma_addr)
{
	/* Coherent memory came straight from the page allocator. */
	free_pages((unsigned long)vaddr, get_order(size));
}
static void nommu_sync_single_for_device(struct device *dev,
			dma_addr_t addr, size_t size,
			enum dma_data_direction dir)
{
	flush_write_buffers();
}
static void nommu_sync_sg_for_device(struct device *dev,
			struct scatterlist *sg, int nelems,
			enum dma_data_direction dir)
{
	flush_write_buffers();
}
struct dma_map_ops nommu_dma_ops = {
	.alloc_coherent		= dma_generic_alloc_coherent,
	.free_coherent		= nommu_free_coherent,
	.map_sg			= nommu_map_sg,
	.map_page		= nommu_map_page,
	.sync_single_for_device	= nommu_sync_single_for_device,
	.sync_sg_for_device	= nommu_sync_sg_for_device,
	.is_phys		= 1,
};
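
/*
 * Illustrative sketch only (not part of this file): on x86 of this era
 * the generic DMA API dispatches through a dma_map_ops pointer, so the
 * table above is what dma_map_page()/dma_map_sg() end up calling when
 * no hardware IOMMU driver takes over.  Roughly (see
 * arch/x86/kernel/pci-dma.c and asm/dma-mapping.h for the real wiring):
 *
 *	struct dma_map_ops *dma_ops = &nommu_dma_ops;
 *
 *	static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 *	{
 *		return dma_ops;
 *	}
 */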