/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * busses using the iommu infrastructure
 */

/*
 * Generic iommu implementation
 */
14 /* Allocates a contiguous real buffer and creates mappings over it.
15 * Returns the virtual address of the buffer and sets dma_handle
16 * to the dma address (mapping) of the first page.
18 static void *dma_iommu_alloc_coherent(struct device
*dev
, size_t size
,
19 dma_addr_t
*dma_handle
, gfp_t flag
)
21 return iommu_alloc_coherent(dev
, get_iommu_table_base(dev
), size
,
22 dma_handle
, dev
->coherent_dma_mask
, flag
,
26 static void dma_iommu_free_coherent(struct device
*dev
, size_t size
,
27 void *vaddr
, dma_addr_t dma_handle
)
29 iommu_free_coherent(get_iommu_table_base(dev
), size
, vaddr
, dma_handle
);
32 /* Creates TCEs for a user provided buffer. The user buffer must be
33 * contiguous real kernel storage (not vmalloc). The address passed here
34 * comprises a page address and offset into that page. The dma_addr_t
35 * returned will point to the same byte within the page as was passed in.
37 static dma_addr_t
dma_iommu_map_page(struct device
*dev
, struct page
*page
,
38 unsigned long offset
, size_t size
,
39 enum dma_data_direction direction
,
40 struct dma_attrs
*attrs
)
42 return iommu_map_page(dev
, get_iommu_table_base(dev
), page
, offset
,
43 size
, device_to_mask(dev
), direction
, attrs
);
47 static void dma_iommu_unmap_page(struct device
*dev
, dma_addr_t dma_handle
,
48 size_t size
, enum dma_data_direction direction
,
49 struct dma_attrs
*attrs
)
51 iommu_unmap_page(get_iommu_table_base(dev
), dma_handle
, size
, direction
,
56 static int dma_iommu_map_sg(struct device
*dev
, struct scatterlist
*sglist
,
57 int nelems
, enum dma_data_direction direction
,
58 struct dma_attrs
*attrs
)
60 return iommu_map_sg(dev
, get_iommu_table_base(dev
), sglist
, nelems
,
61 device_to_mask(dev
), direction
, attrs
);
64 static void dma_iommu_unmap_sg(struct device
*dev
, struct scatterlist
*sglist
,
65 int nelems
, enum dma_data_direction direction
,
66 struct dma_attrs
*attrs
)
68 iommu_unmap_sg(get_iommu_table_base(dev
), sglist
, nelems
, direction
,
72 /* We support DMA to/from any memory page via the iommu */
73 static int dma_iommu_dma_supported(struct device
*dev
, u64 mask
)
75 struct iommu_table
*tbl
= get_iommu_table_base(dev
);
78 dev_info(dev
, "Warning: IOMMU dma not supported: mask 0x%08llx"
79 ", table unavailable\n", mask
);
83 if ((tbl
->it_offset
+ tbl
->it_size
) > (mask
>> IOMMU_PAGE_SHIFT
)) {
84 dev_info(dev
, "Warning: IOMMU window too big for device mask\n");
85 dev_info(dev
, "mask: 0x%08llx, table end: 0x%08lx\n",
86 mask
, (tbl
->it_offset
+ tbl
->it_size
) <<
93 struct dma_map_ops dma_iommu_ops
= {
94 .alloc_coherent
= dma_iommu_alloc_coherent
,
95 .free_coherent
= dma_iommu_free_coherent
,
96 .map_sg
= dma_iommu_map_sg
,
97 .unmap_sg
= dma_iommu_unmap_sg
,
98 .dma_supported
= dma_iommu_dma_supported
,
99 .map_page
= dma_iommu_map_page
,
100 .unmap_page
= dma_iommu_unmap_page
,
102 EXPORT_SYMBOL(dma_iommu_ops
);