/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
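
/*
 * Illustrative sketch: platform setup code that knows its bus sees system
 * memory at a fixed displacement could record that offset per device
 * roughly as below. "my_dev" and MY_BUS_DMA_OFFSET are hypothetical names
 * used purely for the example.
 *
 *	my_dev->archdata.dma_data = (void *)(unsigned long)MY_BUS_DMA_OFFSET;
 */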

static unsigned long get_dma_direct_offset(struct device *dev)
{
	if (dev)
		return (unsigned long)dev->archdata.dma_data;

	/* No struct device: fall back to the platform default */
	return PCI_DRAM_OFFSET;
}

void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	/* Non-coherent platforms allocate from the uncached coherent pool */
	ret = __dma_alloc_coherent(size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_direct_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}
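
/*
 * Illustrative call path (a sketch, assuming dma_direct_ops has been
 * installed for the device): a driver's coherent allocation reaches the
 * function above through the generic API, e.g.
 *
 *	dma_addr_t bus_addr;
 *	void *cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &bus_addr,
 *					    GFP_KERNEL);
 *
 * The returned bus_addr already includes the per-device offset described
 * in the comment at the top of this file.
 */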

void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		sg->dma_length = sg->length;
		/* Flush/invalidate caches; no-op on cache-coherent platforms */
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	/* Nothing to undo for a direct mapping */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved to check for memory though it better be
	 * done via some global so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= DMA_32BIT_MASK;
#else
	return 1;
#endif
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/* Nothing to undo for a direct mapping */
}

struct dma_mapping_ops dma_direct_ops = {
	.alloc_coherent	= dma_direct_alloc_coherent,
	.free_coherent	= dma_direct_free_coherent,
	.map_sg		= dma_direct_map_sg,
	.unmap_sg	= dma_direct_unmap_sg,
	.dma_supported	= dma_direct_dma_supported,
	.map_page	= dma_direct_map_page,
	.unmap_page	= dma_direct_unmap_page,
};
EXPORT_SYMBOL(dma_direct_ops);
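
/*
 * Illustrative sketch (assuming the powerpc set_dma_ops() helper from
 * asm/dma-mapping.h is available): platform code would typically attach
 * these callbacks to a device before drivers start mapping, e.g.
 *
 *	set_dma_ops(&pdev->dev, &dma_direct_ops);
 *
 * where "pdev" stands for a hypothetical device being set up.
 */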