/*
 * Copyright (c) 2009, 2020 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */
13 #include "exec/memory.h"
14 #include "exec/address-spaces.h"
15 #include "block/block.h"
16 #include "block/accounting.h"
/* Forward declaration; the struct itself is defined later in this header. */
typedef struct ScatterGatherEntry ScatterGatherEntry;
/*
 * Direction of a DMA transfer, from the device's point of view:
 * TO_DEVICE reads guest memory, FROM_DEVICE writes guest memory.
 */
typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;
26 ScatterGatherEntry
*sg
;
34 #ifndef CONFIG_USER_ONLY
37 * When an IOMMU is present, bus addresses become distinct from
38 * CPU/memory physical addresses and may be a different size. Because
39 * the IOVA size depends more on the bus than on the platform, we more
40 * or less have to treat these as 64-bit always to cover all (or at
43 typedef uint64_t dma_addr_t
;
45 #define DMA_ADDR_BITS 64
46 #define DMA_ADDR_FMT "%" PRIx64
48 static inline void dma_barrier(AddressSpace
*as
, DMADirection dir
)
51 * This is called before DMA read and write operations
52 * unless the _relaxed form is used and is responsible
53 * for providing some sane ordering of accesses vs
54 * concurrently running VCPUs.
56 * Users of map(), unmap() or lower level st/ld_*
57 * operations are responsible for providing their own
58 * ordering via barriers.
60 * This primitive implementation does a simple smp_mb()
61 * before each operation which provides pretty much full
64 * A smarter implementation can be devised if needed to
65 * use lighter barriers based on the direction of the
66 * transfer, the DMA context, etc...
71 /* Checks that the given range of addresses is valid for DMA. This is
72 * useful for certain cases, but usually you should just use
73 * dma_memory_{read,write}() and check for errors */
74 static inline bool dma_memory_valid(AddressSpace
*as
,
75 dma_addr_t addr
, dma_addr_t len
,
78 return address_space_access_valid(as
, addr
, len
,
79 dir
== DMA_DIRECTION_FROM_DEVICE
,
80 MEMTXATTRS_UNSPECIFIED
);
83 static inline MemTxResult
dma_memory_rw_relaxed(AddressSpace
*as
,
85 void *buf
, dma_addr_t len
,
88 return address_space_rw(as
, addr
, MEMTXATTRS_UNSPECIFIED
,
89 buf
, len
, dir
== DMA_DIRECTION_FROM_DEVICE
);
92 static inline MemTxResult
dma_memory_read_relaxed(AddressSpace
*as
,
94 void *buf
, dma_addr_t len
)
96 return dma_memory_rw_relaxed(as
, addr
, buf
, len
, DMA_DIRECTION_TO_DEVICE
);
99 static inline MemTxResult
dma_memory_write_relaxed(AddressSpace
*as
,
104 return dma_memory_rw_relaxed(as
, addr
, (void *)buf
, len
,
105 DMA_DIRECTION_FROM_DEVICE
);
109 * dma_memory_rw: Read from or write to an address space from DMA controller.
111 * Return a MemTxResult indicating whether the operation succeeded
112 * or failed (eg unassigned memory, device rejected the transaction,
115 * @as: #AddressSpace to be accessed
116 * @addr: address within that address space
117 * @buf: buffer with the data transferred
118 * @len: the number of bytes to read or write
119 * @dir: indicates the transfer direction
121 static inline MemTxResult
dma_memory_rw(AddressSpace
*as
, dma_addr_t addr
,
122 void *buf
, dma_addr_t len
,
125 dma_barrier(as
, dir
);
127 return dma_memory_rw_relaxed(as
, addr
, buf
, len
, dir
);
131 * dma_memory_read: Read from an address space from DMA controller.
133 * Return a MemTxResult indicating whether the operation succeeded
134 * or failed (eg unassigned memory, device rejected the transaction,
135 * IOMMU fault). Called within RCU critical section.
137 * @as: #AddressSpace to be accessed
138 * @addr: address within that address space
139 * @buf: buffer with the data transferred
140 * @len: length of the data transferred
142 static inline MemTxResult
dma_memory_read(AddressSpace
*as
, dma_addr_t addr
,
143 void *buf
, dma_addr_t len
)
145 return dma_memory_rw(as
, addr
, buf
, len
, DMA_DIRECTION_TO_DEVICE
);
149 * address_space_write: Write to address space from DMA controller.
151 * Return a MemTxResult indicating whether the operation succeeded
152 * or failed (eg unassigned memory, device rejected the transaction,
155 * @as: #AddressSpace to be accessed
156 * @addr: address within that address space
157 * @buf: buffer with the data transferred
158 * @len: the number of bytes to write
160 static inline MemTxResult
dma_memory_write(AddressSpace
*as
, dma_addr_t addr
,
161 const void *buf
, dma_addr_t len
)
163 return dma_memory_rw(as
, addr
, (void *)buf
, len
,
164 DMA_DIRECTION_FROM_DEVICE
);
168 * dma_memory_set: Fill memory with a constant byte from DMA controller.
170 * Return a MemTxResult indicating whether the operation succeeded
171 * or failed (eg unassigned memory, device rejected the transaction,
174 * @as: #AddressSpace to be accessed
175 * @addr: address within that address space
176 * @c: constant byte to fill the memory
177 * @len: the number of bytes to fill with the constant byte
179 MemTxResult
dma_memory_set(AddressSpace
*as
, dma_addr_t addr
,
180 uint8_t c
, dma_addr_t len
);
183 * address_space_map: Map a physical memory region into a host virtual address.
185 * May map a subset of the requested range, given by and returned in @plen.
186 * May return %NULL and set *@plen to zero(0), if resources needed to perform
187 * the mapping are exhausted.
188 * Use only for reads OR writes - not for read-modify-write operations.
190 * @as: #AddressSpace to be accessed
191 * @addr: address within that address space
192 * @len: pointer to length of buffer; updated on return
193 * @dir: indicates the transfer direction
195 static inline void *dma_memory_map(AddressSpace
*as
,
196 dma_addr_t addr
, dma_addr_t
*len
,
202 p
= address_space_map(as
, addr
, &xlen
, dir
== DMA_DIRECTION_FROM_DEVICE
,
203 MEMTXATTRS_UNSPECIFIED
);
209 * address_space_unmap: Unmaps a memory region previously mapped
210 * by dma_memory_map()
212 * Will also mark the memory as dirty if @dir == %DMA_DIRECTION_FROM_DEVICE.
213 * @access_len gives the amount of memory that was actually read or written
216 * @as: #AddressSpace used
217 * @buffer: host pointer as returned by address_space_map()
218 * @len: buffer length as returned by address_space_map()
219 * @dir: indicates the transfer direction
220 * @access_len: amount of data actually transferred
222 static inline void dma_memory_unmap(AddressSpace
*as
,
223 void *buffer
, dma_addr_t len
,
224 DMADirection dir
, dma_addr_t access_len
)
226 address_space_unmap(as
, buffer
, (hwaddr
)len
,
227 dir
== DMA_DIRECTION_FROM_DEVICE
, access_len
);
/* Generate typed little/big-endian load and store DMA accessors, e.g.
 * lduw_le_dma()/stw_le_dma().  Loads convert from the given endianness to
 * host order; stores convert from host order before writing. */
#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
    static inline uint##_bits##_t ld##_lname##_##_end##_dma(AddressSpace *as, \
                                                            dma_addr_t addr) \
    { \
        uint##_bits##_t val; \
        dma_memory_read(as, addr, &val, (_bits) / 8); \
        return _end##_bits##_to_cpu(val); \
    } \
    static inline void st##_sname##_##_end##_dma(AddressSpace *as, \
                                                 dma_addr_t addr, \
                                                 uint##_bits##_t val) \
    { \
        val = cpu_to_##_end##_bits(val); \
        dma_memory_write(as, addr, &val, (_bits) / 8); \
    }
246 static inline uint8_t ldub_dma(AddressSpace
*as
, dma_addr_t addr
)
250 dma_memory_read(as
, addr
, &val
, 1);
254 static inline void stb_dma(AddressSpace
*as
, dma_addr_t addr
, uint8_t val
)
256 dma_memory_write(as
, addr
, &val
, 1);
259 DEFINE_LDST_DMA(uw
, w
, 16, le
);
260 DEFINE_LDST_DMA(l
, l
, 32, le
);
261 DEFINE_LDST_DMA(q
, q
, 64, le
);
262 DEFINE_LDST_DMA(uw
, w
, 16, be
);
263 DEFINE_LDST_DMA(l
, l
, 32, be
);
264 DEFINE_LDST_DMA(q
, q
, 64, be
);
266 #undef DEFINE_LDST_DMA
268 struct ScatterGatherEntry
{
273 void qemu_sglist_init(QEMUSGList
*qsg
, DeviceState
*dev
, int alloc_hint
,
275 void qemu_sglist_add(QEMUSGList
*qsg
, dma_addr_t base
, dma_addr_t len
);
276 void qemu_sglist_destroy(QEMUSGList
*qsg
);
279 typedef BlockAIOCB
*DMAIOFunc(int64_t offset
, QEMUIOVector
*iov
,
280 BlockCompletionFunc
*cb
, void *cb_opaque
,
283 BlockAIOCB
*dma_blk_io(AioContext
*ctx
,
284 QEMUSGList
*sg
, uint64_t offset
, uint32_t align
,
285 DMAIOFunc
*io_func
, void *io_func_opaque
,
286 BlockCompletionFunc
*cb
, void *opaque
, DMADirection dir
);
287 BlockAIOCB
*dma_blk_read(BlockBackend
*blk
,
288 QEMUSGList
*sg
, uint64_t offset
, uint32_t align
,
289 BlockCompletionFunc
*cb
, void *opaque
);
290 BlockAIOCB
*dma_blk_write(BlockBackend
*blk
,
291 QEMUSGList
*sg
, uint64_t offset
, uint32_t align
,
292 BlockCompletionFunc
*cb
, void *opaque
);
293 uint64_t dma_buf_read(uint8_t *ptr
, int32_t len
, QEMUSGList
*sg
);
294 uint64_t dma_buf_write(uint8_t *ptr
, int32_t len
, QEMUSGList
*sg
);
296 void dma_acct_start(BlockBackend
*blk
, BlockAcctCookie
*cookie
,
297 QEMUSGList
*sg
, enum BlockAcctType type
);
/**
 * dma_aligned_pow2_mask: Return the address bit mask of the largest
 * power of 2 size less or equal than @end - @start + 1, aligned with @start,
 * and bounded by 1 << @max_addr_bits bits.
 *
 * @start: range start address
 * @end: range end address (greater than @start)
 * @max_addr_bits: max address bits (<= 64)
 */
uint64_t dma_aligned_pow2_mask(uint64_t start, uint64_t end,
                               int max_addr_bits);