/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "trace.h"
#include "qemu-thread.h"

/* #define DEBUG_IOMMU */

static void do_dma_memory_set(dma_addr_t addr, uint8_t c, dma_addr_t len)
{
#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;

    /* Fill guest memory in bounce-buffer-sized chunks. */
    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        cpu_physical_memory_rw(addr, fillbuf, l, true);
        len -= l;
        addr += l;
    }
}

int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(dma, DMA_DIRECTION_FROM_DEVICE);

    if (dma_has_iommu(dma)) {
        return iommu_dma_memory_set(dma, addr, c, len);
    }
    do_dma_memory_set(addr, c, len);

    return 0;
}

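/*
 * Example (illustrative sketch, not part of this file): a device model
 * could zero a 4 KiB guest buffer before a transfer like this, where
 * "dev->dma" and "buf_addr" are hypothetical device state:
 *
 *     if (dma_memory_set(dev->dma, buf_addr, 0, 4096)) {
 *         ... signal a DMA fault to the guest ...
 *     }
 */
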
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->dma = dma;
}

void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        /* Grow geometrically so repeated adds stay amortized O(1). */
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
}

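/*
 * Example (illustrative sketch): building a scatter/gather list from two
 * hypothetical guest-visible descriptors before submitting I/O:
 *
 *     QEMUSGList qsg;
 *
 *     qemu_sglist_init(&qsg, 2, dev->dma);
 *     qemu_sglist_add(&qsg, desc[0].addr, desc[0].len);
 *     qemu_sglist_add(&qsg, desc[1].addr, desc[1].len);
 *     ... submit, e.g. with dma_bdrv_read() below ...
 *     qemu_sglist_destroy(&qsg);
 */
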
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    bool in_cancel;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->dma, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel.  */
        qemu_aio_release(dbs);
    }
}

static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    /* Map as much of the remaining scatter/gather list as possible. */
    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->dma, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        /* Nothing could be mapped; retry once bounce buffers free up. */
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}

static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}

static AIOPool dma_aio_pool = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};

BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}

BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}

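/*
 * Example (illustrative sketch): a hypothetical device starting a guest
 * read and completing it from a callback; "MyRequest" and "req" stand in
 * for the device's own request state:
 *
 *     static void my_dma_done(void *opaque, int ret)
 *     {
 *         MyRequest *req = opaque;
 *
 *         qemu_sglist_destroy(&req->qsg);
 *         ... check ret, raise the device's completion interrupt ...
 *     }
 *
 *     req->aiocb = dma_bdrv_read(dev->bs, &req->qsg, req->sector,
 *                                my_dma_done, req);
 */
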
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->dma, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}

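/*
 * Example (illustrative sketch): pushing a small, fully prepared response
 * buffer out to the guest's scatter/gather list, as a SCSI model might do
 * for sense data; "req->qsg" is hypothetical:
 *
 *     uint8_t buf[64];
 *     uint64_t resid;
 *
 *     ... fill buf ...
 *     resid = dma_buf_read(buf, sizeof(buf), &req->qsg);
 *     ... resid is the number of sglist bytes left untransferred ...
 */
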
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}

bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
                            DMADirection dir)
{
    target_phys_addr_t paddr, plen;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_check context=%p addr=0x" DMA_ADDR_FMT
            " len=0x" DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
#endif

    while (len) {
        if (dma->translate(dma, addr, &paddr, &plen, dir) != 0) {
            return false;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        len -= plen;
        addr += plen;
    }

    return true;
}

int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                        void *buf, dma_addr_t len, DMADirection dir)
{
    target_phys_addr_t paddr, plen;
    int err;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_rw context=%p addr=0x" DMA_ADDR_FMT " len=0x"
            DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
#endif

    while (len) {
        err = dma->translate(dma, addr, &paddr, &plen, dir);
        if (err) {
            /*
             * In case of failure on reads from the guest, we clean the
             * destination buffer so that a device that doesn't test
             * for errors will not expose qemu internal memory.
             */
            memset(buf, 0, len);
            return -1;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        cpu_physical_memory_rw(paddr, buf, plen,
                               dir == DMA_DIRECTION_FROM_DEVICE);

        len -= plen;
        addr += plen;
        buf += plen;
    }

    return 0;
}

int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
                         dma_addr_t len)
{
    target_phys_addr_t paddr, plen;
    int err;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_set context=%p addr=0x" DMA_ADDR_FMT
            " len=0x" DMA_ADDR_FMT "\n", dma, addr, len);
#endif

    while (len) {
        err = dma->translate(dma, addr, &paddr, &plen,
                             DMA_DIRECTION_FROM_DEVICE);
        if (err) {
            return err;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        do_dma_memory_set(paddr, c, plen);

        len -= plen;
        addr += plen;
    }

    return 0;
}

void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
                      DMAMapFunc map, DMAUnmapFunc unmap)
{
#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_context_init(%p, %p, %p, %p)\n",
            dma, translate, map, unmap);
#endif
    dma->translate = translate;
    dma->map = map;
    dma->unmap = unmap;
}

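/*
 * Example (illustrative sketch): an IOMMU model that only needs address
 * translation can leave the map/unmap hooks NULL and fall back to the
 * cpu_physical_memory_*() paths below; "my_iommu_translate" is a
 * hypothetical DMATranslateFunc:
 *
 *     dma_context_init(&iommu->dma, my_iommu_translate, NULL, NULL);
 */
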
void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
                           DMADirection dir)
{
    int err;
    target_phys_addr_t paddr, plen;
    void *buf;

    if (dma->map) {
        return dma->map(dma, addr, len, dir);
    }

    plen = *len;
    err = dma->translate(dma, addr, &paddr, &plen, dir);
    if (err) {
        return NULL;
    }

    /*
     * If this is true, the virtual region is contiguous,
     * but the translated physical region isn't. We just
     * clamp *len, much like cpu_physical_memory_map() does.
     */
    if (plen < *len) {
        *len = plen;
    }

    buf = cpu_physical_memory_map(paddr, &plen,
                                  dir == DMA_DIRECTION_FROM_DEVICE);
    *len = plen;

    return buf;
}

void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len,
                            DMADirection dir, dma_addr_t access_len)
{
    if (dma->unmap) {
        dma->unmap(dma, buffer, len, dir, access_len);
        return;
    }

    cpu_physical_memory_unmap(buffer, len,
                              dir == DMA_DIRECTION_FROM_DEVICE,
                              access_len);
}

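/*
 * Example (illustrative sketch): mapping a guest region for direct access
 * through the public dma_memory_map()/dma_memory_unmap() wrappers, which
 * end up here when an IOMMU is present; note the mapping may come back
 * shorter than requested:
 *
 *     dma_addr_t mlen = len;
 *     void *p = dma_memory_map(dma, addr, &mlen, DMA_DIRECTION_FROM_DEVICE);
 *
 *     if (p) {
 *         ... access up to mlen bytes at p ...
 *         dma_memory_unmap(dma, p, mlen, DMA_DIRECTION_FROM_DEVICE, mlen);
 *     }
 */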