/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "trace.h"
#include "qemu-thread.h"

/* #define DEBUG_IOMMU */

static void do_dma_memory_set(dma_addr_t addr, uint8_t c, dma_addr_t len)
{
#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;

    /* Fill the region in FILLBUF_SIZE chunks from a constant-filled bounce buffer. */
    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        cpu_physical_memory_rw(addr, fillbuf, l, true);
        len -= l;
        addr += l;
    }
}

int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(dma, DMA_DIRECTION_FROM_DEVICE);

    /* Go through the IOMMU if one is present, otherwise fill physical memory directly. */
    if (dma_has_iommu(dma)) {
        return iommu_dma_memory_set(dma, addr, c, len);
    }
    do_dma_memory_set(addr, c, len);

    return 0;
}

void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->dma = dma;
}

void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}

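/*
 * Illustrative sketch (not part of this file): a device emulation would
 * typically build a QEMUSGList from guest-provided descriptors and hand it
 * to dma_bdrv_read()/dma_bdrv_write() below.  The descriptor names, the
 * two-entry request and dma_context are hypothetical.
 *
 *     QEMUSGList qsg;
 *
 *     qemu_sglist_init(&qsg, 2, dma_context);
 *     qemu_sglist_add(&qsg, desc0_addr, desc0_len);
 *     qemu_sglist_add(&qsg, desc1_addr, desc1_len);
 *     dma_bdrv_read(bs, &qsg, sector, my_completion_cb, my_state);
 *     ...
 *     qemu_sglist_destroy(&qsg);    // once the request has completed
 */
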
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    bool in_cancel;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->dma, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel. */
        qemu_aio_release(dbs);
    }
}

static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    /* Map as much of the remaining scatter/gather list as possible. */
    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->dma, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    /* Nothing could be mapped: wait for bounce buffers to become available. */
    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
}

static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}

static AIOPool dma_aio_pool = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};

BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}

BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}

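/*
 * Sketch of a completion callback as a caller of dma_bdrv_read()/write()
 * might provide it (the names here are hypothetical, not from this file).
 * The callback runs once the whole scatter/gather transfer has finished or
 * failed, with the block layer's return code in ret.
 *
 *     static void my_completion_cb(void *opaque, int ret)
 *     {
 *         MyDeviceState *s = opaque;
 *
 *         if (ret < 0) {
 *             // report the error to the guest, e.g. via device status bits
 *         }
 *         // raise the device's completion interrupt, free request state, etc.
 *     }
 */
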
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->dma, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}

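/*
 * Hypothetical usage note (names are illustrative): dma_buf_read() and
 * dma_buf_write() above are convenient for small synchronous copies between
 * a device-internal bounce buffer and a guest scatter/gather list, e.g.
 *
 *     uint8_t sense[18];
 *     ...
 *     dma_buf_read(sense, sizeof(sense), &req->qsg);
 *
 * The return value is the number of bytes of the QEMUSGList that were not
 * covered by the copy.
 */
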
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}

bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
                            DMADirection dir)
{
    target_phys_addr_t paddr, plen;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_check context=%p addr=0x" DMA_ADDR_FMT
            " len=0x" DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
#endif

    while (len) {
        if (dma->translate(dma, addr, &paddr, &plen, dir) != 0) {
            return false;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        len -= plen;
        addr += plen;
    }

    return true;
}

int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                        void *buf, dma_addr_t len, DMADirection dir)
{
    target_phys_addr_t paddr, plen;
    int err;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_rw context=%p addr=0x" DMA_ADDR_FMT " len=0x"
            DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
#endif

    while (len) {
        err = dma->translate(dma, addr, &paddr, &plen, dir);
        if (err) {
            /*
             * In case of failure on reads from the guest, we clean the
             * destination buffer so that a device that doesn't test
             * for errors will not expose qemu internal memory.
             */
            memset(buf, 0, len);
            return -1;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        cpu_physical_memory_rw(paddr, buf, plen,
                               dir == DMA_DIRECTION_FROM_DEVICE);

        len -= plen;
        addr += plen;
        buf += plen;
    }

    return 0;
}

int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
                         dma_addr_t len)
{
    target_phys_addr_t paddr, plen;
    int err;

#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_memory_set context=%p addr=0x" DMA_ADDR_FMT
            " len=0x" DMA_ADDR_FMT "\n", dma, addr, len);
#endif

    while (len) {
        err = dma->translate(dma, addr, &paddr, &plen,
                             DMA_DIRECTION_FROM_DEVICE);
        if (err) {
            return err;
        }

        /* The translation might be valid for larger regions. */
        if (plen > len) {
            plen = len;
        }

        do_dma_memory_set(paddr, c, plen);

        len -= plen;
        addr += plen;
    }

    return 0;
}

void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
                      DMAMapFunc map, DMAUnmapFunc unmap)
{
#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_context_init(%p, %p, %p, %p)\n",
            dma, translate, map, unmap);
#endif
    dma->translate = translate;
    dma->map = map;
    dma->unmap = unmap;
}

void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
                           DMADirection dir)
{
    int err;
    target_phys_addr_t paddr, plen;
    void *buf;

    if (dma->map) {
        return dma->map(dma, addr, len, dir);
    }

    plen = *len;
    err = dma->translate(dma, addr, &paddr, &plen, dir);
    if (err) {
        return NULL;
    }

    /*
     * If this is true, the virtual region is contiguous,
     * but the translated physical region isn't. We just
     * clamp *len, much like cpu_physical_memory_map() does.
     */
    if (plen < *len) {
        *len = plen;
    }

    buf = cpu_physical_memory_map(paddr, &plen,
                                  dir == DMA_DIRECTION_FROM_DEVICE);
    *len = plen;

    return buf;
}

void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len,
                            DMADirection dir, dma_addr_t access_len)
{
    if (dma->unmap) {
        dma->unmap(dma, buffer, len, dir, access_len);
        return;
    }

    cpu_physical_memory_unmap(buffer, len,
                              dir == DMA_DIRECTION_FROM_DEVICE,
                              access_len);
}