/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "trace.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"

/* #define DEBUG_IOMMU */

int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(as, DMA_DIRECTION_FROM_DEVICE);

#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;
    bool error = false;

    /* Fill the guest-visible range one bounce-buffer chunk at a time. */
    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        error |= address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED,
                                  fillbuf, l, true);
        len -= l;
        addr += l;
    }

    return error;
}

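/*
 * Example (a minimal sketch; the address space and guest address are
 * supplied by the calling device model, not defined here):
 *
 *     if (dma_memory_set(&address_space_memory, desc_addr, 0, 4096)) {
 *         ... the fill touched an inaccessible guest address ...
 *     }
 */
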
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->as = as;
    qsg->dev = dev;
    object_ref(OBJECT(dev));
}

void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        /* Grow geometrically so repeated adds stay amortized O(1). */
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    object_unref(OBJECT(qsg->dev));
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}

typedef struct {
    BlockAIOCB common;
    AioContext *ctx;
    BlockAIOCB *acb;
    QEMUSGList *sg;
    uint64_t offset;
    DMADirection dir;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
    void *io_func_opaque;
} DMAAIOCB;

static void dma_blk_cb(void *opaque, int ret);

static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_blk_cb(dbs, 0);
}

static void dma_blk_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_blk_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    qemu_aio_unref(dbs);
}

static void dma_blk_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_blk_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->offset += dbs->iov.size;

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }
    dma_blk_unmap(dbs);

    /* Map as much of the remaining scatter/gather list as possible. */
    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        /* Nothing could be mapped; retry once a bounce buffer frees up. */
        trace_dma_map_wait(dbs);
        dbs->bh = aio_bh_new(dbs->ctx, reschedule_dma, dbs);
        cpu_register_map_client(dbs->bh);
        return;
    }

    if (dbs->iov.size & ~BDRV_SECTOR_MASK) {
        qemu_iovec_discard_back(&dbs->iov, dbs->iov.size & ~BDRV_SECTOR_MASK);
    }

    dbs->acb = dbs->io_func(dbs->offset, &dbs->iov,
                            dma_blk_cb, dbs, dbs->io_func_opaque);
    assert(dbs->acb);
}

static void dma_aio_cancel(BlockAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        /* Cancelling the inner request will re-enter dma_blk_cb(). */
        blk_aio_cancel_async(dbs->acb);
    }
    if (dbs->bh) {
        cpu_unregister_map_client(dbs->bh);
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
}

static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel_async       = dma_aio_cancel,
};

BlockAIOCB *dma_blk_io(AioContext *ctx,
    QEMUSGList *sg, uint64_t offset,
    DMAIOFunc *io_func, void *io_func_opaque,
    BlockCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, NULL, cb, opaque);

    trace_dma_blk_io(dbs, io_func_opaque, offset, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->sg = sg;
    dbs->ctx = ctx;
    dbs->offset = offset;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->io_func_opaque = io_func_opaque;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    /* Kick off the state machine; I/O continues from dma_blk_cb(). */
    dma_blk_cb(dbs, 0);
    return &dbs->common;
}

static
BlockAIOCB *dma_blk_read_io_func(int64_t offset, QEMUIOVector *iov,
                                 BlockCompletionFunc *cb, void *cb_opaque,
                                 void *opaque)
{
    BlockBackend *blk = opaque;
    return blk_aio_preadv(blk, offset, iov, 0, cb, cb_opaque);
}

BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t offset,
                         void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk_get_aio_context(blk),
                      sg, offset, dma_blk_read_io_func, blk, cb, opaque,
                      DMA_DIRECTION_FROM_DEVICE);
}

static
BlockAIOCB *dma_blk_write_io_func(int64_t offset, QEMUIOVector *iov,
                                  BlockCompletionFunc *cb, void *cb_opaque,
                                  void *opaque)
{
    BlockBackend *blk = opaque;
    return blk_aio_pwritev(blk, offset, iov, 0, cb, cb_opaque);
}

BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t offset,
                          void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk_get_aio_context(blk),
                      sg, offset, dma_blk_write_io_func, blk, cb, opaque,
                      DMA_DIRECTION_TO_DEVICE);
}

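/*
 * Example (a sketch; MyDevState and its fields are hypothetical device
 * state, not defined in this file):
 *
 *     static void mydev_dma_done(void *opaque, int ret)
 *     {
 *         MyDevState *s = opaque;
 *
 *         qemu_sglist_destroy(&s->qsg);
 *         ... complete the request; ret < 0 indicates an I/O error ...
 *     }
 *
 *     s->aiocb = dma_blk_read(s->blk, &s->qsg, s->offset,
 *                             mydev_dma_done, s);
 */
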
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->as, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}

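/*
 * Example (a sketch; "resp" and "qsg" are illustrative): publishing a
 * small device-built response into guest scatter/gather memory.  The
 * return value is the number of sglist bytes left untouched:
 *
 *     uint64_t resid = dma_buf_read(resp, sizeof(resp), &qsg);
 *     if (resid) {
 *         ... the guest buffer was larger than the response ...
 *     }
 */
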
void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    block_acct_start(blk_get_stats(blk), cookie, sg->size, type);
}
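
/*
 * Example (a sketch): starting accounting just before submitting DMA
 * I/O; the matching block_acct_done()/block_acct_failed() call belongs
 * in the device's completion callback:
 *
 *     dma_acct_start(s->blk, &s->acct, &s->qsg, BLOCK_ACCT_WRITE);
 *     s->aiocb = dma_blk_write(s->blk, &s->qsg, s->offset,
 *                              mydev_dma_done, s);
 */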