/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "sysemu/dma.h"
#include "trace.h"
#include "qemu/range.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"

/* #define DEBUG_IOMMU */
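
/* Fill @len bytes at DMA address @addr in @as with the constant byte @c,
 * bouncing through a small on-stack buffer. */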
int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(as, DMA_DIRECTION_FROM_DEVICE);

#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;
    bool error = false;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        error |= address_space_rw(as, addr, fillbuf, l, true);
        len -= l;
        addr += l;
    }

    return error;
}
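
/* Initialize a scatter-gather list with room for @alloc_hint entries and
 * take a reference on the owning device. */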
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->as = as;
    qsg->dev = dev;
    object_ref(OBJECT(dev));
}
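
/* Append a (base, len) entry to the list, growing the array as needed. */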
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}
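
/* Drop the device reference, free the entry array and clear the list. */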
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    object_unref(OBJECT(qsg->dev));
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}
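
/* Per-request state for scatter-gather block I/O. */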
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    bool in_cancel;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);
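
/* When dma_memory_map() cannot map a scatter-gather entry (e.g. no bounce
 * buffer is available), the request is parked and later resumed from a
 * bottom half. */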
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}
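
/* Complete the request: unmap buffers, run the caller's callback and
 * release the AIOCB unless a cancellation still references it. */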
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel.  */
        qemu_aio_release(dbs);
    }
}
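
/* Core state machine: map as many scatter-gather entries as possible into
 * the I/O vector, submit the next chunk of I/O, and run again from the
 * completion callback until the whole list has been transferred. */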
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }
    dma_bdrv_unmap(dbs);

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir);
        if (!mem)
            break;
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    if (dbs->iov.size & ~BDRV_SECTOR_MASK) {
        qemu_iovec_discard_back(&dbs->iov, dbs->iov.size & ~BDRV_SECTOR_MASK);
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}
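
/* Cancel the in-flight child request, then complete the DMA AIOCB. */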
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}

static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};
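
/* Start a scatter-gather request on @bs through @io_func; progress is
 * driven by dma_bdrv_cb(). */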
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->in_cancel = false;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}
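
/* Convenience wrappers for scatter-gather reads from and writes to the
 * block device. */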
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}
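
/* Copy up to @len bytes between the linear buffer @ptr and the
 * scatter-gather list; return the residual (untransferred) byte count
 * of @sg. */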
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->as, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}
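
/* Start I/O accounting for a transfer covering the whole @sg. */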
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}