/*
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */
#include "dma.h"
#include "block_int.h"
#include "trace.h"
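/*
 * Scatter/gather list helpers: a QEMUSGList is a growable array of
 * ScatterGatherEntry (guest physical base + length) plus running totals
 * of the entry count (nsg) and byte size (size).
 */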
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
}
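/*
 * Append one entry, growing the array geometrically (2n + 1) when it is full
 * and accumulating the total transfer size.
 */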
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}
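/* Free the entry array; the QEMUSGList structure itself belongs to the caller. */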
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
}
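/*
 * State for one in-flight scatter/gather block request.  'sg_cur_index' and
 * 'sg_cur_byte' track how far into the QEMUSGList the request has been
 * mapped; 'iov' holds the host addresses mapped so far; 'acb' is the
 * outstanding block-layer request, if any.
 */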
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    bool to_dev;
    bool in_cancel;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;
static void dma_bdrv_cb(void *opaque, int ret);
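/*
 * If cpu_physical_memory_map() could not map any guest memory at all (for
 * example because the bounce buffer is busy), the request registers itself
 * with cpu_register_map_client() and resumes from a bottom half once mapping
 * space becomes available again.
 */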
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}
static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}
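/* Unmap every host buffer added so far and reset the I/O vector. */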
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len, !dbs->to_dev,
                                  dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}
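/*
 * Finish the request: unmap the buffers, invoke the caller's completion
 * callback (unless it was cleared by cancellation) and release the AIOCB.
 */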
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel.  */
        qemu_aio_release(dbs);
    }
}
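/*
 * Completion callback and main driver of the request.  Each pass maps as
 * many remaining scatter/gather entries as possible into 'iov' and submits
 * one block-layer request through io_func; when that request completes, the
 * cycle repeats until the list is exhausted or an error is reported.
 */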
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->to_dev);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}
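/*
 * Cancel the outstanding block-layer request, if any, then complete the DMA
 * request without calling back into the caller (common.cb is cleared first).
 */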
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}
static AIOPool dma_aio_pool = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};
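/*
 * Public entry point: start a scatter/gather transfer of 512-byte sectors.
 * 'to_dev' is true when data flows from guest memory to the device (a write),
 * false for a read into guest memory.
 */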
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, bool to_dev)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, to_dev);

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->to_dev = to_dev;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque, false);
}
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque, true);
}
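/*
 * Synchronous counterpart to the AIO path above: copy between a contiguous
 * host buffer and the scatter/gather list with cpu_physical_memory_rw(),
 * returning the number of bytes of the list that were not copied (the
 * residual).
 */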
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg, bool to_dev)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        cpu_physical_memory_rw(entry.base, ptr, xfer, !to_dev);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, 0);
}
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, 1);
}
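/* Start block accounting for a transfer whose size is taken from the sg list. */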
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}
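/*
 * Usage sketch (illustrative only, not part of this file): a device model
 * with a hypothetical request object 'req' and callback 'device_dma_cb'
 * might drive these helpers roughly like this:
 *
 *     QEMUSGList qsg;
 *     qemu_sglist_init(&qsg, expected_entries);
 *     // for each guest-supplied descriptor:
 *     qemu_sglist_add(&qsg, desc_addr, desc_len);
 *
 *     dma_acct_start(bs, &req->acct, &qsg, BDRV_ACCT_READ);
 *     req->aiocb = dma_bdrv_read(bs, &qsg, sector, device_dma_cb, req);
 *
 *     // in device_dma_cb(opaque, ret), once the transfer has finished:
 *     qemu_sglist_destroy(&qsg);
 *
 * Names such as 'req', 'expected_entries', 'desc_addr' and 'device_dma_cb'
 * are placeholders, not identifiers from this file.
 */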