/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "block_int.h"

static AIOPool dma_aio_pool;

void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
}

void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
                     target_phys_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        /* Grow geometrically so repeated adds stay amortized O(1). */
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = qemu_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    qemu_free(qsg->sg);
}

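/*
 * Usage sketch (hypothetical, not part of this file): a device emulation
 * might build a scatter/gather list of guest-physical regions like this.
 * The addresses and lengths below are made-up illustrations.
 *
 *     QEMUSGList qsg;
 *
 *     qemu_sglist_init(&qsg, 2);            // hint: about two entries
 *     qemu_sglist_add(&qsg, 0x1000, 512);   // first guest region
 *     qemu_sglist_add(&qsg, 0x9000, 1024);  // second, discontiguous region
 *     ...                                    // issue the I/O, see below
 *     qemu_sglist_destroy(&qsg);            // frees the entry array
 */
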
/* State for one in-flight scatter/gather block transfer. */
typedef struct {
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    int is_write;
    int sg_cur_index;
    target_phys_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
} DMABlockState;

static void dma_bdrv_cb(void *opaque, int ret);

static void reschedule_dma(void *opaque)
{
    DMABlockState *dbs = (DMABlockState *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(opaque, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMABlockState *dbs = (DMABlockState *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

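/*
 * dma_bdrv_cb() is both the starting point and the completion callback of
 * a transfer: it unmaps the regions covered by the previous iteration,
 * then maps as many remaining scatter/gather entries as it can and issues
 * the next bdrv_aio_readv()/bdrv_aio_writev() with itself as the callback.
 * If nothing could be mapped, it registers a map client and retries via a
 * bottom half once cpu_physical_memory_map() resources are freed.
 */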
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMABlockState *dbs = (DMABlockState *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;
    int i;

    dbs->sector_num += dbs->iov.size / 512;
    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
                                  dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        /* Transfer complete (or failed): hand the result to the caller. */
        dbs->acb->cb(dbs->acb->opaque, ret);
        qemu_iovec_destroy(&dbs->iov);
        qemu_aio_release(dbs->acb);
        qemu_free(dbs);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
        if (!mem)
            break;
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        /* Nothing could be mapped; retry when mappings become available. */
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    if (dbs->is_write) {
        bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
                        dbs->iov.size / 512, dma_bdrv_cb, dbs);
    } else {
        bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
                       dbs->iov.size / 512, dma_bdrv_cb, dbs);
    }
}

static BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    BlockDriverCompletionFunc *cb, void *opaque,
    int is_write)
{
    DMABlockState *dbs = qemu_malloc(sizeof(*dbs));

    dbs->bs = bs;
    dbs->acb = qemu_aio_get_pool(&dma_aio_pool, bs, cb, opaque);
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->is_write = is_write;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    /* Kick off the first mapping/IO iteration. */
    dma_bdrv_cb(dbs, 0);
    return dbs->acb;
}

BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, cb, opaque, 0);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, cb, opaque, 1);
}

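/*
 * Usage sketch (hypothetical, not part of this file): issuing a DMA read
 * into the scatter/gather list built above.  "MyDeviceState", "s" and
 * "my_dma_complete" are made-up names for illustration; the real callers
 * live in the device models.
 *
 *     static void my_dma_complete(void *opaque, int ret)
 *     {
 *         MyDeviceState *s = opaque;
 *         // ret < 0 on I/O error, 0 on success; raise the device IRQ here
 *     }
 *
 *     dma_bdrv_read(s->bs, &s->qsg, sector_num, my_dma_complete, s);
 */
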
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMABlockState *dbs = (DMABlockState *)acb->opaque;

    bdrv_aio_cancel(dbs->acb);
}

void dma_helper_init(void)
{
    aio_pool_init(&dma_aio_pool, sizeof(BlockDriverAIOCB), dma_aio_cancel);
}