/* dma-helpers.c */

/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "block_int.h"
#include "cache-utils.h"

static AIOPool dma_aio_pool;

void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
}

void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
                     target_phys_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        /* Grow the entry array geometrically when it is full. */
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = qemu_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    qemu_free(qsg->sg);
}
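
/*
 * Usage sketch (hypothetical caller, not part of this file): a device model
 * builds a QEMUSGList from guest-physical addresses before starting a DMA
 * transfer.  GUEST_ADDR0/GUEST_ADDR1 are illustrative placeholders.
 *
 *     QEMUSGList qsg;
 *     qemu_sglist_init(&qsg, 2);                // hint: expect two entries
 *     qemu_sglist_add(&qsg, GUEST_ADDR0, 4096); // first segment
 *     qemu_sglist_add(&qsg, GUEST_ADDR1, 512);  // second segment
 *     // ... start the transfer with dma_bdrv_read()/dma_bdrv_write() ...
 *     qemu_sglist_destroy(&qsg);                // after completion
 */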

typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;          /* in-flight request at the block layer */
    QEMUSGList *sg;                 /* scatter/gather list being transferred */
    uint64_t sector_num;            /* next sector to read or write */
    int is_write;
    int sg_cur_index;               /* current entry within sg */
    target_phys_addr_t sg_cur_byte; /* offset within the current entry */
    QEMUIOVector iov;               /* host mappings for the chunk in flight */
    QEMUBH *bh;                     /* bottom half used to retry after a map failure */
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(opaque, 0);
}

/* Registered with cpu_register_map_client(); invoked once mappings free up. */
static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
                                  dbs->iov.iov[i].iov_len);
    }
}

/*
 * Completion callback, also used to (re)start the transfer: map as much of
 * the remaining scatter/gather list as possible and submit the next chunk.
 */
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);
    qemu_iovec_reset(&dbs->iov);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        /* Whole list transferred, or an error: report to the caller. */
        dbs->common.cb(dbs->common.opaque, ret);
        qemu_iovec_destroy(&dbs->iov);
        qemu_aio_release(dbs);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
        if (!mem)
            break;
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        /* Nothing could be mapped; retry once mappings are released. */
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    if (dbs->is_write) {
        dbs->acb = bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
                                   dbs->iov.size / 512, dma_bdrv_cb, dbs);
    } else {
        dbs->acb = bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
                                  dbs->iov.size / 512, dma_bdrv_cb, dbs);
    }
    if (!dbs->acb) {
        /* Submission failed; dma_bdrv_io sees dbs->acb == NULL. */
        dma_bdrv_unmap(dbs);
        qemu_iovec_destroy(&dbs->iov);
        return;
    }
}

static BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    BlockDriverCompletionFunc *cb, void *opaque,
    int is_write)
{
    int i;
    QEMUIOVector *qiov;
    DMAAIOCB *dbs = qemu_aio_get_pool(&dma_aio_pool, bs, cb, opaque);

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->is_write = is_write;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);

    if (!is_write) {
        /* Sync the I/D caches over the read data, on hosts that need it. */
        qiov = &dbs->iov;
        for (i = 0; i < qiov->niov; ++i) {
            qemu_sync_idcache((unsigned long)qiov->iov[i].iov_base,
                (unsigned long)(qiov->iov[i].iov_base + qiov->iov[i].iov_len));
        }
    }

    if (!dbs->acb) {
        qemu_aio_release(dbs);
        return NULL;
    }
    return &dbs->common;
}

BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, cb, opaque, 0);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, cb, opaque, 1);
}
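
/*
 * Usage sketch (hypothetical device code, not part of this file): issuing a
 * scatter/gather read and checking for submission failure.  my_dma_done and
 * the opaque pointer s are illustrative assumptions.
 *
 *     static void my_dma_done(void *opaque, int ret)
 *     {
 *         // ret == 0 on success, ret < 0 on I/O error
 *     }
 *
 *     BlockDriverAIOCB *aiocb;
 *     aiocb = dma_bdrv_read(bs, &qsg, first_sector, my_dma_done, s);
 *     if (!aiocb) {
 *         // the request could not be submitted
 *     }
 */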

static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    if (dbs->acb) {
        bdrv_aio_cancel(dbs->acb);
    }
}

void dma_helper_init(void)
{
    aio_pool_init(&dma_aio_pool, sizeof(DMAAIOCB), dma_aio_cancel);
}
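
/*
 * Initialization sketch (an assumption about the caller, not enforced here):
 * dma_helper_init() is expected to run once at startup, before the first
 * dma_bdrv_read()/dma_bdrv_write() request, so that dma_aio_pool is set up:
 *
 *     dma_helper_init();   // once, during emulator setup
 */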