/*
 * Virtio Block Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <qemu-common.h>
#include "virtio-blk.h"
#include "block_int.h"
#ifdef __linux__
# include <scsi/sg.h>
#endif
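
/*
 * Per-device state.  The embedded VirtIODevice must stay first so the
 * VirtIODevice pointer handed to the virtio callbacks can be cast back to a
 * VirtIOBlock (see to_virtio_blk() below).  rq chains write requests that
 * failed and were parked while the VM is stopped, so they can be restarted
 * later.
 */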
typedef struct VirtIOBlock
{
    VirtIODevice vdev;
    BlockDriverState *bs;
    VirtQueue *vq;
    void *rq;
} VirtIOBlock;
static VirtIOBlock *to_virtio_blk(VirtIODevice *vdev)
{
    return (VirtIOBlock *)vdev;
}
typedef struct VirtIOBlockReq
{
    VirtIOBlock *dev;
    VirtQueueElement elem;
    struct virtio_blk_inhdr *in;
    struct virtio_blk_outhdr *out;
    struct virtio_scsi_inhdr *scsi;
    QEMUIOVector qiov;
    struct VirtIOBlockReq *next;
} VirtIOBlockReq;
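
/*
 * Complete a request: write the status byte into the virtio_blk_inhdr that
 * sits in the guest's last input descriptor, push the element back onto the
 * virtqueue and notify the guest.
 */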
static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
{
    VirtIOBlock *s = req->dev;

    req->in->status = status;
    virtqueue_push(s->vq, &req->elem, req->qiov.size + sizeof(*req->in));
    virtio_notify(&s->vdev, s->vq);

    qemu_free(req);
}
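
/*
 * Decide what to do about a failed write, based on the drive's configured
 * error policy: park the request on s->rq and stop the VM, or report an
 * I/O error to the guest.  Returns zero if the caller should still complete
 * the request normally (the error is ignored), non-zero otherwise.
 */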
static int virtio_blk_handle_write_error(VirtIOBlockReq *req, int error)
{
    BlockInterfaceErrorAction action = drive_get_onerror(req->dev->bs);
    VirtIOBlock *s = req->dev;

    if (action == BLOCK_ERR_IGNORE)
        return 0;

    if ((error == ENOSPC && action == BLOCK_ERR_STOP_ENOSPC)
            || action == BLOCK_ERR_STOP_ANY) {
        req->next = s->rq;
        s->rq = req;
        vm_stop(0);
    } else {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
    }

    return 1;
}
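
/*
 * Common completion callback for reads and writes submitted to the block
 * layer.  Failed writes are passed through the error policy above; anything
 * else is completed with VIRTIO_BLK_S_OK.
 */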
static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;

    if (ret && (req->out->type & VIRTIO_BLK_T_OUT)) {
        if (virtio_blk_handle_write_error(req, -ret))
            return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
}
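
/*
 * Request allocation: virtio_blk_alloc_request() creates a zeroed request
 * bound to the device; virtio_blk_get_request() additionally pops the next
 * available element off the virtqueue and frees the request again if the
 * queue is empty.
 */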
static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
{
    VirtIOBlockReq *req = qemu_mallocz(sizeof(*req));
    req->dev = s;
    return req;
}
static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
{
    VirtIOBlockReq *req = virtio_blk_alloc_request(s);

    if (req != NULL) {
        if (!virtqueue_pop(s->vq, &req->elem)) {
            qemu_free(req);
            return NULL;
        }
    }

    return req;
}
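
/*
 * SCSI passthrough (VIRTIO_BLK_T_SCSI_CMD), forwarded to the host device
 * via the SG_IO ioctl and therefore only available on Linux hosts.
 *
 * Expected descriptor layout, as implied by the checks below:
 *
 *   out_sg[0]             virtio_blk_outhdr
 *   out_sg[1]             SCSI command block (CDB)
 *   out_sg[2..]           write payload, if any
 *   in_sg[0..in_num-4]    read payload, if any
 *   in_sg[in_num-3]       sense buffer
 *   in_sg[in_num-2]       virtio_scsi_inhdr
 *   in_sg[in_num-1]       virtio_blk_inhdr (status byte)
 */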
#ifdef __linux__
static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
    struct sg_io_hdr hdr;
    int ret;
    int status;
    int size = 0;
    int i;

    /*
     * We require at least one output segment each for the virtio_blk_outhdr
     * and the SCSI command block.
     *
     * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
     * and the sense buffer pointer in the input segments.
     */
    if (req->elem.out_num < 2 || req->elem.in_num < 3) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        return;
    }

    /*
     * No support for bidirectional commands yet.
     */
    if (req->elem.out_num > 2 && req->elem.in_num > 3) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
        return;
    }

    /*
     * The scsi inhdr is placed in the second-to-last input segment, just
     * before the regular inhdr.
     */
    req->scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;
    size = sizeof(*req->in) + sizeof(*req->scsi);

    memset(&hdr, 0, sizeof(struct sg_io_hdr));
    hdr.interface_id = 'S';
    hdr.cmd_len = req->elem.out_sg[1].iov_len;
    hdr.cmdp = req->elem.out_sg[1].iov_base;
    hdr.dxfer_len = 0;

    if (req->elem.out_num > 2) {
        /*
         * If there are more than the minimally required 2 output segments
         * there is write payload starting from the third iovec.
         */
        hdr.dxfer_direction = SG_DXFER_TO_DEV;
        hdr.iovec_count = req->elem.out_num - 2;

        for (i = 0; i < hdr.iovec_count; i++)
            hdr.dxfer_len += req->elem.out_sg[i + 2].iov_len;

        hdr.dxferp = req->elem.out_sg + 2;

    } else if (req->elem.in_num > 3) {
        /*
         * If we have more than 3 input segments the guest wants to actually
         * read data.
         */
        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        hdr.iovec_count = req->elem.in_num - 3;
        for (i = 0; i < hdr.iovec_count; i++)
            hdr.dxfer_len += req->elem.in_sg[i].iov_len;

        hdr.dxferp = req->elem.in_sg;
        size += hdr.dxfer_len;
    } else {
        /*
         * Some SCSI commands don't actually transfer any data.
         */
        hdr.dxfer_direction = SG_DXFER_NONE;
    }

    hdr.sbp = req->elem.in_sg[req->elem.in_num - 3].iov_base;
    hdr.mx_sb_len = req->elem.in_sg[req->elem.in_num - 3].iov_len;
    size += hdr.mx_sb_len;

    ret = bdrv_ioctl(req->dev->bs, SG_IO, &hdr);
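    /*
     * Map the outcome of the ioctl to a virtio-blk status: a failing ioctl is
     * reported as an unsupported request with nothing transferred, a non-zero
     * SCSI status as an I/O error, everything else as success.
     */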
    if (ret) {
        status = VIRTIO_BLK_S_UNSUPP;
        hdr.status = ret;
        hdr.resid = hdr.dxfer_len;
    } else if (hdr.status) {
        status = VIRTIO_BLK_S_IOERR;
    } else {
        status = VIRTIO_BLK_S_OK;
    }

    req->scsi->errors = hdr.status;
    req->scsi->residual = hdr.resid;
    req->scsi->sense_len = hdr.sb_len_wr;
    req->scsi->data_len = hdr.dxfer_len;

    virtio_blk_req_complete(req, status);
}
#else
static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
    virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
}
#endif /* __linux__ */
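
/*
 * Submit the data payload to the block layer.  req->out->sector is the
 * starting sector and the transfer length is derived from the total size of
 * the scatter/gather list, in 512-byte sectors; completion is signalled via
 * virtio_blk_rw_complete().
 */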
static void virtio_blk_handle_write(VirtIOBlockReq *req)
{
    bdrv_aio_writev(req->dev->bs, req->out->sector, &req->qiov,
                    req->qiov.size / 512, virtio_blk_rw_complete, req);
}
static void virtio_blk_handle_read(VirtIOBlockReq *req)
{
    bdrv_aio_readv(req->dev->bs, req->out->sector, &req->qiov,
                   req->qiov.size / 512, virtio_blk_rw_complete, req);
}
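
/*
 * Virtqueue "kick" handler: drain the queue, validate that each element
 * carries the fixed virtio_blk_outhdr/inhdr pair, and dispatch the request.
 */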
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    VirtIOBlockReq *req;

    while ((req = virtio_blk_get_request(s))) {
        if (req->elem.out_num < 1 || req->elem.in_num < 1) {
            fprintf(stderr, "virtio-blk missing headers\n");
            exit(1);
        }

        if (req->elem.out_sg[0].iov_len < sizeof(*req->out) ||
            req->elem.in_sg[req->elem.in_num - 1].iov_len < sizeof(*req->in)) {
            fprintf(stderr, "virtio-blk header not in correct element\n");
            exit(1);
        }

        req->out = (void *)req->elem.out_sg[0].iov_base;
        req->in = (void *)req->elem.in_sg[req->elem.in_num - 1].iov_base;
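
        /*
         * Dispatch on the request type: SCSI passthrough commands first,
         * then writes (VIRTIO_BLK_T_OUT set); everything else is treated as
         * a read.  For reads and writes the data segments are wrapped in a
         * QEMUIOVector that excludes the header descriptors.
         */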
        if (req->out->type & VIRTIO_BLK_T_SCSI_CMD) {
            virtio_blk_handle_scsi(req);
        } else if (req->out->type & VIRTIO_BLK_T_OUT) {
            qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
                                     req->elem.out_num - 1);
            virtio_blk_handle_write(req);
        } else {
            qemu_iovec_init_external(&req->qiov, &req->elem.in_sg[0],
                                     req->elem.in_num - 1);
            virtio_blk_handle_read(req);
        }
    }
    /*
     * FIXME: Want to check for completions before returning to guest mode,
     * so cached reads and writes are reported as quickly as possible. But
     * that should be done in the generic block layer.
     */
}
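
/*
 * VM state change handler: once the VM is running again, resubmit any write
 * requests that were parked on s->rq when the VM was stopped on a write
 * error.
 */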
static void virtio_blk_dma_restart_cb(void *opaque, int running, int reason)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;

    if (!running)
        return;

    s->rq = NULL;

    while (req) {
        virtio_blk_handle_write(req);
        req = req->next;
    }
}
static void virtio_blk_reset(VirtIODevice *vdev)
{
    /*
     * This should cancel pending requests, but can't do nicely until there
     * are per-device request lists.
     */
    qemu_aio_flush();
}
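
/*
 * Refresh the virtio-blk configuration space from the current backend state
 * (capacity and geometry) and copy it into the guest-visible config area.
 */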
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    struct virtio_blk_config blkcfg;
    uint64_t capacity;
    int cylinders, heads, secs;

    bdrv_get_geometry(s->bs, &capacity);
    bdrv_get_geometry_hint(s->bs, &cylinders, &heads, &secs);
    stq_raw(&blkcfg.capacity, capacity);
    stl_raw(&blkcfg.seg_max, 128 - 2);
    stw_raw(&blkcfg.cylinders, cylinders);
    blkcfg.heads = heads;
    blkcfg.sectors = secs;

    memcpy(config, &blkcfg, sizeof(blkcfg));
}
static uint32_t virtio_blk_get_features(VirtIODevice *vdev)
{
    uint32_t features = 0;

    features |= (1 << VIRTIO_BLK_F_SEG_MAX);
    features |= (1 << VIRTIO_BLK_F_GEOMETRY);
#ifdef __linux__
    features |= (1 << VIRTIO_BLK_F_SCSI);
#endif

    return features;
}
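
/*
 * Migration: in addition to the generic virtio state, save a marker byte
 * followed by the VirtQueueElement for every write request still parked on
 * s->rq, so it can be resubmitted on the destination.
 */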
static void virtio_blk_save(QEMUFile *f, void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;

    virtio_save(&s->vdev, f);

    while (req) {
        qemu_put_sbyte(f, 1);
        qemu_put_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
        req = req->next;
    }
    qemu_put_sbyte(f, 0);
}
static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIOBlock *s = opaque;

    if (version_id != 2)
        return -EINVAL;

    virtio_load(&s->vdev, f);
    while (qemu_get_sbyte(f)) {
        VirtIOBlockReq *req = virtio_blk_alloc_request(s);
        qemu_get_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
        req->next = s->rq;
        s->rq = req;
    }

    return 0;
}
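
/*
 * Create and wire up the device: common virtio initialisation, config space
 * callbacks, geometry hints taken from the backing drive, a single 128-entry
 * virtqueue, and registration of the VM state change handler and savevm
 * hooks.
 */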
VirtIODevice *virtio_blk_init(DeviceState *dev)
{
    VirtIOBlock *s;
    int cylinders, heads, secs;
    static int virtio_blk_id;
    BlockDriverState *bs;

    s = (VirtIOBlock *)virtio_common_init("virtio-blk", VIRTIO_ID_BLOCK,
                                          sizeof(struct virtio_blk_config),
                                          sizeof(VirtIOBlock));

    bs = qdev_init_bdrv(dev, IF_VIRTIO);
    s->vdev.get_config = virtio_blk_update_config;
    s->vdev.get_features = virtio_blk_get_features;
    s->vdev.reset = virtio_blk_reset;
    s->bs = bs;
    s->rq = NULL;
    bdrv_guess_geometry(s->bs, &cylinders, &heads, &secs);
    bdrv_set_geometry_hint(s->bs, cylinders, heads, secs);

    s->vq = virtio_add_queue(&s->vdev, 128, virtio_blk_handle_output);

    qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
    register_savevm("virtio-blk", virtio_blk_id++, 2,
                    virtio_blk_save, virtio_blk_load, s);

    return &s->vdev;
}