/*
 * Copyright (C) 2009-2010 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/sockets.h"
#include "block/block_int.h"
#include "qemu/bitops.h"
#define SD_PROTO_VER 0x01

#define SD_DEFAULT_ADDR "localhost"
#define SD_DEFAULT_PORT 7000

#define SD_OP_CREATE_AND_WRITE_OBJ 0x01
#define SD_OP_READ_OBJ 0x02
#define SD_OP_WRITE_OBJ 0x03
/* 0x04 is used internally by Sheepdog */
#define SD_OP_DISCARD_OBJ 0x05

#define SD_OP_NEW_VDI 0x11
#define SD_OP_LOCK_VDI 0x12
#define SD_OP_RELEASE_VDI 0x13
#define SD_OP_GET_VDI_INFO 0x14
#define SD_OP_READ_VDIS 0x15
#define SD_OP_FLUSH_VDI 0x16
#define SD_OP_DEL_VDI 0x17

#define SD_FLAG_CMD_WRITE 0x01
#define SD_FLAG_CMD_COW 0x02
#define SD_FLAG_CMD_CACHE 0x04 /* Writeback mode for cache */
#define SD_FLAG_CMD_DIRECT 0x08 /* Don't use cache */
#define SD_RES_SUCCESS 0x00 /* Success */
#define SD_RES_UNKNOWN 0x01 /* Unknown error */
#define SD_RES_NO_OBJ 0x02 /* No object found */
#define SD_RES_EIO 0x03 /* I/O error */
#define SD_RES_VDI_EXIST 0x04 /* Vdi exists already */
#define SD_RES_INVALID_PARMS 0x05 /* Invalid parameters */
#define SD_RES_SYSTEM_ERROR 0x06 /* System error */
#define SD_RES_VDI_LOCKED 0x07 /* Vdi is locked */
#define SD_RES_NO_VDI 0x08 /* No vdi found */
#define SD_RES_NO_BASE_VDI 0x09 /* No base vdi found */
#define SD_RES_VDI_READ 0x0A /* Cannot read requested vdi */
#define SD_RES_VDI_WRITE 0x0B /* Cannot write requested vdi */
#define SD_RES_BASE_VDI_READ 0x0C /* Cannot read base vdi */
#define SD_RES_BASE_VDI_WRITE 0x0D /* Cannot write base vdi */
#define SD_RES_NO_TAG 0x0E /* Requested tag is not found */
#define SD_RES_STARTUP 0x0F /* Sheepdog is starting up */
#define SD_RES_VDI_NOT_LOCKED 0x10 /* Vdi is not locked */
#define SD_RES_SHUTDOWN 0x11 /* Sheepdog is shutting down */
#define SD_RES_NO_MEM 0x12 /* Cannot allocate memory */
#define SD_RES_FULL_VDI 0x13 /* We already have the maximum number of vdis */
#define SD_RES_VER_MISMATCH 0x14 /* Protocol version mismatch */
#define SD_RES_NO_SPACE 0x15 /* Server has no room for new objects */
#define SD_RES_WAIT_FOR_FORMAT 0x16 /* Waiting for a format operation */
#define SD_RES_WAIT_FOR_JOIN 0x17 /* Waiting for other nodes to join */
#define SD_RES_JOIN_FAILED 0x18 /* Target node failed to join sheepdog */
#define SD_RES_HALT 0x19 /* Sheepdog has stopped serving I/O requests */
#define SD_RES_READONLY 0x1A /* Object is read-only */
/*
 * Object ID layout (64 bits):
 *
 *  0 - 19 (20 bits): data object space
 * 20 - 31 (12 bits): reserved data object space
 * 32 - 55 (24 bits): vdi object space
 * 56 - 59 ( 4 bits): reserved vdi object space
 * 60 - 63 ( 4 bits): object type identifier space
 */
#define VDI_SPACE_SHIFT 32
#define VDI_BIT (UINT64_C(1) << 63)
#define VMSTATE_BIT (UINT64_C(1) << 62)
#define MAX_DATA_OBJS (UINT64_C(1) << 20)
#define MAX_CHILDREN 1024
#define SD_MAX_VDI_LEN 256
#define SD_MAX_VDI_TAG_LEN 256
#define SD_NR_VDIS (1U << 24)
#define SD_DATA_OBJ_SIZE (UINT64_C(1) << 22)
#define SD_MAX_VDI_SIZE (SD_DATA_OBJ_SIZE * MAX_DATA_OBJS)

#define SD_INODE_SIZE (sizeof(SheepdogInode))
#define CURRENT_VDI_ID 0
typedef struct SheepdogReq {
    uint32_t data_length;
    uint32_t opcode_specific[8];
} SheepdogReq;

typedef struct SheepdogRsp {
    uint32_t data_length;
    uint32_t opcode_specific[7];
} SheepdogRsp;

typedef struct SheepdogObjReq {
    uint32_t data_length;
} SheepdogObjReq;

typedef struct SheepdogObjRsp {
    uint32_t data_length;
} SheepdogObjRsp;

typedef struct SheepdogVdiReq {
    uint32_t data_length;
} SheepdogVdiReq;

typedef struct SheepdogVdiRsp {
    uint32_t data_length;
} SheepdogVdiRsp;

typedef struct SheepdogInode {
    char name[SD_MAX_VDI_LEN];
    char tag[SD_MAX_VDI_TAG_LEN];
    uint64_t vm_clock_nsec;
    uint64_t vm_state_size;
    uint16_t copy_policy;
    uint8_t block_size_shift;
    uint32_t parent_vdi_id;
    uint32_t child_vdi_id[MAX_CHILDREN];
    uint32_t data_vdi_id[MAX_DATA_OBJS];
} SheepdogInode;
/*
 * 64 bit FNV-1a non-zero initial basis
 */
#define FNV1A_64_INIT ((uint64_t)0xcbf29ce484222325ULL)

/*
 * 64 bit Fowler/Noll/Vo FNV-1a hash code
 */
static inline uint64_t fnv_64a_buf(void *buf, size_t len, uint64_t hval)
{
    unsigned char *bp = buf;
    unsigned char *be = bp + len;
    while (bp < be) {
        hval ^= (uint64_t) *bp++;
        hval += (hval << 1) + (hval << 4) + (hval << 5) +
            (hval << 7) + (hval << 8) + (hval << 40);
    }
    return hval;
}
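
/*
 * Illustrative sketch (not part of the original driver): this helper shows
 * how the FNV-1a hash above is used later in this file (see
 * sd_snapshot_list), mapping a vdi name to the first slot to probe in the
 * vdi bitmap.  The function name sd_hash_vdi_name_example is an assumption
 * added purely for illustration; strlen() is assumed to be available via
 * the headers included above.
 */
static inline uint32_t sd_hash_vdi_name_example(const char *name)
{
    /* Hash the name with the non-zero FNV-1a initial basis... */
    uint64_t hval = fnv_64a_buf((void *)name, strlen(name), FNV1A_64_INIT);

    /* ...and mask it down to a slot in the SD_NR_VDIS-sized vdi space. */
    return hval & (SD_NR_VDIS - 1);
}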
static inline bool is_data_obj_writable(SheepdogInode *inode, unsigned int idx)
{
    return inode->vdi_id == inode->data_vdi_id[idx];
}

static inline bool is_data_obj(uint64_t oid)
{
    return !(VDI_BIT & oid);
}

static inline uint64_t data_oid_to_idx(uint64_t oid)
{
    return oid & (MAX_DATA_OBJS - 1);
}

static inline uint32_t oid_to_vid(uint64_t oid)
{
    return (oid & ~VDI_BIT) >> VDI_SPACE_SHIFT;
}

static inline uint64_t vid_to_vdi_oid(uint32_t vid)
{
    return VDI_BIT | ((uint64_t)vid << VDI_SPACE_SHIFT);
}

static inline uint64_t vid_to_vmstate_oid(uint32_t vid, uint32_t idx)
{
    return VMSTATE_BIT | ((uint64_t)vid << VDI_SPACE_SHIFT) | idx;
}

static inline uint64_t vid_to_data_oid(uint32_t vid, uint32_t idx)
{
    return ((uint64_t)vid << VDI_SPACE_SHIFT) | idx;
}

static inline bool is_snapshot(struct SheepdogInode *inode)
{
    return !!inode->snap_ctime;
}
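
/*
 * Illustrative sketch (not part of the original driver): a small sanity
 * check showing how the helpers above compose and decompose object IDs,
 * following the bit layout documented near the top of this file.  The vdi
 * id 0x123 and index 7 are made-up values, the function name is an
 * assumption added for illustration, and assert() is assumed to be
 * available through the included headers.
 */
static inline void sd_oid_layout_example(void)
{
    uint64_t oid = vid_to_data_oid(0x123, 7);    /* (0x123ULL << 32) | 7 */

    assert(is_data_obj(oid));                    /* VDI_BIT is clear for data objects */
    assert(data_oid_to_idx(oid) == 7);           /* low 20 bits hold the object index */
    assert(oid_to_vid(oid) == 0x123);            /* bits 32..55 hold the vdi id */
    assert(!is_data_obj(vid_to_vdi_oid(0x123))); /* vdi objects have VDI_BIT set */
}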
#ifdef DEBUG_SDOG
#define DPRINTF(fmt, args...) \
    fprintf(stdout, "%s %d: " fmt, __func__, __LINE__, ##args)
#else
#define DPRINTF(fmt, args...)
#endif
typedef struct SheepdogAIOCB SheepdogAIOCB;

typedef struct AIOReq {
    SheepdogAIOCB *aiocb;
    unsigned int iov_offset;

    unsigned int data_len;

    QLIST_ENTRY(AIOReq) aio_siblings;
} AIOReq;

struct SheepdogAIOCB {
    BlockDriverAIOCB common;

    enum AIOCBState aiocb_type;

    Coroutine *coroutine;
    void (*aio_done_func)(SheepdogAIOCB *);
};

typedef struct BDRVSheepdogState {
    BlockDriverState *bs;

    uint32_t min_dirty_data_idx;
    uint32_t max_dirty_data_idx;

    char name[SD_MAX_VDI_LEN];

    uint32_t cache_flags;
    bool discard_supported;

    uint32_t aioreq_seq_num;

    /* Every aio request must be linked to either of these queues. */
    QLIST_HEAD(inflight_aio_head, AIOReq) inflight_aio_head;
    QLIST_HEAD(pending_aio_head, AIOReq) pending_aio_head;
    QLIST_HEAD(failed_aio_head, AIOReq) failed_aio_head;
} BDRVSheepdogState;
static const char * sd_strerror(int err)
{
    int i;

    static const struct {
        int err;
        const char *desc;
    } errors[] = {
        {SD_RES_SUCCESS, "Success"},
        {SD_RES_UNKNOWN, "Unknown error"},
        {SD_RES_NO_OBJ, "No object found"},
        {SD_RES_EIO, "I/O error"},
        {SD_RES_VDI_EXIST, "VDI exists already"},
        {SD_RES_INVALID_PARMS, "Invalid parameters"},
        {SD_RES_SYSTEM_ERROR, "System error"},
        {SD_RES_VDI_LOCKED, "VDI is already locked"},
        {SD_RES_NO_VDI, "No vdi found"},
        {SD_RES_NO_BASE_VDI, "No base VDI found"},
        {SD_RES_VDI_READ, "Failed to read the requested VDI"},
        {SD_RES_VDI_WRITE, "Failed to write the requested VDI"},
        {SD_RES_BASE_VDI_READ, "Failed to read the base VDI"},
        {SD_RES_BASE_VDI_WRITE, "Failed to write the base VDI"},
        {SD_RES_NO_TAG, "Failed to find the requested tag"},
        {SD_RES_STARTUP, "The system is still booting"},
        {SD_RES_VDI_NOT_LOCKED, "VDI isn't locked"},
        {SD_RES_SHUTDOWN, "The system is shutting down"},
        {SD_RES_NO_MEM, "Out of memory on the server"},
        {SD_RES_FULL_VDI, "We already have the maximum number of vdis"},
        {SD_RES_VER_MISMATCH, "Protocol version mismatch"},
        {SD_RES_NO_SPACE, "Server has no space for new objects"},
        {SD_RES_WAIT_FOR_FORMAT, "Sheepdog is waiting for a format operation"},
        {SD_RES_WAIT_FOR_JOIN, "Sheepdog is waiting for other nodes to join"},
        {SD_RES_JOIN_FAILED, "Target node failed to join sheepdog"},
        {SD_RES_HALT, "Sheepdog has stopped serving I/O requests"},
        {SD_RES_READONLY, "Object is read-only"},
    };

    for (i = 0; i < ARRAY_SIZE(errors); ++i) {
        if (errors[i].err == err) {
            return errors[i].desc;
        }
    }

    return "Invalid error code";
}
/*
 * Sheepdog I/O handling:
 *
 * 1. In sd_co_rw_vector, we send the I/O requests to the server and
 *    link the requests to the inflight_list in the
 *    BDRVSheepdogState. The function exits without waiting for
 *    the response.
 *
 * 2. We receive the response in aio_read_response, the fd handler to
 *    the sheepdog connection. If metadata update is needed, we send
 *    the write request to the vdi object in sd_write_done, the write
 *    completion function. We switch back to sd_co_readv/writev after
 *    all the requests belonging to the AIOCB are finished.
 */
static inline AIOReq *alloc_aio_req(BDRVSheepdogState *s, SheepdogAIOCB *acb,
                                    uint64_t oid, unsigned int data_len,
                                    uint64_t offset, uint8_t flags,
                                    uint64_t base_oid, unsigned int iov_offset)
{
    AIOReq *aio_req;

    aio_req = g_malloc(sizeof(*aio_req));
    aio_req->aiocb = acb;
    aio_req->iov_offset = iov_offset;
    aio_req->oid = oid;
    aio_req->base_oid = base_oid;
    aio_req->offset = offset;
    aio_req->data_len = data_len;
    aio_req->flags = flags;
    aio_req->id = s->aioreq_seq_num++;

    acb->nr_pending++;
    return aio_req;
}

static inline void free_aio_req(BDRVSheepdogState *s, AIOReq *aio_req)
{
    SheepdogAIOCB *acb = aio_req->aiocb;

    acb->cancelable = false;
    QLIST_REMOVE(aio_req, aio_siblings);
    g_free(aio_req);

    acb->nr_pending--;
}
429 static void coroutine_fn
sd_finish_aiocb(SheepdogAIOCB
*acb
)
431 qemu_coroutine_enter(acb
->coroutine
, NULL
);
433 *acb
->finished
= true;
435 qemu_aio_release(acb
);
/*
 * Check whether the specified acb can be canceled.
 *
 * We can cancel the aio only when every request belonging to the acb is:
 * - Not yet processed by the sheepdog server.
 * - Not linked to the inflight queue.
 */
445 static bool sd_acb_cancelable(const SheepdogAIOCB
*acb
)
447 BDRVSheepdogState
*s
= acb
->common
.bs
->opaque
;
450 if (!acb
->cancelable
) {
454 QLIST_FOREACH(aioreq
, &s
->inflight_aio_head
, aio_siblings
) {
455 if (aioreq
->aiocb
== acb
) {
463 static void sd_aio_cancel(BlockDriverAIOCB
*blockacb
)
465 SheepdogAIOCB
*acb
= (SheepdogAIOCB
*)blockacb
;
466 BDRVSheepdogState
*s
= acb
->common
.bs
->opaque
;
467 AIOReq
*aioreq
, *next
;
468 bool finished
= false;
470 acb
->finished
= &finished
;
472 if (sd_acb_cancelable(acb
)) {
473 /* Remove outstanding requests from pending and failed queues. */
474 QLIST_FOREACH_SAFE(aioreq
, &s
->pending_aio_head
, aio_siblings
,
476 if (aioreq
->aiocb
== acb
) {
477 free_aio_req(s
, aioreq
);
480 QLIST_FOREACH_SAFE(aioreq
, &s
->failed_aio_head
, aio_siblings
,
482 if (aioreq
->aiocb
== acb
) {
483 free_aio_req(s
, aioreq
);
487 assert(acb
->nr_pending
== 0);
488 sd_finish_aiocb(acb
);
495 static const AIOCBInfo sd_aiocb_info
= {
496 .aiocb_size
= sizeof(SheepdogAIOCB
),
497 .cancel
= sd_aio_cancel
,
500 static SheepdogAIOCB
*sd_aio_setup(BlockDriverState
*bs
, QEMUIOVector
*qiov
,
501 int64_t sector_num
, int nb_sectors
)
505 acb
= qemu_aio_get(&sd_aiocb_info
, bs
, NULL
, NULL
);
509 acb
->sector_num
= sector_num
;
510 acb
->nb_sectors
= nb_sectors
;
512 acb
->aio_done_func
= NULL
;
513 acb
->cancelable
= true;
514 acb
->finished
= NULL
;
515 acb
->coroutine
= qemu_coroutine_self();
521 static int connect_to_sdog(BDRVSheepdogState
*s
)
527 fd
= unix_connect(s
->host_spec
, &err
);
529 fd
= inet_connect(s
->host_spec
, &err
);
532 int ret
= socket_set_nodelay(fd
);
534 error_report("%s", strerror(errno
));
540 qerror_report_err(err
);
543 qemu_set_nonblock(fd
);
549 static coroutine_fn
int send_co_req(int sockfd
, SheepdogReq
*hdr
, void *data
,
554 ret
= qemu_co_send(sockfd
, hdr
, sizeof(*hdr
));
555 if (ret
!= sizeof(*hdr
)) {
556 error_report("failed to send a req, %s", strerror(errno
));
560 ret
= qemu_co_send(sockfd
, data
, *wlen
);
562 error_report("failed to send a req, %s", strerror(errno
));
568 static void restart_co_req(void *opaque
)
570 Coroutine
*co
= opaque
;
572 qemu_coroutine_enter(co
, NULL
);
575 typedef struct SheepdogReqCo
{
585 static coroutine_fn
void do_co_req(void *opaque
)
589 SheepdogReqCo
*srco
= opaque
;
590 int sockfd
= srco
->sockfd
;
591 SheepdogReq
*hdr
= srco
->hdr
;
592 void *data
= srco
->data
;
593 unsigned int *wlen
= srco
->wlen
;
594 unsigned int *rlen
= srco
->rlen
;
596 co
= qemu_coroutine_self();
597 qemu_aio_set_fd_handler(sockfd
, NULL
, restart_co_req
, co
);
599 ret
= send_co_req(sockfd
, hdr
, data
, wlen
);
604 qemu_aio_set_fd_handler(sockfd
, restart_co_req
, NULL
, co
);
606 ret
= qemu_co_recv(sockfd
, hdr
, sizeof(*hdr
));
607 if (ret
!= sizeof(*hdr
)) {
608 error_report("failed to get a rsp, %s", strerror(errno
));
613 if (*rlen
> hdr
->data_length
) {
614 *rlen
= hdr
->data_length
;
618 ret
= qemu_co_recv(sockfd
, data
, *rlen
);
620 error_report("failed to get the data, %s", strerror(errno
));
627 /* there is at most one request for this sockfd, so it is safe to
628 * set each handler to NULL. */
629 qemu_aio_set_fd_handler(sockfd
, NULL
, NULL
, NULL
);
632 srco
->finished
= true;
635 static int do_req(int sockfd
, SheepdogReq
*hdr
, void *data
,
636 unsigned int *wlen
, unsigned int *rlen
)
639 SheepdogReqCo srco
= {
649 if (qemu_in_coroutine()) {
652 co
= qemu_coroutine_create(do_co_req
);
653 qemu_coroutine_enter(co
, &srco
);
654 while (!srco
.finished
) {
662 static void coroutine_fn
add_aio_request(BDRVSheepdogState
*s
, AIOReq
*aio_req
,
663 struct iovec
*iov
, int niov
, bool create
,
664 enum AIOCBState aiocb_type
);
665 static void coroutine_fn
resend_aioreq(BDRVSheepdogState
*s
, AIOReq
*aio_req
);
666 static int reload_inode(BDRVSheepdogState
*s
, uint32_t snapid
, const char *tag
);
667 static int get_sheep_fd(BDRVSheepdogState
*s
);
668 static void co_write_request(void *opaque
);
670 static AIOReq
*find_pending_req(BDRVSheepdogState
*s
, uint64_t oid
)
674 QLIST_FOREACH(aio_req
, &s
->pending_aio_head
, aio_siblings
) {
675 if (aio_req
->oid
== oid
) {
/*
 * This function searches the pending requests for the object `oid', and
 * sends them.
 */
687 static void coroutine_fn
send_pending_req(BDRVSheepdogState
*s
, uint64_t oid
)
692 while ((aio_req
= find_pending_req(s
, oid
)) != NULL
) {
693 acb
= aio_req
->aiocb
;
694 /* move aio_req from pending list to inflight one */
695 QLIST_REMOVE(aio_req
, aio_siblings
);
696 QLIST_INSERT_HEAD(&s
->inflight_aio_head
, aio_req
, aio_siblings
);
697 add_aio_request(s
, aio_req
, acb
->qiov
->iov
, acb
->qiov
->niov
, false,
702 static coroutine_fn
void reconnect_to_sdog(void *opaque
)
704 BDRVSheepdogState
*s
= opaque
;
705 AIOReq
*aio_req
, *next
;
707 qemu_aio_set_fd_handler(s
->fd
, NULL
, NULL
, NULL
);
711 /* Wait for outstanding write requests to be completed. */
712 while (s
->co_send
!= NULL
) {
713 co_write_request(opaque
);
/* Try to reconnect to the sheepdog server once per second. */
718 s
->fd
= get_sheep_fd(s
);
720 DPRINTF("Wait for connection to be established\n");
721 co_aio_sleep_ns(bdrv_get_aio_context(s
->bs
), QEMU_CLOCK_REALTIME
,
/*
 * Now we have to resend all the requests in the inflight queue. However,
 * resend_aioreq() can yield and newly created requests can be added to the
 * inflight queue before the coroutine is resumed. To avoid mixing them, we
 * have to move all the inflight requests to the failed queue before
 * resend_aioreq() is called.
 */
733 QLIST_FOREACH_SAFE(aio_req
, &s
->inflight_aio_head
, aio_siblings
, next
) {
734 QLIST_REMOVE(aio_req
, aio_siblings
);
735 QLIST_INSERT_HEAD(&s
->failed_aio_head
, aio_req
, aio_siblings
);
738 /* Resend all the failed aio requests. */
739 while (!QLIST_EMPTY(&s
->failed_aio_head
)) {
740 aio_req
= QLIST_FIRST(&s
->failed_aio_head
);
741 QLIST_REMOVE(aio_req
, aio_siblings
);
742 QLIST_INSERT_HEAD(&s
->inflight_aio_head
, aio_req
, aio_siblings
);
743 resend_aioreq(s
, aio_req
);
748 * Receive responses of the I/O requests.
750 * This function is registered as a fd handler, and called from the
751 * main loop when s->fd is ready for reading responses.
753 static void coroutine_fn
aio_read_response(void *opaque
)
756 BDRVSheepdogState
*s
= opaque
;
759 AIOReq
*aio_req
= NULL
;
764 ret
= qemu_co_recv(fd
, &rsp
, sizeof(rsp
));
765 if (ret
!= sizeof(rsp
)) {
766 error_report("failed to get the header, %s", strerror(errno
));
770 /* find the right aio_req from the inflight aio list */
771 QLIST_FOREACH(aio_req
, &s
->inflight_aio_head
, aio_siblings
) {
772 if (aio_req
->id
== rsp
.id
) {
777 error_report("cannot find aio_req %x", rsp
.id
);
781 acb
= aio_req
->aiocb
;
783 switch (acb
->aiocb_type
) {
784 case AIOCB_WRITE_UDATA
:
785 /* this coroutine context is no longer suitable for co_recv
786 * because we may send data to update vdi objects */
788 if (!is_data_obj(aio_req
->oid
)) {
791 idx
= data_oid_to_idx(aio_req
->oid
);
793 if (s
->inode
.data_vdi_id
[idx
] != s
->inode
.vdi_id
) {
/*
 * If the object is a newly created one, we need to update
 * the vdi object (metadata object). min_dirty_data_idx
 * and max_dirty_data_idx are changed to include the updated
 * index between them.
 */
800 if (rsp
.result
== SD_RES_SUCCESS
) {
801 s
->inode
.data_vdi_id
[idx
] = s
->inode
.vdi_id
;
802 s
->max_dirty_data_idx
= MAX(idx
, s
->max_dirty_data_idx
);
803 s
->min_dirty_data_idx
= MIN(idx
, s
->min_dirty_data_idx
);
806 * Some requests may be blocked because simultaneous
807 * create requests are not allowed, so we search the
808 * pending requests here.
810 send_pending_req(s
, aio_req
->oid
);
813 case AIOCB_READ_UDATA
:
814 ret
= qemu_co_recvv(fd
, acb
->qiov
->iov
, acb
->qiov
->niov
,
815 aio_req
->iov_offset
, rsp
.data_length
);
816 if (ret
!= rsp
.data_length
) {
817 error_report("failed to get the data, %s", strerror(errno
));
821 case AIOCB_FLUSH_CACHE
:
822 if (rsp
.result
== SD_RES_INVALID_PARMS
) {
823 DPRINTF("disable cache since the server doesn't support it\n");
824 s
->cache_flags
= SD_FLAG_CMD_DIRECT
;
825 rsp
.result
= SD_RES_SUCCESS
;
828 case AIOCB_DISCARD_OBJ
:
829 switch (rsp
.result
) {
830 case SD_RES_INVALID_PARMS
:
831 error_report("sheep(%s) doesn't support discard command",
833 rsp
.result
= SD_RES_SUCCESS
;
834 s
->discard_supported
= false;
837 idx
= data_oid_to_idx(aio_req
->oid
);
838 s
->inode
.data_vdi_id
[idx
] = 0;
845 switch (rsp
.result
) {
848 case SD_RES_READONLY
:
849 if (s
->inode
.vdi_id
== oid_to_vid(aio_req
->oid
)) {
850 ret
= reload_inode(s
, 0, "");
855 if (is_data_obj(aio_req
->oid
)) {
856 aio_req
->oid
= vid_to_data_oid(s
->inode
.vdi_id
,
857 data_oid_to_idx(aio_req
->oid
));
859 aio_req
->oid
= vid_to_vdi_oid(s
->inode
.vdi_id
);
861 resend_aioreq(s
, aio_req
);
865 error_report("%s", sd_strerror(rsp
.result
));
869 free_aio_req(s
, aio_req
);
870 if (!acb
->nr_pending
) {
872 * We've finished all requests which belong to the AIOCB, so
873 * we can switch back to sd_co_readv/writev now.
875 acb
->aio_done_func(acb
);
882 reconnect_to_sdog(opaque
);
885 static void co_read_response(void *opaque
)
887 BDRVSheepdogState
*s
= opaque
;
890 s
->co_recv
= qemu_coroutine_create(aio_read_response
);
893 qemu_coroutine_enter(s
->co_recv
, opaque
);
896 static void co_write_request(void *opaque
)
898 BDRVSheepdogState
*s
= opaque
;
900 qemu_coroutine_enter(s
->co_send
, NULL
);
/*
 * Return a socket descriptor to read/write objects.
 *
 * We cannot use this descriptor for other operations because
 * the block driver may be waiting for a response from the server.
 */
909 static int get_sheep_fd(BDRVSheepdogState
*s
)
913 fd
= connect_to_sdog(s
);
918 qemu_aio_set_fd_handler(fd
, co_read_response
, NULL
, s
);
922 static int sd_parse_uri(BDRVSheepdogState
*s
, const char *filename
,
923 char *vdi
, uint32_t *snapid
, char *tag
)
926 QueryParams
*qp
= NULL
;
929 uri
= uri_parse(filename
);
935 if (!strcmp(uri
->scheme
, "sheepdog")) {
937 } else if (!strcmp(uri
->scheme
, "sheepdog+tcp")) {
939 } else if (!strcmp(uri
->scheme
, "sheepdog+unix")) {
946 if (uri
->path
== NULL
|| !strcmp(uri
->path
, "/")) {
950 pstrcpy(vdi
, SD_MAX_VDI_LEN
, uri
->path
+ 1);
952 qp
= query_params_parse(uri
->query
);
953 if (qp
->n
> 1 || (s
->is_unix
&& !qp
->n
) || (!s
->is_unix
&& qp
->n
)) {
959 /* sheepdog+unix:///vdiname?socket=path */
960 if (uri
->server
|| uri
->port
|| strcmp(qp
->p
[0].name
, "socket")) {
964 s
->host_spec
= g_strdup(qp
->p
[0].value
);
966 /* sheepdog[+tcp]://[host:port]/vdiname */
967 s
->host_spec
= g_strdup_printf("%s:%d", uri
->server
?: SD_DEFAULT_ADDR
,
968 uri
->port
?: SD_DEFAULT_PORT
);
973 *snapid
= strtoul(uri
->fragment
, NULL
, 10);
975 pstrcpy(tag
, SD_MAX_VDI_TAG_LEN
, uri
->fragment
);
978 *snapid
= CURRENT_VDI_ID
; /* search current vdi */
983 query_params_free(qp
);
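
/*
 * Illustrative examples (host names, socket paths and vdi names below are
 * made up): sd_parse_uri() above accepts URIs of the following shapes,
 * matching the scheme, fragment and query handling in the code.
 *
 *   sheepdog:///myvdi                                - defaults to localhost:7000
 *   sheepdog+tcp://sheep.example.com:7000/myvdi
 *   sheepdog://sheep.example.com:7000/myvdi#1        - snapshot id in the fragment
 *   sheepdog://sheep.example.com:7000/myvdi#mytag    - snapshot tag in the fragment
 *   sheepdog+unix:///myvdi?socket=/var/run/sheepdog.sock
 */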
/*
 * Parse a filename (old syntax)
 *
 * filename must be one of the following formats:
 *   1. [vdiname]
 *   2. [vdiname]:[snapid]
 *   3. [vdiname]:[tag]
 *   4. [hostname]:[port]:[vdiname]
 *   5. [hostname]:[port]:[vdiname]:[snapid]
 *   6. [hostname]:[port]:[vdiname]:[tag]
 *
 * You can boot from the snapshot images by specifying `snapid' or `tag'.
 *
 * You can run VMs outside the Sheepdog cluster by specifying
 * `hostname' and `port' (experimental).
 */
1006 static int parse_vdiname(BDRVSheepdogState
*s
, const char *filename
,
1007 char *vdi
, uint32_t *snapid
, char *tag
)
1010 const char *host_spec
, *vdi_spec
;
1013 strstart(filename
, "sheepdog:", (const char **)&filename
);
1014 p
= q
= g_strdup(filename
);
1016 /* count the number of separators */
1026 /* use the first two tokens as host_spec. */
1039 p
= strchr(vdi_spec
, ':');
1044 uri
= g_strdup_printf("sheepdog://%s/%s", host_spec
, vdi_spec
);
1046 ret
= sd_parse_uri(s
, uri
, vdi
, snapid
, tag
);
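
/*
 * Illustrative examples (made-up names; parts of the tokenising code are not
 * shown in this listing): parse_vdiname() rewrites the old syntax into a URI
 * and hands it to sd_parse_uri(), so the forms below are expected to behave
 * the same way.
 *
 *   "sheepdog:myvdi:1"
 *       -> "sheepdog:///myvdi#1"            (defaults to localhost:7000)
 *   "sheepdog:sheep.example.com:7000:myvdi"
 *       -> "sheepdog://sheep.example.com:7000/myvdi"
 */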
1054 static int find_vdi_name(BDRVSheepdogState
*s
, const char *filename
,
1055 uint32_t snapid
, const char *tag
, uint32_t *vid
,
1060 SheepdogVdiRsp
*rsp
= (SheepdogVdiRsp
*)&hdr
;
1061 unsigned int wlen
, rlen
= 0;
1062 char buf
[SD_MAX_VDI_LEN
+ SD_MAX_VDI_TAG_LEN
];
1064 fd
= connect_to_sdog(s
);
/* This pair of strncpy calls ensures that the buffer is zero-filled,
 * which is desirable since we'll soon be sending those bytes, and
 * don't want the send_req to read uninitialized data.
 */
1073 strncpy(buf
, filename
, SD_MAX_VDI_LEN
);
1074 strncpy(buf
+ SD_MAX_VDI_LEN
, tag
, SD_MAX_VDI_TAG_LEN
);
1076 memset(&hdr
, 0, sizeof(hdr
));
1078 hdr
.opcode
= SD_OP_LOCK_VDI
;
1080 hdr
.opcode
= SD_OP_GET_VDI_INFO
;
1082 wlen
= SD_MAX_VDI_LEN
+ SD_MAX_VDI_TAG_LEN
;
1083 hdr
.proto_ver
= SD_PROTO_VER
;
1084 hdr
.data_length
= wlen
;
1085 hdr
.snapid
= snapid
;
1086 hdr
.flags
= SD_FLAG_CMD_WRITE
;
1088 ret
= do_req(fd
, (SheepdogReq
*)&hdr
, buf
, &wlen
, &rlen
);
1093 if (rsp
->result
!= SD_RES_SUCCESS
) {
1094 error_report("cannot get vdi info, %s, %s %d %s",
1095 sd_strerror(rsp
->result
), filename
, snapid
, tag
);
1096 if (rsp
->result
== SD_RES_NO_VDI
) {
1111 static void coroutine_fn
add_aio_request(BDRVSheepdogState
*s
, AIOReq
*aio_req
,
1112 struct iovec
*iov
, int niov
, bool create
,
1113 enum AIOCBState aiocb_type
)
1115 int nr_copies
= s
->inode
.nr_copies
;
1117 unsigned int wlen
= 0;
1119 uint64_t oid
= aio_req
->oid
;
1120 unsigned int datalen
= aio_req
->data_len
;
1121 uint64_t offset
= aio_req
->offset
;
1122 uint8_t flags
= aio_req
->flags
;
1123 uint64_t old_oid
= aio_req
->base_oid
;
1126 error_report("bug");
1129 memset(&hdr
, 0, sizeof(hdr
));
1131 switch (aiocb_type
) {
1132 case AIOCB_FLUSH_CACHE
:
1133 hdr
.opcode
= SD_OP_FLUSH_VDI
;
1135 case AIOCB_READ_UDATA
:
1136 hdr
.opcode
= SD_OP_READ_OBJ
;
1139 case AIOCB_WRITE_UDATA
:
1141 hdr
.opcode
= SD_OP_CREATE_AND_WRITE_OBJ
;
1143 hdr
.opcode
= SD_OP_WRITE_OBJ
;
1146 hdr
.flags
= SD_FLAG_CMD_WRITE
| flags
;
1148 case AIOCB_DISCARD_OBJ
:
1149 hdr
.opcode
= SD_OP_DISCARD_OBJ
;
1153 if (s
->cache_flags
) {
1154 hdr
.flags
|= s
->cache_flags
;
1158 hdr
.cow_oid
= old_oid
;
1159 hdr
.copies
= s
->inode
.nr_copies
;
1161 hdr
.data_length
= datalen
;
1162 hdr
.offset
= offset
;
1164 hdr
.id
= aio_req
->id
;
1166 qemu_co_mutex_lock(&s
->lock
);
1167 s
->co_send
= qemu_coroutine_self();
1168 qemu_aio_set_fd_handler(s
->fd
, co_read_response
, co_write_request
, s
);
1169 socket_set_cork(s
->fd
, 1);
1172 ret
= qemu_co_send(s
->fd
, &hdr
, sizeof(hdr
));
1173 if (ret
!= sizeof(hdr
)) {
1174 error_report("failed to send a req, %s", strerror(errno
));
1179 ret
= qemu_co_sendv(s
->fd
, iov
, niov
, aio_req
->iov_offset
, wlen
);
1181 error_report("failed to send a data, %s", strerror(errno
));
1185 socket_set_cork(s
->fd
, 0);
1186 qemu_aio_set_fd_handler(s
->fd
, co_read_response
, NULL
, s
);
1188 qemu_co_mutex_unlock(&s
->lock
);
1191 static int read_write_object(int fd
, char *buf
, uint64_t oid
, uint8_t copies
,
1192 unsigned int datalen
, uint64_t offset
,
1193 bool write
, bool create
, uint32_t cache_flags
)
1196 SheepdogObjRsp
*rsp
= (SheepdogObjRsp
*)&hdr
;
1197 unsigned int wlen
, rlen
;
1200 memset(&hdr
, 0, sizeof(hdr
));
1205 hdr
.flags
= SD_FLAG_CMD_WRITE
;
1207 hdr
.opcode
= SD_OP_CREATE_AND_WRITE_OBJ
;
1209 hdr
.opcode
= SD_OP_WRITE_OBJ
;
1214 hdr
.opcode
= SD_OP_READ_OBJ
;
1217 hdr
.flags
|= cache_flags
;
1220 hdr
.data_length
= datalen
;
1221 hdr
.offset
= offset
;
1222 hdr
.copies
= copies
;
1224 ret
= do_req(fd
, (SheepdogReq
*)&hdr
, buf
, &wlen
, &rlen
);
1226 error_report("failed to send a request to the sheep");
1230 switch (rsp
->result
) {
1231 case SD_RES_SUCCESS
:
1234 error_report("%s", sd_strerror(rsp
->result
));
1239 static int read_object(int fd
, char *buf
, uint64_t oid
, uint8_t copies
,
1240 unsigned int datalen
, uint64_t offset
,
1241 uint32_t cache_flags
)
1243 return read_write_object(fd
, buf
, oid
, copies
, datalen
, offset
, false,
1244 false, cache_flags
);
1247 static int write_object(int fd
, char *buf
, uint64_t oid
, uint8_t copies
,
1248 unsigned int datalen
, uint64_t offset
, bool create
,
1249 uint32_t cache_flags
)
1251 return read_write_object(fd
, buf
, oid
, copies
, datalen
, offset
, true,
1252 create
, cache_flags
);
1255 /* update inode with the latest state */
1256 static int reload_inode(BDRVSheepdogState
*s
, uint32_t snapid
, const char *tag
)
1258 SheepdogInode
*inode
;
1262 fd
= connect_to_sdog(s
);
1267 inode
= g_malloc(sizeof(s
->inode
));
1269 ret
= find_vdi_name(s
, s
->name
, snapid
, tag
, &vid
, false);
1274 ret
= read_object(fd
, (char *)inode
, vid_to_vdi_oid(vid
),
1275 s
->inode
.nr_copies
, sizeof(*inode
), 0, s
->cache_flags
);
1280 if (inode
->vdi_id
!= s
->inode
.vdi_id
) {
1281 memcpy(&s
->inode
, inode
, sizeof(s
->inode
));
1291 /* Return true if the specified request is linked to the pending list. */
1292 static bool check_simultaneous_create(BDRVSheepdogState
*s
, AIOReq
*aio_req
)
1295 QLIST_FOREACH(areq
, &s
->inflight_aio_head
, aio_siblings
) {
1296 if (areq
!= aio_req
&& areq
->oid
== aio_req
->oid
) {
/*
 * Sheepdog cannot handle simultaneous create requests to the same
 * object, so we cannot send the request until the previous request
 * is completed.
 */
1302 DPRINTF("simultaneous create to %" PRIx64
"\n", aio_req
->oid
);
1304 aio_req
->base_oid
= 0;
1305 QLIST_REMOVE(aio_req
, aio_siblings
);
1306 QLIST_INSERT_HEAD(&s
->pending_aio_head
, aio_req
, aio_siblings
);
1314 static void coroutine_fn
resend_aioreq(BDRVSheepdogState
*s
, AIOReq
*aio_req
)
1316 SheepdogAIOCB
*acb
= aio_req
->aiocb
;
1317 bool create
= false;
1319 /* check whether this request becomes a CoW one */
1320 if (acb
->aiocb_type
== AIOCB_WRITE_UDATA
&& is_data_obj(aio_req
->oid
)) {
1321 int idx
= data_oid_to_idx(aio_req
->oid
);
1323 if (is_data_obj_writable(&s
->inode
, idx
)) {
1327 if (check_simultaneous_create(s
, aio_req
)) {
1331 if (s
->inode
.data_vdi_id
[idx
]) {
1332 aio_req
->base_oid
= vid_to_data_oid(s
->inode
.data_vdi_id
[idx
], idx
);
1333 aio_req
->flags
|= SD_FLAG_CMD_COW
;
1338 if (is_data_obj(aio_req
->oid
)) {
1339 add_aio_request(s
, aio_req
, acb
->qiov
->iov
, acb
->qiov
->niov
, create
,
1343 iov
.iov_base
= &s
->inode
;
1344 iov
.iov_len
= sizeof(s
->inode
);
1345 add_aio_request(s
, aio_req
, &iov
, 1, false, AIOCB_WRITE_UDATA
);
1349 /* TODO Convert to fine grained options */
1350 static QemuOptsList runtime_opts
= {
1352 .head
= QTAILQ_HEAD_INITIALIZER(runtime_opts
.head
),
1356 .type
= QEMU_OPT_STRING
,
1357 .help
= "URL to the sheepdog image",
1359 { /* end of list */ }
1363 static int sd_open(BlockDriverState
*bs
, QDict
*options
, int flags
,
1368 BDRVSheepdogState
*s
= bs
->opaque
;
1369 char vdi
[SD_MAX_VDI_LEN
], tag
[SD_MAX_VDI_TAG_LEN
];
1373 Error
*local_err
= NULL
;
1374 const char *filename
;
1378 opts
= qemu_opts_create_nofail(&runtime_opts
);
1379 qemu_opts_absorb_qdict(opts
, options
, &local_err
);
1380 if (error_is_set(&local_err
)) {
1381 qerror_report_err(local_err
);
1382 error_free(local_err
);
1387 filename
= qemu_opt_get(opts
, "filename");
1389 QLIST_INIT(&s
->inflight_aio_head
);
1390 QLIST_INIT(&s
->pending_aio_head
);
1391 QLIST_INIT(&s
->failed_aio_head
);
1394 memset(vdi
, 0, sizeof(vdi
));
1395 memset(tag
, 0, sizeof(tag
));
1397 if (strstr(filename
, "://")) {
1398 ret
= sd_parse_uri(s
, filename
, vdi
, &snapid
, tag
);
1400 ret
= parse_vdiname(s
, filename
, vdi
, &snapid
, tag
);
1405 s
->fd
= get_sheep_fd(s
);
1411 ret
= find_vdi_name(s
, vdi
, snapid
, tag
, &vid
, true);
/*
 * The QEMU block layer emulates writethrough cache as 'writeback + flush',
 * so we always set SD_FLAG_CMD_CACHE (writeback cache) by default.
 */
1420 s
->cache_flags
= SD_FLAG_CMD_CACHE
;
1421 if (flags
& BDRV_O_NOCACHE
) {
1422 s
->cache_flags
= SD_FLAG_CMD_DIRECT
;
1424 s
->discard_supported
= true;
1426 if (snapid
|| tag
[0] != '\0') {
1427 DPRINTF("%" PRIx32
" snapshot inode was open.\n", vid
);
1428 s
->is_snapshot
= true;
1431 fd
= connect_to_sdog(s
);
1437 buf
= g_malloc(SD_INODE_SIZE
);
1438 ret
= read_object(fd
, buf
, vid_to_vdi_oid(vid
), 0, SD_INODE_SIZE
, 0,
1447 memcpy(&s
->inode
, buf
, sizeof(s
->inode
));
1448 s
->min_dirty_data_idx
= UINT32_MAX
;
1449 s
->max_dirty_data_idx
= 0;
1451 bs
->total_sectors
= s
->inode
.vdi_size
/ BDRV_SECTOR_SIZE
;
1452 pstrcpy(s
->name
, sizeof(s
->name
), vdi
);
1453 qemu_co_mutex_init(&s
->lock
);
1454 qemu_opts_del(opts
);
1458 qemu_aio_set_fd_handler(s
->fd
, NULL
, NULL
, NULL
);
1462 qemu_opts_del(opts
);
1467 static int do_sd_create(BDRVSheepdogState
*s
, char *filename
, int64_t vdi_size
,
1468 uint32_t base_vid
, uint32_t *vdi_id
, int snapshot
,
1469 uint8_t copy_policy
)
1472 SheepdogVdiRsp
*rsp
= (SheepdogVdiRsp
*)&hdr
;
1474 unsigned int wlen
, rlen
= 0;
1475 char buf
[SD_MAX_VDI_LEN
];
1477 fd
= connect_to_sdog(s
);
/* FIXME: would it be better to fail (e.g., return -EIO) when filename
 * does not fit in buf? For now, just truncate and avoid buffer overrun.
 */
1485 memset(buf
, 0, sizeof(buf
));
1486 pstrcpy(buf
, sizeof(buf
), filename
);
1488 memset(&hdr
, 0, sizeof(hdr
));
1489 hdr
.opcode
= SD_OP_NEW_VDI
;
1490 hdr
.vdi_id
= base_vid
;
1492 wlen
= SD_MAX_VDI_LEN
;
1494 hdr
.flags
= SD_FLAG_CMD_WRITE
;
1495 hdr
.snapid
= snapshot
;
1497 hdr
.data_length
= wlen
;
1498 hdr
.vdi_size
= vdi_size
;
1499 hdr
.copy_policy
= copy_policy
;
1501 ret
= do_req(fd
, (SheepdogReq
*)&hdr
, buf
, &wlen
, &rlen
);
1509 if (rsp
->result
!= SD_RES_SUCCESS
) {
1510 error_report("%s, %s", sd_strerror(rsp
->result
), filename
);
1515 *vdi_id
= rsp
->vdi_id
;
1521 static int sd_prealloc(const char *filename
)
1523 BlockDriverState
*bs
= NULL
;
1524 uint32_t idx
, max_idx
;
1526 void *buf
= g_malloc0(SD_DATA_OBJ_SIZE
);
1527 Error
*local_err
= NULL
;
1530 ret
= bdrv_file_open(&bs
, filename
, NULL
, BDRV_O_RDWR
, &local_err
);
1532 qerror_report_err(local_err
);
1533 error_free(local_err
);
1537 vdi_size
= bdrv_getlength(bs
);
1542 max_idx
= DIV_ROUND_UP(vdi_size
, SD_DATA_OBJ_SIZE
);
1544 for (idx
= 0; idx
< max_idx
; idx
++) {
/*
 * The created image can be a cloned image, so we need to read
 * data from the source image.
 */
1549 ret
= bdrv_pread(bs
, idx
* SD_DATA_OBJ_SIZE
, buf
, SD_DATA_OBJ_SIZE
);
1553 ret
= bdrv_pwrite(bs
, idx
* SD_DATA_OBJ_SIZE
, buf
, SD_DATA_OBJ_SIZE
);
1567 static int sd_create(const char *filename
, QEMUOptionParameter
*options
,
1571 uint32_t vid
= 0, base_vid
= 0;
1572 int64_t vdi_size
= 0;
1573 char *backing_file
= NULL
;
1574 BDRVSheepdogState
*s
;
1575 char vdi
[SD_MAX_VDI_LEN
], tag
[SD_MAX_VDI_TAG_LEN
];
1577 bool prealloc
= false;
1578 Error
*local_err
= NULL
;
1580 s
= g_malloc0(sizeof(BDRVSheepdogState
));
1582 memset(vdi
, 0, sizeof(vdi
));
1583 memset(tag
, 0, sizeof(tag
));
1584 if (strstr(filename
, "://")) {
1585 ret
= sd_parse_uri(s
, filename
, vdi
, &snapid
, tag
);
1587 ret
= parse_vdiname(s
, filename
, vdi
, &snapid
, tag
);
1593 while (options
&& options
->name
) {
1594 if (!strcmp(options
->name
, BLOCK_OPT_SIZE
)) {
1595 vdi_size
= options
->value
.n
;
1596 } else if (!strcmp(options
->name
, BLOCK_OPT_BACKING_FILE
)) {
1597 backing_file
= options
->value
.s
;
1598 } else if (!strcmp(options
->name
, BLOCK_OPT_PREALLOC
)) {
1599 if (!options
->value
.s
|| !strcmp(options
->value
.s
, "off")) {
1601 } else if (!strcmp(options
->value
.s
, "full")) {
1604 error_report("Invalid preallocation mode: '%s'",
1613 if (vdi_size
> SD_MAX_VDI_SIZE
) {
1614 error_report("too big image size");
1620 BlockDriverState
*bs
;
1621 BDRVSheepdogState
*s
;
1624 /* Currently, only Sheepdog backing image is supported. */
1625 drv
= bdrv_find_protocol(backing_file
, true);
1626 if (!drv
|| strcmp(drv
->protocol_name
, "sheepdog") != 0) {
1627 error_report("backing_file must be a sheepdog image");
1632 ret
= bdrv_file_open(&bs
, backing_file
, NULL
, 0, &local_err
);
1634 qerror_report_err(local_err
);
1635 error_free(local_err
);
1641 if (!is_snapshot(&s
->inode
)) {
1642 error_report("cannot clone from a non snapshot vdi");
1648 base_vid
= s
->inode
.vdi_id
;
1652 /* TODO: allow users to specify copy number */
1653 ret
= do_sd_create(s
, vdi
, vdi_size
, base_vid
, &vid
, 0, 0);
1654 if (!prealloc
|| ret
) {
1658 ret
= sd_prealloc(filename
);
1664 static void sd_close(BlockDriverState
*bs
)
1666 BDRVSheepdogState
*s
= bs
->opaque
;
1668 SheepdogVdiRsp
*rsp
= (SheepdogVdiRsp
*)&hdr
;
1669 unsigned int wlen
, rlen
= 0;
1672 DPRINTF("%s\n", s
->name
);
1674 fd
= connect_to_sdog(s
);
1679 memset(&hdr
, 0, sizeof(hdr
));
1681 hdr
.opcode
= SD_OP_RELEASE_VDI
;
1682 hdr
.vdi_id
= s
->inode
.vdi_id
;
1683 wlen
= strlen(s
->name
) + 1;
1684 hdr
.data_length
= wlen
;
1685 hdr
.flags
= SD_FLAG_CMD_WRITE
;
1687 ret
= do_req(fd
, (SheepdogReq
*)&hdr
, s
->name
, &wlen
, &rlen
);
1691 if (!ret
&& rsp
->result
!= SD_RES_SUCCESS
&&
1692 rsp
->result
!= SD_RES_VDI_NOT_LOCKED
) {
1693 error_report("%s, %s", sd_strerror(rsp
->result
), s
->name
);
1696 qemu_aio_set_fd_handler(s
->fd
, NULL
, NULL
, NULL
);
1698 g_free(s
->host_spec
);
1701 static int64_t sd_getlength(BlockDriverState
*bs
)
1703 BDRVSheepdogState
*s
= bs
->opaque
;
1705 return s
->inode
.vdi_size
;
1708 static int sd_truncate(BlockDriverState
*bs
, int64_t offset
)
1710 BDRVSheepdogState
*s
= bs
->opaque
;
1712 unsigned int datalen
;
1714 if (offset
< s
->inode
.vdi_size
) {
1715 error_report("shrinking is not supported");
1717 } else if (offset
> SD_MAX_VDI_SIZE
) {
1718 error_report("too big image size");
1722 fd
= connect_to_sdog(s
);
1727 /* we don't need to update entire object */
1728 datalen
= SD_INODE_SIZE
- sizeof(s
->inode
.data_vdi_id
);
1729 s
->inode
.vdi_size
= offset
;
1730 ret
= write_object(fd
, (char *)&s
->inode
, vid_to_vdi_oid(s
->inode
.vdi_id
),
1731 s
->inode
.nr_copies
, datalen
, 0, false, s
->cache_flags
);
1735 error_report("failed to update an inode.");
1742 * This function is called after writing data objects. If we need to
1743 * update metadata, this sends a write request to the vdi object.
1744 * Otherwise, this switches back to sd_co_readv/writev.
1746 static void coroutine_fn
sd_write_done(SheepdogAIOCB
*acb
)
1748 BDRVSheepdogState
*s
= acb
->common
.bs
->opaque
;
1751 uint32_t offset
, data_len
, mn
, mx
;
1753 mn
= s
->min_dirty_data_idx
;
1754 mx
= s
->max_dirty_data_idx
;
1756 /* we need to update the vdi object. */
1757 offset
= sizeof(s
->inode
) - sizeof(s
->inode
.data_vdi_id
) +
1758 mn
* sizeof(s
->inode
.data_vdi_id
[0]);
1759 data_len
= (mx
- mn
+ 1) * sizeof(s
->inode
.data_vdi_id
[0]);
1761 s
->min_dirty_data_idx
= UINT32_MAX
;
1762 s
->max_dirty_data_idx
= 0;
1764 iov
.iov_base
= &s
->inode
;
1765 iov
.iov_len
= sizeof(s
->inode
);
1766 aio_req
= alloc_aio_req(s
, acb
, vid_to_vdi_oid(s
->inode
.vdi_id
),
1767 data_len
, offset
, 0, 0, offset
);
1768 QLIST_INSERT_HEAD(&s
->inflight_aio_head
, aio_req
, aio_siblings
);
1769 add_aio_request(s
, aio_req
, &iov
, 1, false, AIOCB_WRITE_UDATA
);
1771 acb
->aio_done_func
= sd_finish_aiocb
;
1772 acb
->aiocb_type
= AIOCB_WRITE_UDATA
;
1776 sd_finish_aiocb(acb
);
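
/*
 * Worked example (illustrative numbers): if a write dirtied data object
 * indexes 3 through 5, then mn = 3 and mx = 5 in sd_write_done above, so the
 * driver writes (5 - 3 + 1) * sizeof(uint32_t) = 12 bytes of data_vdi_id[]
 * starting at offset sizeof(inode) - sizeof(data_vdi_id) + 3 * sizeof(uint32_t)
 * within the vdi object, i.e. only the dirtied slots of the index table are
 * sent instead of the whole inode.
 */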
1779 /* Delete current working VDI on the snapshot chain */
1780 static bool sd_delete(BDRVSheepdogState
*s
)
1782 unsigned int wlen
= SD_MAX_VDI_LEN
, rlen
= 0;
1783 SheepdogVdiReq hdr
= {
1784 .opcode
= SD_OP_DEL_VDI
,
1785 .vdi_id
= s
->inode
.vdi_id
,
1786 .data_length
= wlen
,
1787 .flags
= SD_FLAG_CMD_WRITE
,
1789 SheepdogVdiRsp
*rsp
= (SheepdogVdiRsp
*)&hdr
;
1792 fd
= connect_to_sdog(s
);
1797 ret
= do_req(fd
, (SheepdogReq
*)&hdr
, s
->name
, &wlen
, &rlen
);
1802 switch (rsp
->result
) {
1804 error_report("%s was already deleted", s
->name
);
1806 case SD_RES_SUCCESS
:
1809 error_report("%s, %s", sd_strerror(rsp
->result
), s
->name
);
1817 * Create a writable VDI from a snapshot
1819 static int sd_create_branch(BDRVSheepdogState
*s
)
1826 DPRINTF("%" PRIx32
" is snapshot.\n", s
->inode
.vdi_id
);
1828 buf
= g_malloc(SD_INODE_SIZE
);
/*
 * Even if deletion fails, we will just create an extra snapshot based on
 * the working VDI which was supposed to be deleted, so there is no need to
 * fail here.
 */
1835 deleted
= sd_delete(s
);
1836 ret
= do_sd_create(s
, s
->name
, s
->inode
.vdi_size
, s
->inode
.vdi_id
, &vid
,
1837 !deleted
, s
->inode
.copy_policy
);
1842 DPRINTF("%" PRIx32
" is created.\n", vid
);
1844 fd
= connect_to_sdog(s
);
1850 ret
= read_object(fd
, buf
, vid_to_vdi_oid(vid
), s
->inode
.nr_copies
,
1851 SD_INODE_SIZE
, 0, s
->cache_flags
);
1859 memcpy(&s
->inode
, buf
, sizeof(s
->inode
));
1861 s
->is_snapshot
= false;
1863 DPRINTF("%" PRIx32
" was newly created.\n", s
->inode
.vdi_id
);
/*
 * Send I/O requests to the server.
 *
 * This function sends requests to the server, links the requests to
 * the inflight_list in BDRVSheepdogState, and exits without
 * waiting for the response. The responses are received in the
 * `aio_read_response' function, which is called from the main loop as
 * a fd handler.
 *
 * Returns 1 when we need to wait for a response, 0 when there is no sent
 * request and -errno in error cases.
 */
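
/*
 * Worked example (illustrative numbers): with SD_DATA_OBJ_SIZE = 4 MiB and
 * BDRV_SECTOR_SIZE = 512, a request for nb_sectors = 16 starting at
 * sector_num = 10000 gives
 *   total  = 16 * 512            = 8192 bytes
 *   idx    = 10000 * 512 / 4 MiB = 1        (second data object)
 *   offset = 10000 * 512 % 4 MiB = 925696
 * so the whole request fits in data object 1 and is sent as a single aio_req
 * by the loop below.
 */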
1883 static int coroutine_fn
sd_co_rw_vector(void *p
)
1885 SheepdogAIOCB
*acb
= p
;
1887 unsigned long len
, done
= 0, total
= acb
->nb_sectors
* BDRV_SECTOR_SIZE
;
1888 unsigned long idx
= acb
->sector_num
* BDRV_SECTOR_SIZE
/ SD_DATA_OBJ_SIZE
;
1890 uint64_t offset
= (acb
->sector_num
* BDRV_SECTOR_SIZE
) % SD_DATA_OBJ_SIZE
;
1891 BDRVSheepdogState
*s
= acb
->common
.bs
->opaque
;
1892 SheepdogInode
*inode
= &s
->inode
;
1895 if (acb
->aiocb_type
== AIOCB_WRITE_UDATA
&& s
->is_snapshot
) {
/*
 * In the case where we open a snapshot VDI, Sheepdog creates the
 * writable VDI on the first write operation.
 */
1900 ret
= sd_create_branch(s
);
1908 * Make sure we don't free the aiocb before we are done with all requests.
1909 * This additional reference is dropped at the end of this function.
1913 while (done
!= total
) {
1915 uint64_t old_oid
= 0;
1916 bool create
= false;
1918 oid
= vid_to_data_oid(inode
->data_vdi_id
[idx
], idx
);
1920 len
= MIN(total
- done
, SD_DATA_OBJ_SIZE
- offset
);
1922 switch (acb
->aiocb_type
) {
1923 case AIOCB_READ_UDATA
:
1924 if (!inode
->data_vdi_id
[idx
]) {
1925 qemu_iovec_memset(acb
->qiov
, done
, 0, len
);
1929 case AIOCB_WRITE_UDATA
:
1930 if (!inode
->data_vdi_id
[idx
]) {
1932 } else if (!is_data_obj_writable(inode
, idx
)) {
1936 flags
= SD_FLAG_CMD_COW
;
1939 case AIOCB_DISCARD_OBJ
:
1941 * We discard the object only when the whole object is
1942 * 1) allocated 2) trimmed. Otherwise, simply skip it.
1944 if (len
!= SD_DATA_OBJ_SIZE
|| inode
->data_vdi_id
[idx
] == 0) {
1953 DPRINTF("update ino (%" PRIu32
") %" PRIu64
" %" PRIu64
" %ld\n",
1955 vid_to_data_oid(inode
->data_vdi_id
[idx
], idx
), idx
);
1956 oid
= vid_to_data_oid(inode
->vdi_id
, idx
);
1957 DPRINTF("new oid %" PRIx64
"\n", oid
);
1960 aio_req
= alloc_aio_req(s
, acb
, oid
, len
, offset
, flags
, old_oid
, done
);
1961 QLIST_INSERT_HEAD(&s
->inflight_aio_head
, aio_req
, aio_siblings
);
1964 if (check_simultaneous_create(s
, aio_req
)) {
1969 add_aio_request(s
, aio_req
, acb
->qiov
->iov
, acb
->qiov
->niov
, create
,
1977 if (!--acb
->nr_pending
) {
1983 static coroutine_fn
int sd_co_writev(BlockDriverState
*bs
, int64_t sector_num
,
1984 int nb_sectors
, QEMUIOVector
*qiov
)
1989 if (bs
->growable
&& sector_num
+ nb_sectors
> bs
->total_sectors
) {
1990 ret
= sd_truncate(bs
, (sector_num
+ nb_sectors
) * BDRV_SECTOR_SIZE
);
1994 bs
->total_sectors
= sector_num
+ nb_sectors
;
1997 acb
= sd_aio_setup(bs
, qiov
, sector_num
, nb_sectors
);
1998 acb
->aio_done_func
= sd_write_done
;
1999 acb
->aiocb_type
= AIOCB_WRITE_UDATA
;
2001 ret
= sd_co_rw_vector(acb
);
2003 qemu_aio_release(acb
);
2007 qemu_coroutine_yield();
2012 static coroutine_fn
int sd_co_readv(BlockDriverState
*bs
, int64_t sector_num
,
2013 int nb_sectors
, QEMUIOVector
*qiov
)
2018 acb
= sd_aio_setup(bs
, qiov
, sector_num
, nb_sectors
);
2019 acb
->aiocb_type
= AIOCB_READ_UDATA
;
2020 acb
->aio_done_func
= sd_finish_aiocb
;
2022 ret
= sd_co_rw_vector(acb
);
2024 qemu_aio_release(acb
);
2028 qemu_coroutine_yield();
2033 static int coroutine_fn
sd_co_flush_to_disk(BlockDriverState
*bs
)
2035 BDRVSheepdogState
*s
= bs
->opaque
;
2039 if (s
->cache_flags
!= SD_FLAG_CMD_CACHE
) {
2043 acb
= sd_aio_setup(bs
, NULL
, 0, 0);
2044 acb
->aiocb_type
= AIOCB_FLUSH_CACHE
;
2045 acb
->aio_done_func
= sd_finish_aiocb
;
2047 aio_req
= alloc_aio_req(s
, acb
, vid_to_vdi_oid(s
->inode
.vdi_id
),
2049 QLIST_INSERT_HEAD(&s
->inflight_aio_head
, aio_req
, aio_siblings
);
2050 add_aio_request(s
, aio_req
, NULL
, 0, false, acb
->aiocb_type
);
2052 qemu_coroutine_yield();
2056 static int sd_snapshot_create(BlockDriverState
*bs
, QEMUSnapshotInfo
*sn_info
)
2058 BDRVSheepdogState
*s
= bs
->opaque
;
2061 SheepdogInode
*inode
;
2062 unsigned int datalen
;
2064 DPRINTF("sn_info: name %s id_str %s s: name %s vm_state_size %" PRId64
" "
2065 "is_snapshot %d\n", sn_info
->name
, sn_info
->id_str
,
2066 s
->name
, sn_info
->vm_state_size
, s
->is_snapshot
);
2068 if (s
->is_snapshot
) {
2069 error_report("You can't create a snapshot of a snapshot VDI, "
2070 "%s (%" PRIu32
").", s
->name
, s
->inode
.vdi_id
);
2075 DPRINTF("%s %s\n", sn_info
->name
, sn_info
->id_str
);
2077 s
->inode
.vm_state_size
= sn_info
->vm_state_size
;
2078 s
->inode
.vm_clock_nsec
= sn_info
->vm_clock_nsec
;
2079 /* It appears that inode.tag does not require a NUL terminator,
2080 * which means this use of strncpy is ok.
2082 strncpy(s
->inode
.tag
, sn_info
->name
, sizeof(s
->inode
.tag
));
2083 /* we don't need to update entire object */
2084 datalen
= SD_INODE_SIZE
- sizeof(s
->inode
.data_vdi_id
);
2086 /* refresh inode. */
2087 fd
= connect_to_sdog(s
);
2093 ret
= write_object(fd
, (char *)&s
->inode
, vid_to_vdi_oid(s
->inode
.vdi_id
),
2094 s
->inode
.nr_copies
, datalen
, 0, false, s
->cache_flags
);
2096 error_report("failed to write snapshot's inode.");
2100 ret
= do_sd_create(s
, s
->name
, s
->inode
.vdi_size
, s
->inode
.vdi_id
, &new_vid
,
2101 1, s
->inode
.copy_policy
);
2103 error_report("failed to create inode for snapshot. %s",
2108 inode
= (SheepdogInode
*)g_malloc(datalen
);
2110 ret
= read_object(fd
, (char *)inode
, vid_to_vdi_oid(new_vid
),
2111 s
->inode
.nr_copies
, datalen
, 0, s
->cache_flags
);
2114 error_report("failed to read new inode info. %s", strerror(errno
));
2118 memcpy(&s
->inode
, inode
, datalen
);
2119 DPRINTF("s->inode: name %s snap_id %x oid %x\n",
2120 s
->inode
.name
, s
->inode
.snap_id
, s
->inode
.vdi_id
);
/*
 * We implement a rollback (loadvm) operation to the specified snapshot by
 * 1) switching to the snapshot,
 * 2) relying on sd_create_branch to delete the working VDI, and
 * 3) creating a new working VDI based on the specified snapshot.
 */
2133 static int sd_snapshot_goto(BlockDriverState
*bs
, const char *snapshot_id
)
2135 BDRVSheepdogState
*s
= bs
->opaque
;
2136 BDRVSheepdogState
*old_s
;
2137 char tag
[SD_MAX_VDI_TAG_LEN
];
2138 uint32_t snapid
= 0;
2141 old_s
= g_malloc(sizeof(BDRVSheepdogState
));
2143 memcpy(old_s
, s
, sizeof(BDRVSheepdogState
));
2145 snapid
= strtoul(snapshot_id
, NULL
, 10);
2149 pstrcpy(tag
, sizeof(tag
), snapshot_id
);
2152 ret
= reload_inode(s
, snapid
, tag
);
2157 ret
= sd_create_branch(s
);
2166 /* recover bdrv_sd_state */
2167 memcpy(s
, old_s
, sizeof(BDRVSheepdogState
));
2170 error_report("failed to open. recover old bdrv_sd_state.");
2175 static int sd_snapshot_delete(BlockDriverState
*bs
,
2176 const char *snapshot_id
,
2180 /* FIXME: Delete specified snapshot id. */
2184 static int sd_snapshot_list(BlockDriverState
*bs
, QEMUSnapshotInfo
**psn_tab
)
2186 BDRVSheepdogState
*s
= bs
->opaque
;
2188 int fd
, nr
= 1024, ret
, max
= BITS_TO_LONGS(SD_NR_VDIS
) * sizeof(long);
2189 QEMUSnapshotInfo
*sn_tab
= NULL
;
2190 unsigned wlen
, rlen
;
2192 static SheepdogInode inode
;
2193 unsigned long *vdi_inuse
;
2194 unsigned int start_nr
;
2198 vdi_inuse
= g_malloc(max
);
2200 fd
= connect_to_sdog(s
);
2209 memset(&req
, 0, sizeof(req
));
2211 req
.opcode
= SD_OP_READ_VDIS
;
2212 req
.data_length
= max
;
2214 ret
= do_req(fd
, (SheepdogReq
*)&req
, vdi_inuse
, &wlen
, &rlen
);
2221 sn_tab
= g_malloc0(nr
* sizeof(*sn_tab
));
2223 /* calculate a vdi id with hash function */
2224 hval
= fnv_64a_buf(s
->name
, strlen(s
->name
), FNV1A_64_INIT
);
2225 start_nr
= hval
& (SD_NR_VDIS
- 1);
2227 fd
= connect_to_sdog(s
);
2233 for (vid
= start_nr
; found
< nr
; vid
= (vid
+ 1) % SD_NR_VDIS
) {
2234 if (!test_bit(vid
, vdi_inuse
)) {
2238 /* we don't need to read entire object */
2239 ret
= read_object(fd
, (char *)&inode
, vid_to_vdi_oid(vid
),
2240 0, SD_INODE_SIZE
- sizeof(inode
.data_vdi_id
), 0,
2247 if (!strcmp(inode
.name
, s
->name
) && is_snapshot(&inode
)) {
2248 sn_tab
[found
].date_sec
= inode
.snap_ctime
>> 32;
2249 sn_tab
[found
].date_nsec
= inode
.snap_ctime
& 0xffffffff;
2250 sn_tab
[found
].vm_state_size
= inode
.vm_state_size
;
2251 sn_tab
[found
].vm_clock_nsec
= inode
.vm_clock_nsec
;
2253 snprintf(sn_tab
[found
].id_str
, sizeof(sn_tab
[found
].id_str
), "%u",
2255 pstrcpy(sn_tab
[found
].name
,
2256 MIN(sizeof(sn_tab
[found
].name
), sizeof(inode
.tag
)),
2275 static int do_load_save_vmstate(BDRVSheepdogState
*s
, uint8_t *data
,
2276 int64_t pos
, int size
, int load
)
2279 int fd
, ret
= 0, remaining
= size
;
2280 unsigned int data_len
;
2281 uint64_t vmstate_oid
;
2284 uint32_t vdi_id
= load
? s
->inode
.parent_vdi_id
: s
->inode
.vdi_id
;
2286 fd
= connect_to_sdog(s
);
2292 vdi_index
= pos
/ SD_DATA_OBJ_SIZE
;
2293 offset
= pos
% SD_DATA_OBJ_SIZE
;
2295 data_len
= MIN(remaining
, SD_DATA_OBJ_SIZE
- offset
);
2297 vmstate_oid
= vid_to_vmstate_oid(vdi_id
, vdi_index
);
2299 create
= (offset
== 0);
2301 ret
= read_object(fd
, (char *)data
, vmstate_oid
,
2302 s
->inode
.nr_copies
, data_len
, offset
,
2305 ret
= write_object(fd
, (char *)data
, vmstate_oid
,
2306 s
->inode
.nr_copies
, data_len
, offset
, create
,
2311 error_report("failed to save vmstate %s", strerror(errno
));
2317 remaining
-= data_len
;
2325 static int sd_save_vmstate(BlockDriverState
*bs
, QEMUIOVector
*qiov
,
2328 BDRVSheepdogState
*s
= bs
->opaque
;
2332 buf
= qemu_blockalign(bs
, qiov
->size
);
2333 qemu_iovec_to_buf(qiov
, 0, buf
, qiov
->size
);
2334 ret
= do_load_save_vmstate(s
, (uint8_t *) buf
, pos
, qiov
->size
, 0);
2340 static int sd_load_vmstate(BlockDriverState
*bs
, uint8_t *data
,
2341 int64_t pos
, int size
)
2343 BDRVSheepdogState
*s
= bs
->opaque
;
2345 return do_load_save_vmstate(s
, data
, pos
, size
, 1);
2349 static coroutine_fn
int sd_co_discard(BlockDriverState
*bs
, int64_t sector_num
,
2354 BDRVSheepdogState
*s
= bs
->opaque
;
2357 if (!s
->discard_supported
) {
2361 acb
= sd_aio_setup(bs
, &dummy
, sector_num
, nb_sectors
);
2362 acb
->aiocb_type
= AIOCB_DISCARD_OBJ
;
2363 acb
->aio_done_func
= sd_finish_aiocb
;
2365 ret
= sd_co_rw_vector(acb
);
2367 qemu_aio_release(acb
);
2371 qemu_coroutine_yield();
2376 static coroutine_fn
int64_t
2377 sd_co_get_block_status(BlockDriverState
*bs
, int64_t sector_num
, int nb_sectors
,
2380 BDRVSheepdogState
*s
= bs
->opaque
;
2381 SheepdogInode
*inode
= &s
->inode
;
2382 unsigned long start
= sector_num
* BDRV_SECTOR_SIZE
/ SD_DATA_OBJ_SIZE
,
2383 end
= DIV_ROUND_UP((sector_num
+ nb_sectors
) *
2384 BDRV_SECTOR_SIZE
, SD_DATA_OBJ_SIZE
);
2386 int64_t ret
= BDRV_BLOCK_DATA
;
2388 for (idx
= start
; idx
< end
; idx
++) {
2389 if (inode
->data_vdi_id
[idx
] == 0) {
2394 /* Get the longest length of unallocated sectors */
2396 for (idx
= start
+ 1; idx
< end
; idx
++) {
2397 if (inode
->data_vdi_id
[idx
] != 0) {
2403 *pnum
= (idx
- start
) * SD_DATA_OBJ_SIZE
/ BDRV_SECTOR_SIZE
;
2404 if (*pnum
> nb_sectors
) {
2410 static QEMUOptionParameter sd_create_options
[] = {
2412 .name
= BLOCK_OPT_SIZE
,
2414 .help
= "Virtual disk size"
2417 .name
= BLOCK_OPT_BACKING_FILE
,
2419 .help
= "File name of a base image"
2422 .name
= BLOCK_OPT_PREALLOC
,
2424 .help
= "Preallocation mode (allowed values: off, full)"
2429 static BlockDriver bdrv_sheepdog
= {
2430 .format_name
= "sheepdog",
2431 .protocol_name
= "sheepdog",
2432 .instance_size
= sizeof(BDRVSheepdogState
),
2433 .bdrv_needs_filename
= true,
2434 .bdrv_file_open
= sd_open
,
2435 .bdrv_close
= sd_close
,
2436 .bdrv_create
= sd_create
,
2437 .bdrv_has_zero_init
= bdrv_has_zero_init_1
,
2438 .bdrv_getlength
= sd_getlength
,
2439 .bdrv_truncate
= sd_truncate
,
2441 .bdrv_co_readv
= sd_co_readv
,
2442 .bdrv_co_writev
= sd_co_writev
,
2443 .bdrv_co_flush_to_disk
= sd_co_flush_to_disk
,
2444 .bdrv_co_discard
= sd_co_discard
,
2445 .bdrv_co_get_block_status
= sd_co_get_block_status
,
2447 .bdrv_snapshot_create
= sd_snapshot_create
,
2448 .bdrv_snapshot_goto
= sd_snapshot_goto
,
2449 .bdrv_snapshot_delete
= sd_snapshot_delete
,
2450 .bdrv_snapshot_list
= sd_snapshot_list
,
2452 .bdrv_save_vmstate
= sd_save_vmstate
,
2453 .bdrv_load_vmstate
= sd_load_vmstate
,
2455 .create_options
= sd_create_options
,
2458 static BlockDriver bdrv_sheepdog_tcp
= {
2459 .format_name
= "sheepdog",
2460 .protocol_name
= "sheepdog+tcp",
2461 .instance_size
= sizeof(BDRVSheepdogState
),
2462 .bdrv_needs_filename
= true,
2463 .bdrv_file_open
= sd_open
,
2464 .bdrv_close
= sd_close
,
2465 .bdrv_create
= sd_create
,
2466 .bdrv_has_zero_init
= bdrv_has_zero_init_1
,
2467 .bdrv_getlength
= sd_getlength
,
2468 .bdrv_truncate
= sd_truncate
,
2470 .bdrv_co_readv
= sd_co_readv
,
2471 .bdrv_co_writev
= sd_co_writev
,
2472 .bdrv_co_flush_to_disk
= sd_co_flush_to_disk
,
2473 .bdrv_co_discard
= sd_co_discard
,
2474 .bdrv_co_get_block_status
= sd_co_get_block_status
,
2476 .bdrv_snapshot_create
= sd_snapshot_create
,
2477 .bdrv_snapshot_goto
= sd_snapshot_goto
,
2478 .bdrv_snapshot_delete
= sd_snapshot_delete
,
2479 .bdrv_snapshot_list
= sd_snapshot_list
,
2481 .bdrv_save_vmstate
= sd_save_vmstate
,
2482 .bdrv_load_vmstate
= sd_load_vmstate
,
2484 .create_options
= sd_create_options
,
2487 static BlockDriver bdrv_sheepdog_unix
= {
2488 .format_name
= "sheepdog",
2489 .protocol_name
= "sheepdog+unix",
2490 .instance_size
= sizeof(BDRVSheepdogState
),
2491 .bdrv_needs_filename
= true,
2492 .bdrv_file_open
= sd_open
,
2493 .bdrv_close
= sd_close
,
2494 .bdrv_create
= sd_create
,
2495 .bdrv_has_zero_init
= bdrv_has_zero_init_1
,
2496 .bdrv_getlength
= sd_getlength
,
2497 .bdrv_truncate
= sd_truncate
,
2499 .bdrv_co_readv
= sd_co_readv
,
2500 .bdrv_co_writev
= sd_co_writev
,
2501 .bdrv_co_flush_to_disk
= sd_co_flush_to_disk
,
2502 .bdrv_co_discard
= sd_co_discard
,
2503 .bdrv_co_get_block_status
= sd_co_get_block_status
,
2505 .bdrv_snapshot_create
= sd_snapshot_create
,
2506 .bdrv_snapshot_goto
= sd_snapshot_goto
,
2507 .bdrv_snapshot_delete
= sd_snapshot_delete
,
2508 .bdrv_snapshot_list
= sd_snapshot_list
,
2510 .bdrv_save_vmstate
= sd_save_vmstate
,
2511 .bdrv_load_vmstate
= sd_load_vmstate
,
2513 .create_options
= sd_create_options
,
2516 static void bdrv_sheepdog_init(void)
2518 bdrv_register(&bdrv_sheepdog
);
2519 bdrv_register(&bdrv_sheepdog_tcp
);
2520 bdrv_register(&bdrv_sheepdog_unix
);
2522 block_init(bdrv_sheepdog_init
);
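
/*
 * Usage sketch (illustrative; vdi and host names are made up): after
 * bdrv_sheepdog_init() registers the three drivers above, Sheepdog images
 * can be referenced through the protocol names they declare, e.g.
 *
 *   qemu-img create sheepdog:myvdi 4G
 *   qemu-system-x86_64 -drive file=sheepdog://sheep.example.com:7000/myvdi
 *
 * The exact command-line spelling depends on the QEMU tools in use.
 */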