/*
 * GlusterFS backend for QEMU
 *
 * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <glusterfs/api/glfs.h>
#include "block/block_int.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/uri.h"
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#define GLUSTER_OPT_FILENAME        "filename"
#define GLUSTER_OPT_VOLUME          "volume"
#define GLUSTER_OPT_PATH            "path"
#define GLUSTER_OPT_TYPE            "type"
#define GLUSTER_OPT_SERVER_PATTERN  "server."
#define GLUSTER_OPT_HOST            "host"
#define GLUSTER_OPT_PORT            "port"
#define GLUSTER_OPT_TO              "to"
#define GLUSTER_OPT_IPV4            "ipv4"
#define GLUSTER_OPT_IPV6            "ipv6"
#define GLUSTER_OPT_SOCKET          "socket"
#define GLUSTER_OPT_DEBUG           "debug"
#define GLUSTER_DEFAULT_PORT        24007
#define GLUSTER_DEBUG_DEFAULT       4
#define GLUSTER_DEBUG_MAX           9
#define GLUSTER_OPT_LOGFILE         "logfile"
#define GLUSTER_LOGFILE_DEFAULT     "-" /* handled in libgfapi as /dev/stderr */

#define GERR_INDEX_HINT "hint: check in 'server' array index '%d'\n"
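/*
 * Illustrative note: the option names above surface on the command line as
 * "file." keys of the gluster protocol node, so an invocation might look
 * roughly like (example values, not taken verbatim from this file):
 *
 *   -drive file=gluster://1.2.3.4/testvol/a.img,file.debug=9,
 *          file.logfile=/var/log/qemu-gluster.log
 *
 * The exact spelling of each key is defined by the macros above and the
 * QemuOptsList tables below.
 */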
typedef struct GlusterAIOCB {
    int64_t size;
    int ret;
    Coroutine *coroutine;
    AioContext *aio_context;
} GlusterAIOCB;

typedef struct BDRVGlusterState {
    struct glfs *glfs;
    struct glfs_fd *fd;
    char *logfile;
    bool supports_seek_data;
    int debug;
} BDRVGlusterState;

typedef struct BDRVGlusterReopenState {
    struct glfs *glfs;
    struct glfs_fd *fd;
} BDRVGlusterReopenState;

typedef struct GlfsPreopened {
    char *volume;
    glfs_t *fs;
    int ref;
} GlfsPreopened;

typedef struct ListElement {
    QLIST_ENTRY(ListElement) list;
    GlfsPreopened saved;
} ListElement;

static QLIST_HEAD(glfs_list, ListElement) glfs_list;
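/*
 * glfs_list caches one glfs_t connection per Gluster volume together with a
 * reference count (see glfs_find_preopened(), glfs_set_preopened() and
 * glfs_clear_preopened() below).  Reopen and create can therefore reuse an
 * already initialised connection instead of building a new one for the same
 * volume.
 */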
static QemuOptsList qemu_gluster_create_opts = {
    .name = "qemu-gluster-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qemu_gluster_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_PREALLOC,
            .type = QEMU_OPT_STRING,
            .help = "Preallocation mode (allowed values: off, full)"
        },
        {
            .name = GLUSTER_OPT_DEBUG,
            .type = QEMU_OPT_NUMBER,
            .help = "Gluster log level, valid range is 0-9",
        },
        {
            .name = GLUSTER_OPT_LOGFILE,
            .type = QEMU_OPT_STRING,
            .help = "Logfile path of libgfapi",
        },
        { /* end of list */ }
    }
};
static QemuOptsList runtime_opts = {
    .name = "gluster",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = GLUSTER_OPT_FILENAME,
            .type = QEMU_OPT_STRING,
            .help = "URL to the gluster image",
        },
        {
            .name = GLUSTER_OPT_DEBUG,
            .type = QEMU_OPT_NUMBER,
            .help = "Gluster log level, valid range is 0-9",
        },
        {
            .name = GLUSTER_OPT_LOGFILE,
            .type = QEMU_OPT_STRING,
            .help = "Logfile path of libgfapi",
        },
        { /* end of list */ }
    },
};
static QemuOptsList runtime_json_opts = {
    .name = "gluster_json",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_json_opts.head),
    .desc = {
        {
            .name = GLUSTER_OPT_VOLUME,
            .type = QEMU_OPT_STRING,
            .help = "name of gluster volume where VM image resides",
        },
        {
            .name = GLUSTER_OPT_PATH,
            .type = QEMU_OPT_STRING,
            .help = "absolute path to image file in gluster volume",
        },
        {
            .name = GLUSTER_OPT_DEBUG,
            .type = QEMU_OPT_NUMBER,
            .help = "Gluster log level, valid range is 0-9",
        },
        { /* end of list */ }
    },
};
static QemuOptsList runtime_type_opts = {
    .name = "gluster_type",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_type_opts.head),
    .desc = {
        {
            .name = GLUSTER_OPT_TYPE,
            .type = QEMU_OPT_STRING,
            .help = "tcp|unix",
        },
        { /* end of list */ }
    },
};
static QemuOptsList runtime_unix_opts = {
    .name = "gluster_unix",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_unix_opts.head),
    .desc = {
        {
            .name = GLUSTER_OPT_SOCKET,
            .type = QEMU_OPT_STRING,
            .help = "socket file path",
        },
        { /* end of list */ }
    },
};
static QemuOptsList runtime_tcp_opts = {
    .name = "gluster_tcp",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_tcp_opts.head),
    .desc = {
        {
            .name = GLUSTER_OPT_TYPE,
            .type = QEMU_OPT_STRING,
            .help = "tcp|unix",
        },
        {
            .name = GLUSTER_OPT_HOST,
            .type = QEMU_OPT_STRING,
            .help = "host address (hostname/ipv4/ipv6 addresses)",
        },
        {
            .name = GLUSTER_OPT_PORT,
            .type = QEMU_OPT_STRING,
            .help = "port number on which glusterd is listening (default 24007)",
        },
        {
            .name = GLUSTER_OPT_TO,
            .type = QEMU_OPT_NUMBER,
            .help = "max port number, not supported by gluster",
        },
        {
            .name = GLUSTER_OPT_IPV4,
            .type = QEMU_OPT_BOOL,
            .help = "ipv4 bool value, not supported by gluster",
        },
        {
            .name = GLUSTER_OPT_IPV6,
            .type = QEMU_OPT_BOOL,
            .help = "ipv6 bool value, not supported by gluster",
        },
        { /* end of list */ }
    },
};
static void glfs_set_preopened(const char *volume, glfs_t *fs)
{
    ListElement *entry = NULL;

    entry = g_new(ListElement, 1);
    entry->saved.volume = g_strdup(volume);
    entry->saved.fs = fs;
    entry->saved.ref = 1;

    QLIST_INSERT_HEAD(&glfs_list, entry, list);
}

static glfs_t *glfs_find_preopened(const char *volume)
{
    ListElement *entry = NULL;

    QLIST_FOREACH(entry, &glfs_list, list) {
        if (strcmp(entry->saved.volume, volume) == 0) {
            entry->saved.ref++;
            return entry->saved.fs;
        }
    }

    return NULL;
}

static void glfs_clear_preopened(glfs_t *fs)
{
    ListElement *entry = NULL;
    ListElement *next;

    if (fs == NULL) {
        return;
    }

    QLIST_FOREACH_SAFE(entry, &glfs_list, list, next) {
        if (entry->saved.fs == fs) {
            if (--entry->saved.ref) {
                return;
            }

            QLIST_REMOVE(entry, list);

            glfs_fini(entry->saved.fs);
            g_free(entry->saved.volume);
            g_free(entry);
        }
    }
}
static int parse_volume_options(BlockdevOptionsGluster *gconf, char *path)
{
    char *p, *q;

    if (!path) {
        return -EINVAL;
    }

    /* volume */
    p = q = path + strspn(path, "/");
    p += strcspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->volume = g_strndup(q, p - q);

    /* path */
    p += strspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->path = g_strdup(p);

    return 0;
}
/*
 * file=gluster[+transport]://[host[:port]]/volume/path[?socket=...]
 *
 * 'gluster' is the protocol.
 *
 * 'transport' specifies the transport type used to connect to gluster
 * management daemon (glusterd). Valid transport types are
 * tcp or unix. If a transport type isn't specified, then tcp type is assumed.
 *
 * 'host' specifies the host where the volume file specification for
 * the given volume resides. This can be either a hostname or an ipv4 address.
 * If transport type is 'unix', then 'host' field should not be specified.
 * Instead, the 'socket' field needs to be populated with the path to the unix
 * domain socket.
 *
 * 'port' is the port number on which glusterd is listening. This is optional
 * and if not specified, QEMU will send 0, which will make gluster use the
 * default port. If the transport type is unix, then 'port' should not be
 * specified.
 *
 * 'volume' is the name of the gluster volume which contains the VM image.
 *
 * 'path' is the path to the actual VM image that resides on gluster volume.
 *
 * Examples:
 *
 * file=gluster://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
 * file=gluster+tcp://host.domain.com:24007/testvol/dir/a.img
 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
 */
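/*
 * For illustration (hypothetical values, assuming the usual -drive syntax),
 * the URI form above would typically appear on a command line as:
 *
 *   -drive driver=qcow2,file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.qcow2
 */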
static int qemu_gluster_parse_uri(BlockdevOptionsGluster *gconf,
                                  const char *filename)
{
    GlusterServer *gsconf;
    URI *uri;
    QueryParams *qp = NULL;
    bool is_unix = false;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    gconf->server = g_new0(GlusterServerList, 1);
    gconf->server->value = gsconf = g_new0(GlusterServer, 1);

    /* transport */
    if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
        gsconf->type = GLUSTER_TRANSPORT_TCP;
    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
        gsconf->type = GLUSTER_TRANSPORT_TCP;
    } else if (!strcmp(uri->scheme, "gluster+unix")) {
        gsconf->type = GLUSTER_TRANSPORT_UNIX;
        is_unix = true;
    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
        gsconf->type = GLUSTER_TRANSPORT_TCP;
        error_report("Warning: rdma feature is not supported, falling "
                     "back to tcp");
    } else {
        ret = -EINVAL;
        goto out;
    }

    ret = parse_volume_options(gconf, uri->path);
    if (ret < 0) {
        goto out;
    }

    /* the query string carries the socket path for the unix transport */
    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* a unix domain socket carries no host or port */
        if (uri->server || uri->port) {
            ret = -EINVAL;
            goto out;
        }
        if (strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        gsconf->u.q_unix.path = g_strdup(qp->p[0].value);
    } else {
        gsconf->u.tcp.host = g_strdup(uri->server ? uri->server : "localhost");
        if (uri->port) {
            gsconf->u.tcp.port = g_strdup_printf("%d", uri->port);
        } else {
            gsconf->u.tcp.port = g_strdup_printf("%d", GLUSTER_DEFAULT_PORT);
        }
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}
static struct glfs *qemu_gluster_glfs_init(BlockdevOptionsGluster *gconf,
                                           Error **errp)
{
    struct glfs *glfs;
    int ret;
    int old_errno;
    GlusterServerList *server;
    unsigned long long port;

    glfs = glfs_find_preopened(gconf->volume);
    if (glfs) {
        return glfs;
    }

    glfs = glfs_new(gconf->volume);
    if (!glfs) {
        goto out;
    }

    glfs_set_preopened(gconf->volume, glfs);

    for (server = gconf->server; server; server = server->next) {
        if (server->value->type == GLUSTER_TRANSPORT_UNIX) {
            ret = glfs_set_volfile_server(glfs,
                                   GlusterTransport_lookup[server->value->type],
                                   server->value->u.q_unix.path, 0);
        } else {
            if (parse_uint_full(server->value->u.tcp.port, &port, 10) < 0 ||
                port > 65535) {
                error_setg(errp, "'%s' is not a valid port number",
                           server->value->u.tcp.port);
                errno = EINVAL;
                goto out;
            }
            ret = glfs_set_volfile_server(glfs,
                                   GlusterTransport_lookup[server->value->type],
                                   server->value->u.tcp.host,
                                   (int)port);
        }

        if (ret < 0) {
            goto out;
        }
    }

    ret = glfs_set_logging(glfs, gconf->logfile, gconf->debug);
    if (ret < 0) {
        goto out;
    }

    ret = glfs_init(glfs);
    if (ret) {
        error_setg(errp, "Gluster connection for volume %s, path %s failed"
                         " to connect", gconf->volume, gconf->path);
        for (server = gconf->server; server; server = server->next) {
            if (server->value->type == GLUSTER_TRANSPORT_UNIX) {
                error_append_hint(errp, "hint: failed on socket %s ",
                                  server->value->u.q_unix.path);
            } else {
                error_append_hint(errp, "hint: failed on host %s and port %s ",
                                  server->value->u.tcp.host,
                                  server->value->u.tcp.port);
            }
        }

        error_append_hint(errp, "Please refer to gluster logs for more info\n");

        /* glfs_init sometimes doesn't set errno although docs suggest that */
        if (errno == 0) {
            errno = EINVAL;
        }

        goto out;
    }
    return glfs;

out:
    if (glfs) {
        old_errno = errno;
        glfs_clear_preopened(glfs);
        errno = old_errno;
    }
    return NULL;
}
static int qapi_enum_parse(const char *opt)
{
    int i;

    if (!opt) {
        return GLUSTER_TRANSPORT__MAX;
    }

    for (i = 0; i < GLUSTER_TRANSPORT__MAX; i++) {
        if (!strcmp(opt, GlusterTransport_lookup[i])) {
            return i;
        }
    }

    return i;
}
/*
 * Convert the json formatted command line into qapi.
 */
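/*
 * For illustration, the flattened QDict handled here corresponds to command
 * lines of the form (see also the usage hint in qemu_gluster_init() below):
 *
 *   -drive driver=qcow2,file.driver=gluster,file.volume=testvol,
 *          file.path=/path/a.qcow2,file.server.0.type=tcp,
 *          file.server.0.host=1.2.3.4,file.server.0.port=24007,
 *          file.server.1.type=unix,file.server.1.socket=/var/run/glusterd.socket
 */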
static int qemu_gluster_parse_json(BlockdevOptionsGluster *gconf,
                                   QDict *options, Error **errp)
{
    QemuOpts *opts;
    GlusterServer *gsconf;
    GlusterServerList *curr = NULL;
    QDict *backing_options = NULL;
    Error *local_err = NULL;
    char *str = NULL;
    const char *ptr;
    int num_servers;
    int i;

    /* create opts info from runtime_json_opts list */
    opts = qemu_opts_create(&runtime_json_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        goto out;
    }

    num_servers = qdict_array_entries(options, GLUSTER_OPT_SERVER_PATTERN);
    if (num_servers < 1) {
        error_setg(&local_err, QERR_MISSING_PARAMETER, "server");
        goto out;
    }

    ptr = qemu_opt_get(opts, GLUSTER_OPT_VOLUME);
    if (!ptr) {
        error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_VOLUME);
        goto out;
    }
    gconf->volume = g_strdup(ptr);

    ptr = qemu_opt_get(opts, GLUSTER_OPT_PATH);
    if (!ptr) {
        error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_PATH);
        goto out;
    }
    gconf->path = g_strdup(ptr);
    qemu_opts_del(opts);

    for (i = 0; i < num_servers; i++) {
        str = g_strdup_printf(GLUSTER_OPT_SERVER_PATTERN"%d.", i);
        qdict_extract_subqdict(options, &backing_options, str);

        /* create opts info from runtime_type_opts list */
        opts = qemu_opts_create(&runtime_type_opts, NULL, 0, &error_abort);
        qemu_opts_absorb_qdict(opts, backing_options, &local_err);
        if (local_err) {
            goto out;
        }

        ptr = qemu_opt_get(opts, GLUSTER_OPT_TYPE);
        gsconf = g_new0(GlusterServer, 1);
        gsconf->type = qapi_enum_parse(ptr);
        if (!ptr) {
            error_setg(&local_err, QERR_MISSING_PARAMETER, GLUSTER_OPT_TYPE);
            error_append_hint(&local_err, GERR_INDEX_HINT, i);
            goto out;
        }
        if (gsconf->type == GLUSTER_TRANSPORT__MAX) {
            error_setg(&local_err, QERR_INVALID_PARAMETER_VALUE,
                       GLUSTER_OPT_TYPE, "tcp or unix");
            error_append_hint(&local_err, GERR_INDEX_HINT, i);
            goto out;
        }
        qemu_opts_del(opts);

        if (gsconf->type == GLUSTER_TRANSPORT_TCP) {
            /* create opts info from runtime_tcp_opts list */
            opts = qemu_opts_create(&runtime_tcp_opts, NULL, 0, &error_abort);
            qemu_opts_absorb_qdict(opts, backing_options, &local_err);
            if (local_err) {
                goto out;
            }

            ptr = qemu_opt_get(opts, GLUSTER_OPT_HOST);
            if (!ptr) {
                error_setg(&local_err, QERR_MISSING_PARAMETER,
                           GLUSTER_OPT_HOST);
                error_append_hint(&local_err, GERR_INDEX_HINT, i);
                goto out;
            }
            gsconf->u.tcp.host = g_strdup(ptr);

            ptr = qemu_opt_get(opts, GLUSTER_OPT_PORT);
            if (!ptr) {
                error_setg(&local_err, QERR_MISSING_PARAMETER,
                           GLUSTER_OPT_PORT);
                error_append_hint(&local_err, GERR_INDEX_HINT, i);
                goto out;
            }
            gsconf->u.tcp.port = g_strdup(ptr);

            /* defend against unsupported fields in InetSocketAddress,
             * i.e. @ipv4, @ipv6 and @to
             */
            ptr = qemu_opt_get(opts, GLUSTER_OPT_TO);
            if (ptr) {
                gsconf->u.tcp.has_to = true;
            }
            ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV4);
            if (ptr) {
                gsconf->u.tcp.has_ipv4 = true;
            }
            ptr = qemu_opt_get(opts, GLUSTER_OPT_IPV6);
            if (ptr) {
                gsconf->u.tcp.has_ipv6 = true;
            }
            if (gsconf->u.tcp.has_to) {
                error_setg(&local_err, "Parameter 'to' not supported");
                goto out;
            }
            if (gsconf->u.tcp.has_ipv4 || gsconf->u.tcp.has_ipv6) {
                error_setg(&local_err, "Parameters 'ipv4/ipv6' not supported");
                goto out;
            }
            qemu_opts_del(opts);
        } else {
            /* create opts info from runtime_unix_opts list */
            opts = qemu_opts_create(&runtime_unix_opts, NULL, 0, &error_abort);
            qemu_opts_absorb_qdict(opts, backing_options, &local_err);
            if (local_err) {
                goto out;
            }

            ptr = qemu_opt_get(opts, GLUSTER_OPT_SOCKET);
            if (!ptr) {
                error_setg(&local_err, QERR_MISSING_PARAMETER,
                           GLUSTER_OPT_SOCKET);
                error_append_hint(&local_err, GERR_INDEX_HINT, i);
                goto out;
            }
            gsconf->u.q_unix.path = g_strdup(ptr);
            qemu_opts_del(opts);
        }

        if (gconf->server == NULL) {
            gconf->server = g_new0(GlusterServerList, 1);
            gconf->server->value = gsconf;
            curr = gconf->server;
        } else {
            curr->next = g_new0(GlusterServerList, 1);
            curr->next->value = gsconf;
            curr = curr->next;
        }

        qdict_del(backing_options, str);
        g_free(str);
        str = NULL;
    }

    return 0;

out:
    error_propagate(errp, local_err);
    qemu_opts_del(opts);
    if (str) {
        qdict_del(backing_options, str);
        g_free(str);
    }
    errno = EINVAL;
    return -errno;
}
static struct glfs *qemu_gluster_init(BlockdevOptionsGluster *gconf,
                                      const char *filename,
                                      QDict *options, Error **errp)
{
    int ret;

    if (filename) {
        ret = qemu_gluster_parse_uri(gconf, filename);
        if (ret < 0) {
            error_setg(errp, "invalid URI");
            error_append_hint(errp, "Usage: file=gluster[+transport]://"
                                    "[host[:port]]/volume/path[?socket=...]"
                                    "[,file.debug=N]"
                                    "[,file.logfile=/path/filename.log]\n");
            errno = -ret;
            return NULL;
        }
    } else {
        ret = qemu_gluster_parse_json(gconf, options, errp);
        if (ret < 0) {
            error_append_hint(errp, "Usage: "
                             "-drive driver=qcow2,file.driver=gluster,"
                             "file.volume=testvol,file.path=/path/a.qcow2"
                             "[,file.debug=9]"
                             "[,file.logfile=/path/filename.log],"
                             "file.server.0.type=tcp,"
                             "file.server.0.host=1.2.3.4,"
                             "file.server.0.port=24007,"
                             "file.server.1.type=unix,"
                             "file.server.1.socket=/var/run/glusterd.socket ..."
                             "\n");
            errno = -ret;
            return NULL;
        }
    }

    return qemu_gluster_glfs_init(gconf, errp);
}
static void qemu_gluster_complete_aio(void *opaque)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)opaque;

    qemu_coroutine_enter(acb->coroutine);
}

/*
 * AIO callback routine called from GlusterFS thread.
 */
static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)arg;

    if (!ret || ret == acb->size) {
        acb->ret = 0; /* Success */
    } else if (ret < 0) {
        acb->ret = -errno; /* Read/Write failed */
    } else {
        acb->ret = -EIO; /* Partial read/write - fail it */
    }

    aio_bh_schedule_oneshot(acb->aio_context, qemu_gluster_complete_aio, acb);
}
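/*
 * Note on the callback above: gluster_finish_aiocb() is invoked on a libgfapi
 * thread rather than in QEMU's event loop, so it must not re-enter the
 * request coroutine directly.  Scheduling a one-shot bottom half on
 * acb->aio_context defers qemu_coroutine_enter() to the AioContext that
 * issued the request.
 */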
static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
{
    assert(open_flags != NULL);

    *open_flags |= O_BINARY;

    if (bdrv_flags & BDRV_O_RDWR) {
        *open_flags |= O_RDWR;
    } else {
        *open_flags |= O_RDONLY;
    }

    if ((bdrv_flags & BDRV_O_NOCACHE)) {
        *open_flags |= O_DIRECT;
    }
}
/*
 * Do SEEK_DATA/HOLE to detect if it is functional. Older broken versions of
 * gfapi incorrectly return the current offset when SEEK_DATA/HOLE is used.
 * - Corrected versions return -1 and set errno to EINVAL.
 * - Versions that support SEEK_DATA/HOLE correctly, will return -1 and set
 *   errno to ENXIO when SEEK_DATA is called with a position of EOF.
 */
static bool qemu_gluster_test_seek(struct glfs_fd *fd)
{
    off_t ret = 0;

#if defined SEEK_HOLE && defined SEEK_DATA
    off_t eof;

    eof = glfs_lseek(fd, 0, SEEK_END);
    if (eof < 0) {
        /* this should never occur */
        return false;
    }

    /* this should always fail with ENXIO if SEEK_DATA is supported */
    ret = glfs_lseek(fd, eof, SEEK_DATA);
#endif

    return (ret < 0) && (errno == ENXIO);
}
static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
                             int bdrv_flags, Error **errp)
{
    BDRVGlusterState *s = bs->opaque;
    int open_flags = 0;
    int ret = 0;
    BlockdevOptionsGluster *gconf = NULL;
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename, *logfile;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    filename = qemu_opt_get(opts, GLUSTER_OPT_FILENAME);

    s->debug = qemu_opt_get_number(opts, GLUSTER_OPT_DEBUG,
                                   GLUSTER_DEBUG_DEFAULT);
    if (s->debug < 0) {
        s->debug = 0;
    } else if (s->debug > GLUSTER_DEBUG_MAX) {
        s->debug = GLUSTER_DEBUG_MAX;
    }

    gconf = g_new0(BlockdevOptionsGluster, 1);
    gconf->debug = s->debug;
    gconf->has_debug = true;

    logfile = qemu_opt_get(opts, GLUSTER_OPT_LOGFILE);
    s->logfile = g_strdup(logfile ? logfile : GLUSTER_LOGFILE_DEFAULT);

    gconf->logfile = g_strdup(s->logfile);
    gconf->has_logfile = true;

    s->glfs = qemu_gluster_init(gconf, filename, options, errp);
    if (!s->glfs) {
        ret = -errno;
        goto out;
    }

#ifdef CONFIG_GLUSTERFS_XLATOR_OPT
    /* Without this, if fsync fails for a recoverable reason (for instance,
     * ENOSPC), gluster will dump its cache, preventing retries.  This means
     * almost certain data loss.  Not all gluster versions support the
     * 'resync-failed-syncs-after-fsync' key value, but there is no way to
     * discover during runtime if it is supported (this api returns success for
     * unknown key/value pairs) */
    ret = glfs_set_xlator_option(s->glfs, "*-write-behind",
                                          "resync-failed-syncs-after-fsync",
                                          "on");
    if (ret < 0) {
        error_setg_errno(errp, errno, "Unable to set xlator key/value pair");
        ret = -errno;
        goto out;
    }
#endif

    qemu_gluster_parse_flags(bdrv_flags, &open_flags);

    s->fd = glfs_open(s->glfs, gconf->path, open_flags);
    if (!s->fd) {
        ret = -errno;
        goto out;
    }

    s->supports_seek_data = qemu_gluster_test_seek(s->fd);

out:
    qemu_opts_del(opts);
    qapi_free_BlockdevOptionsGluster(gconf);
    if (!ret) {
        return ret;
    }
    g_free(s->logfile);
    if (s->fd) {
        glfs_close(s->fd);
    }

    glfs_clear_preopened(s->glfs);

    return ret;
}
static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
                                       BlockReopenQueue *queue, Error **errp)
{
    int ret = 0;
    BDRVGlusterState *s;
    BDRVGlusterReopenState *reop_s;
    BlockdevOptionsGluster *gconf;
    int open_flags = 0;

    assert(state != NULL);
    assert(state->bs != NULL);

    s = state->bs->opaque;

    state->opaque = g_new0(BDRVGlusterReopenState, 1);
    reop_s = state->opaque;

    qemu_gluster_parse_flags(state->flags, &open_flags);

    gconf = g_new0(BlockdevOptionsGluster, 1);
    gconf->debug = s->debug;
    gconf->has_debug = true;
    gconf->logfile = g_strdup(s->logfile);
    gconf->has_logfile = true;
    reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, NULL, errp);
    if (reop_s->glfs == NULL) {
        ret = -errno;
        goto exit;
    }

#ifdef CONFIG_GLUSTERFS_XLATOR_OPT
    ret = glfs_set_xlator_option(reop_s->glfs, "*-write-behind",
                                 "resync-failed-syncs-after-fsync", "on");
    if (ret < 0) {
        error_setg_errno(errp, errno, "Unable to set xlator key/value pair");
        ret = -errno;
        goto exit;
    }
#endif

    reop_s->fd = glfs_open(reop_s->glfs, gconf->path, open_flags);
    if (reop_s->fd == NULL) {
        /* reop_s->glfs will be cleaned up in _abort */
        ret = -errno;
        goto exit;
    }

exit:
    /* state->opaque will be freed in either the _abort or _commit */
    qapi_free_BlockdevOptionsGluster(gconf);
    return ret;
}
static void qemu_gluster_reopen_commit(BDRVReopenState *state)
{
    BDRVGlusterReopenState *reop_s = state->opaque;
    BDRVGlusterState *s = state->bs->opaque;

    /* close the old */
    if (s->fd) {
        glfs_close(s->fd);
    }

    glfs_clear_preopened(s->glfs);

    /* use the newly opened image / connection */
    s->fd = reop_s->fd;
    s->glfs = reop_s->glfs;

    g_free(state->opaque);
    state->opaque = NULL;
}

static void qemu_gluster_reopen_abort(BDRVReopenState *state)
{
    BDRVGlusterReopenState *reop_s = state->opaque;

    if (reop_s == NULL) {
        return;
    }

    if (reop_s->fd) {
        glfs_close(reop_s->fd);
    }

    glfs_clear_preopened(reop_s->glfs);

    g_free(state->opaque);
    state->opaque = NULL;
}
#ifdef CONFIG_GLUSTERFS_ZEROFILL
static coroutine_fn int qemu_gluster_co_pwrite_zeroes(BlockDriverState *bs,
                                                      int64_t offset,
                                                      int size,
                                                      BdrvRequestFlags flags)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;

    acb.size = size;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    ret = glfs_zerofill_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
    if (ret < 0) {
        return -errno;
    }

    qemu_coroutine_yield();
    return acb.ret;
}

static inline bool gluster_supports_zerofill(void)
{
    return true;
}

static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
                                        int64_t size)
{
    return glfs_zerofill(fd, offset, size);
}

#else
static inline bool gluster_supports_zerofill(void)
{
    return false;
}

static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
                                        int64_t size)
{
    return 0;
}
#endif
static int qemu_gluster_create(const char *filename,
                               QemuOpts *opts, Error **errp)
{
    BlockdevOptionsGluster *gconf;
    struct glfs *glfs;
    struct glfs_fd *fd;
    int ret = 0;
    int prealloc = 0;
    int64_t total_size = 0;
    char *tmp = NULL;

    gconf = g_new0(BlockdevOptionsGluster, 1);
    gconf->debug = qemu_opt_get_number_del(opts, GLUSTER_OPT_DEBUG,
                                           GLUSTER_DEBUG_DEFAULT);
    if (gconf->debug < 0) {
        gconf->debug = 0;
    } else if (gconf->debug > GLUSTER_DEBUG_MAX) {
        gconf->debug = GLUSTER_DEBUG_MAX;
    }
    gconf->has_debug = true;

    gconf->logfile = qemu_opt_get_del(opts, GLUSTER_OPT_LOGFILE);
    if (!gconf->logfile) {
        gconf->logfile = g_strdup(GLUSTER_LOGFILE_DEFAULT);
    }
    gconf->has_logfile = true;

    glfs = qemu_gluster_init(gconf, filename, NULL, errp);
    if (!glfs) {
        ret = -errno;
        goto out;
    }

    total_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);

    tmp = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
    if (!tmp || !strcmp(tmp, "off")) {
        prealloc = 0;
    } else if (!strcmp(tmp, "full") && gluster_supports_zerofill()) {
        prealloc = 1;
    } else {
        error_setg(errp, "Invalid preallocation mode: '%s'"
                         " or GlusterFS doesn't support zerofill API", tmp);
        ret = -EINVAL;
        goto out;
    }

    fd = glfs_creat(glfs, gconf->path,
                    O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
    if (!fd) {
        ret = -errno;
    } else {
        if (!glfs_ftruncate(fd, total_size)) {
            if (prealloc && qemu_gluster_zerofill(fd, 0, total_size)) {
                ret = -errno;
            }
        } else {
            ret = -errno;
        }

        if (glfs_close(fd) != 0) {
            ret = -errno;
        }
    }
out:
    g_free(tmp);
    qapi_free_BlockdevOptionsGluster(gconf);
    glfs_clear_preopened(glfs);
    return ret;
}
int qemu_gluster_co_rw(BlockDriverState
*bs
,
1075 int64_t sector_num
, int nb_sectors
,
1076 QEMUIOVector
*qiov
, int write
)
1080 BDRVGlusterState
*s
= bs
->opaque
;
1081 size_t size
= nb_sectors
* BDRV_SECTOR_SIZE
;
1082 off_t offset
= sector_num
* BDRV_SECTOR_SIZE
;
1086 acb
.coroutine
= qemu_coroutine_self();
1087 acb
.aio_context
= bdrv_get_aio_context(bs
);
1090 ret
= glfs_pwritev_async(s
->fd
, qiov
->iov
, qiov
->niov
, offset
, 0,
1091 gluster_finish_aiocb
, &acb
);
1093 ret
= glfs_preadv_async(s
->fd
, qiov
->iov
, qiov
->niov
, offset
, 0,
1094 gluster_finish_aiocb
, &acb
);
1101 qemu_coroutine_yield();
static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
{
    int ret;
    BDRVGlusterState *s = bs->opaque;

    ret = glfs_ftruncate(s->fd, offset);
    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
                                              int64_t sector_num,
                                              int nb_sectors,
                                              QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
}

static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
                                               int64_t sector_num,
                                               int nb_sectors,
                                               QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
}
static void qemu_gluster_close(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;

    g_free(s->logfile);
    if (s->fd) {
        glfs_close(s->fd);
        s->fd = NULL;
    }

    glfs_clear_preopened(s->glfs);
}
static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;

    acb.size = 0;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    ret = glfs_fsync_async(s->fd, gluster_finish_aiocb, &acb);
    if (ret < 0) {
        ret = -errno;
        goto error;
    }

    qemu_coroutine_yield();
    ret = acb.ret;
    if (ret < 0) {
        goto error;
    }

    return ret;

error:
    /* Some versions of Gluster (3.5.6 -> 3.5.8?) will not retain their cache
     * after an fsync failure, so we have no way of allowing the guest to safely
     * continue.  Gluster versions prior to 3.5.6 don't retain the cache
     * either, but will invalidate the fd on error, so this is again our only
     * option.
     *
     * The 'resync-failed-syncs-after-fsync' xlator option for the
     * write-behind cache will cause later gluster versions to retain its
     * cache after error, so long as the fd remains open.  However, we
     * currently have no way of knowing if this option is supported.
     *
     * TODO: Once gluster provides a way for us to determine if the option
     * is supported, bypass the closure and setting drv to NULL. */
    qemu_gluster_close(bs);

    bs->drv = NULL;
    return ret;
}
#ifdef CONFIG_GLUSTERFS_DISCARD
static coroutine_fn int qemu_gluster_co_pdiscard(BlockDriverState *bs,
                                                 int64_t offset, int size)
{
    int ret;
    GlusterAIOCB acb;
    BDRVGlusterState *s = bs->opaque;

    acb.size = 0;
    acb.ret = 0;
    acb.coroutine = qemu_coroutine_self();
    acb.aio_context = bdrv_get_aio_context(bs);

    ret = glfs_discard_async(s->fd, offset, size, gluster_finish_aiocb, &acb);
    if (ret < 0) {
        return -errno;
    }

    qemu_coroutine_yield();
    return acb.ret;
}
#endif
static int64_t qemu_gluster_getlength(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    int64_t ret;

    ret = glfs_lseek(s->fd, 0, SEEK_END);
    if (ret < 0) {
        return -errno;
    } else {
        return ret;
    }
}

static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    struct stat st;
    int ret;

    ret = glfs_fstat(s->fd, &st);
    if (ret < 0) {
        return -errno;
    } else {
        return st.st_blocks * 512;
    }
}

static int qemu_gluster_has_zero_init(BlockDriverState *bs)
{
    /* GlusterFS volume could be backed by a block device */
    return 0;
}
/*
 * Find allocation range in @bs around offset @start.
 * May change underlying file descriptor's file offset.
 * If @start is not in a hole, store @start in @data, and the
 * beginning of the next hole in @hole, and return 0.
 * If @start is in a non-trailing hole, store @start in @hole and the
 * beginning of the next non-hole in @data, and return 0.
 * If @start is in a trailing hole or beyond EOF, return -ENXIO.
 * If we can't find out, return a negative errno other than -ENXIO.
 *
 * (Shamefully copied from file-posix.c, only minuscule adaptations.)
 */
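/*
 * Worked example (illustrative): for an image whose first 64 KiB are
 * allocated and whose next 64 KiB are a hole, find_allocation(bs, 0, ...)
 * stores 0 in *data and 65536 in *hole and returns 0, while
 * find_allocation(bs, 65536, ...) stores 65536 in *hole and 131072 in *data.
 * A call with @start past the last data byte returns -ENXIO.
 */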
static int find_allocation(BlockDriverState *bs, off_t start,
                           off_t *data, off_t *hole)
{
    BDRVGlusterState *s = bs->opaque;

    if (!s->supports_seek_data) {
        return -ENOTSUP;
    }

#if defined SEEK_HOLE && defined SEEK_DATA
    off_t offs;

    /*
     * SEEK_DATA cases:
     * D1. offs == start: start is in data
     * D2. offs > start: start is in a hole, next data at offs
     * D3. offs < 0, errno = ENXIO: either start is in a trailing hole
     *                              or start is beyond EOF
     *     If the latter happens, the file has been truncated behind
     *     our back since we opened it.  All bets are off then.
     *     Treating like a trailing hole is simplest.
     * D4. offs < 0, errno != ENXIO: we learned nothing
     */
    offs = glfs_lseek(s->fd, start, SEEK_DATA);
    if (offs < 0) {
        return -errno;          /* D3 or D4 */
    }
    assert(offs >= start);

    if (offs > start) {
        /* D2: in hole, next data at offs */
        *hole = start;
        *data = offs;
        return 0;
    }

    /* D1: in data, end not yet known */

    /*
     * SEEK_HOLE cases:
     * H1. offs == start: start is in a hole
     *     If this happens here, a hole has been dug behind our back
     *     since the previous lseek().
     * H2. offs > start: either start is in data, next hole at offs,
     *                   or start is in trailing hole, EOF at offs
     *     Linux treats trailing holes like any other hole: offs ==
     *     start.  Solaris seeks to EOF instead: offs > start (blech).
     *     If that happens here, a hole has been dug behind our back
     *     since the previous lseek().
     * H3. offs < 0, errno = ENXIO: start is beyond EOF
     *     If this happens, the file has been truncated behind our
     *     back since we opened it.  Treat it like a trailing hole.
     * H4. offs < 0, errno != ENXIO: we learned nothing
     *     Pretend we know nothing at all, i.e. "forget" about D1.
     */
    offs = glfs_lseek(s->fd, start, SEEK_HOLE);
    if (offs < 0) {
        return -errno;          /* D1 and (H3 or H4) */
    }
    assert(offs >= start);

    if (offs > start) {
        /*
         * D1 and H2: either in data, next hole at offs, or it was in
         * data but is now in a trailing hole.  In the latter case,
         * all bets are off.  Treating it as if there was data all
         * the way to EOF is safe, so simply do that.
         */
        *data = start;
        *hole = offs;
        return 0;
    }

    /* D1 and H1 */
    return -EBUSY;
#else
    return -ENOTSUP;
#endif
}
/*
 * Returns the allocation status of the specified sectors.
 *
 * If 'sector_num' is beyond the end of the disk image the return value is 0
 * and 'pnum' is set to 0.
 *
 * 'pnum' is set to the number of sectors (including and immediately following
 * the specified sector) that are known to be in the same
 * allocated/unallocated state.
 *
 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
 * beyond the end of the disk image it will be clamped.
 *
 * (Based on raw_co_get_block_status() from file-posix.c.)
 */
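/*
 * Worked example (illustrative): with 512-byte sectors, if find_allocation()
 * reports data at the queried offset and the next hole 1 MiB further on, then
 * *pnum is set to MIN(nb_sectors, 2048) and the function returns
 * BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | start.
 */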
static int64_t coroutine_fn qemu_gluster_co_get_block_status(
        BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
        BlockDriverState **file)
{
    BDRVGlusterState *s = bs->opaque;
    off_t start, data = 0, hole = 0;
    int64_t total_size;
    int ret = -EINVAL;

    if (!s->fd) {
        return ret;
    }

    start = sector_num * BDRV_SECTOR_SIZE;
    total_size = bdrv_getlength(bs);
    if (total_size < 0) {
        return total_size;
    } else if (start >= total_size) {
        *pnum = 0;
        return 0;
    } else if (start + nb_sectors * BDRV_SECTOR_SIZE > total_size) {
        nb_sectors = DIV_ROUND_UP(total_size - start, BDRV_SECTOR_SIZE);
    }

    ret = find_allocation(bs, start, &data, &hole);
    if (ret == -ENXIO) {
        /* Trailing hole */
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_ZERO;
    } else if (ret < 0) {
        /* No info available, so pretend there are no holes */
        *pnum = nb_sectors;
        ret = BDRV_BLOCK_DATA;
    } else if (data == start) {
        /* On a data extent, compute sectors to the end of the extent,
         * possibly including a partial sector at EOF. */
        *pnum = MIN(nb_sectors, DIV_ROUND_UP(hole - start, BDRV_SECTOR_SIZE));
        ret = BDRV_BLOCK_DATA;
    } else {
        /* On a hole, compute sectors to the beginning of the next extent. */
        assert(hole == start);
        *pnum = MIN(nb_sectors, (data - start) / BDRV_SECTOR_SIZE);
        ret = BDRV_BLOCK_ZERO;
    }

    *file = bs;

    return ret | BDRV_BLOCK_OFFSET_VALID | start;
}
static BlockDriver bdrv_gluster = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = false,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
#endif
    .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
    .create_opts                  = &qemu_gluster_create_opts,
};
static BlockDriver bdrv_gluster_tcp = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+tcp",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = false,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
#endif
    .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
    .create_opts                  = &qemu_gluster_create_opts,
};
static BlockDriver bdrv_gluster_unix = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+unix",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
#endif
    .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
    .create_opts                  = &qemu_gluster_create_opts,
};
/* rdma is deprecated (actually never supported for volfile fetch).
 * Let's maintain it for protocol compatibility, to make sure things
 * won't break immediately. For now, gluster+rdma will fall back to gluster+tcp
 * protocol with a warning.
 * TODO: remove gluster+rdma interface support
 */
static BlockDriver bdrv_gluster_rdma = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+rdma",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_pdiscard             = qemu_gluster_co_pdiscard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_pwrite_zeroes        = qemu_gluster_co_pwrite_zeroes,
#endif
    .bdrv_co_get_block_status     = qemu_gluster_co_get_block_status,
    .create_opts                  = &qemu_gluster_create_opts,
};
static void bdrv_gluster_init(void)
{
    bdrv_register(&bdrv_gluster_rdma);
    bdrv_register(&bdrv_gluster_unix);
    bdrv_register(&bdrv_gluster_tcp);
    bdrv_register(&bdrv_gluster);
}

block_init(bdrv_gluster_init);