/*
 * GlusterFS backend for QEMU
 *
 * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
 *
 * Pipe handling mechanism in AIO implementation is derived from
 * block/rbd.c. Hence,
 *
 * Copyright (C) 2010-2011 Christian Brunner <chb@muc.de>,
 *                         Josh Durgin <josh.durgin@dreamhost.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include <glusterfs/api/glfs.h>
#include "block/block_int.h"
#include "qemu/sockets.h"
#include "qemu/uri.h"
typedef struct GlusterAIOCB {
    BlockDriverAIOCB common;
    int64_t size;
    int ret;
    bool *finished;
} GlusterAIOCB;
typedef struct BDRVGlusterState {
    struct glfs *glfs;
    int fds[2];
    struct glfs_fd *fd;
    int qemu_aio_count;
    int event_reader_pos;
    GlusterAIOCB *event_acb;
} BDRVGlusterState;
#define GLUSTER_FD_READ  0
#define GLUSTER_FD_WRITE 1
typedef struct GlusterConf {
    char *server;
    int port;
    char *volname;
    char *image;
    char *transport;
} GlusterConf;
static void qemu_gluster_gconf_free(GlusterConf *gconf)
{
    g_free(gconf->server);
    g_free(gconf->volname);
    g_free(gconf->image);
    g_free(gconf->transport);
    g_free(gconf);
}
static int parse_volume_options(GlusterConf *gconf, char *path)
{
    char *p, *q;

    if (!path) {
        return -EINVAL;
    }

    /* volume: first path component after the leading '/' */
    p = q = path + strspn(path, "/");
    p += strcspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->volname = g_strndup(q, p - q);

    /* image: the rest of the path within the volume */
    p += strspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->image = g_strdup(p);
    return 0;
}
/*
 * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
 *
 * 'gluster' is the protocol.
 *
 * 'transport' specifies the transport type used to connect to gluster
 * management daemon (glusterd). Valid transport types are
 * tcp, unix and rdma. If a transport type isn't specified, then tcp
 * type is assumed.
 *
 * 'server' specifies the server where the volume file specification for
 * the given volume resides. This can be either a hostname, an ipv4 address
 * or an ipv6 address; an ipv6 address needs to be within square brackets [ ].
 * If the transport type is 'unix', then the 'server' field should not be
 * specified. Instead, the 'socket' field needs to be populated with the
 * path to the unix domain socket.
 *
 * 'port' is the port number on which glusterd is listening. This is optional
 * and if not specified, QEMU will send 0, which makes gluster use the
 * default port. If the transport type is unix, then 'port' should not be
 * specified.
 *
 * 'volname' is the name of the gluster volume which contains the VM image.
 *
 * 'image' is the path to the actual VM image that resides on the gluster
 * volume.
 *
 * Examples:
 *
 * file=gluster://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
 * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
 * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
 */
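/*
 * For example, file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img should
 * parse into transport="tcp", server="1.2.3.4", port=24007,
 * volname="testvol" and image="dir/a.img"; see qemu_gluster_parseuri() and
 * parse_volume_options() below.
 */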
static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
{
    URI *uri;
    QueryParams *qp = NULL;
    bool is_unix = false;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!strcmp(uri->scheme, "gluster")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+unix")) {
        gconf->transport = g_strdup("unix");
        is_unix = true;
    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
        gconf->transport = g_strdup("rdma");
    } else {
        ret = -EINVAL;
        goto out;
    }

    ret = parse_volume_options(gconf, uri->path);
    if (ret < 0) {
        goto out;
    }

    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        if (uri->server || uri->port) {
            ret = -EINVAL;
            goto out;
        }
        if (strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        gconf->server = g_strdup(qp->p[0].value);
    } else {
        gconf->server = g_strdup(uri->server);
        gconf->port = uri->port;
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}
static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename)
{
    struct glfs *glfs = NULL;
    int ret;
    int old_errno;

    ret = qemu_gluster_parseuri(gconf, filename);
    if (ret < 0) {
        error_report("Usage: file=gluster[+transport]://[server[:port]]/"
            "volname/image[?socket=...]");
        errno = -ret;
        goto out;
    }

    glfs = glfs_new(gconf->volname);
    if (!glfs) {
        goto out;
    }

    ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
            gconf->port);
    if (ret < 0) {
        goto out;
    }

    /*
     * TODO: Use GF_LOG_ERROR instead of the hard-coded value of 4 here when
     * GlusterFS makes GF_LOG_* macros available to libgfapi users.
     */
    ret = glfs_set_logging(glfs, "-", 4);
    if (ret < 0) {
        goto out;
    }

    ret = glfs_init(glfs);
    if (ret) {
        error_report("Gluster connection failed for server=%s port=%d "
            "volume=%s image=%s transport=%s", gconf->server, gconf->port,
            gconf->volname, gconf->image, gconf->transport);
        goto out;
    }
    return glfs;

out:
    if (glfs) {
        old_errno = errno;
        glfs_fini(glfs);
        errno = old_errno;
    }
    return NULL;
}
static void qemu_gluster_complete_aio(GlusterAIOCB *acb, BDRVGlusterState *s)
{
    int ret;
    bool *finished = acb->finished;
    BlockDriverCompletionFunc *cb = acb->common.cb;
    void *opaque = acb->common.opaque;

    if (!acb->ret || acb->ret == acb->size) {
        ret = 0; /* Success */
    } else if (acb->ret < 0) {
        ret = acb->ret; /* Read/Write failed */
    } else {
        ret = -EIO; /* Partial read/write - fail it */
    }

    s->qemu_aio_count--;
    qemu_aio_release(acb);
    cb(opaque, ret);
    if (finished) {
        *finished = true;
    }
}
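/*
 * glfs_*_async() completion callbacks run in a gluster thread, not in the
 * QEMU iothread.  gluster_finish_aiocb() therefore writes the GlusterAIOCB
 * pointer to s->fds[GLUSTER_FD_WRITE]; the fd handler below runs in QEMU's
 * aio context, reassembles that pointer from the read end of the pipe
 * (possibly across several short reads, tracked by s->event_reader_pos) and
 * then completes the request via qemu_gluster_complete_aio().
 */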
static void qemu_gluster_aio_event_reader(void *opaque)
{
    BDRVGlusterState *s = opaque;
    ssize_t ret;

    do {
        char *p = (char *)&s->event_acb;

        ret = read(s->fds[GLUSTER_FD_READ], p + s->event_reader_pos,
                   sizeof(s->event_acb) - s->event_reader_pos);
        if (ret > 0) {
            s->event_reader_pos += ret;
            if (s->event_reader_pos == sizeof(s->event_acb)) {
                s->event_reader_pos = 0;
                qemu_gluster_complete_aio(s->event_acb, s);
            }
        }
    } while (ret < 0 && errno == EINTR);
}
static int qemu_gluster_aio_flush_cb(void *opaque)
{
    BDRVGlusterState *s = opaque;

    return (s->qemu_aio_count > 0);
}
/* TODO Convert to fine grained options */
static QemuOptsList runtime_opts = {
    .name = "gluster",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the gluster image",
        },
        { /* end of list */ }
    },
};
static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
                             int bdrv_flags)
{
    BDRVGlusterState *s = bs->opaque;
    int open_flags = O_BINARY;
    int ret = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename;

    opts = qemu_opts_create_nofail(&runtime_opts);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (error_is_set(&local_err)) {
        qerror_report_err(local_err);
        error_free(local_err);
        ret = -EINVAL;
        goto out;
    }

    filename = qemu_opt_get(opts, "filename");

    s->glfs = qemu_gluster_init(gconf, filename);
    if (!s->glfs) {
        ret = -errno;
        goto out;
    }

    if (bdrv_flags & BDRV_O_RDWR) {
        open_flags |= O_RDWR;
    } else {
        open_flags |= O_RDONLY;
    }

    if ((bdrv_flags & BDRV_O_NOCACHE)) {
        open_flags |= O_DIRECT;
    }

    s->fd = glfs_open(s->glfs, gconf->image, open_flags);
    if (!s->fd) {
        ret = -errno;
        goto out;
    }

    ret = qemu_pipe(s->fds);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }
    fcntl(s->fds[GLUSTER_FD_READ], F_SETFL, O_NONBLOCK);
    qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ],
        qemu_gluster_aio_event_reader, NULL, qemu_gluster_aio_flush_cb, s);

out:
    qemu_opts_del(opts);
    qemu_gluster_gconf_free(gconf);
    if (!ret) {
        return ret;
    }
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }
    return ret;
}
static int qemu_gluster_create(const char *filename,
                               QEMUOptionParameter *options)
{
    struct glfs *glfs;
    struct glfs_fd *fd;
    int ret = 0;
    int64_t total_size = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));

    glfs = qemu_gluster_init(gconf, filename);
    if (!glfs) {
        ret = -errno;
        goto out;
    }

    /* Read out options */
    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            total_size = options->value.n / BDRV_SECTOR_SIZE;
        }
        options++;
    }

    fd = glfs_creat(glfs, gconf->image,
        O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
    if (!fd) {
        ret = -errno;
    } else {
        if (glfs_ftruncate(fd, total_size * BDRV_SECTOR_SIZE) != 0) {
            ret = -errno;
        }
        if (glfs_close(fd) != 0) {
            ret = -errno;
        }
    }
out:
    qemu_gluster_gconf_free(gconf);
    if (glfs) {
        glfs_fini(glfs);
    }
    return ret;
}
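/*
 * For example (assuming a gluster volume "testvol" served by glusterd on
 * 1.2.3.4, as in the URI examples above), an image could be created with
 *
 *   qemu-img create gluster://1.2.3.4/testvol/a.img 10G
 *
 * which ends up in qemu_gluster_create() with BLOCK_OPT_SIZE set to 10G.
 */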
static void qemu_gluster_aio_cancel(BlockDriverAIOCB *blockacb)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)blockacb;
    bool finished = false;

    acb->finished = &finished;
    while (!finished) {
        qemu_aio_wait();
    }
}
static const AIOCBInfo gluster_aiocb_info = {
    .aiocb_size = sizeof(GlusterAIOCB),
    .cancel = qemu_gluster_aio_cancel,
};
static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)arg;
    BlockDriverState *bs = acb->common.bs;
    BDRVGlusterState *s = bs->opaque;
    int retval;

    acb->ret = ret;
    retval = qemu_write_full(s->fds[GLUSTER_FD_WRITE], &acb, sizeof(acb));
    if (retval != sizeof(acb)) {
        /*
         * Gluster AIO callback thread failed to notify the waiting
         * QEMU thread about IO completion.
         *
         * Complete this IO request and make the disk inaccessible for
         * subsequent reads and writes.
         */
        error_report("Gluster failed to notify QEMU about IO completion");

        qemu_mutex_lock_iothread(); /* We are in gluster thread context */
        acb->common.cb(acb->common.opaque, -EIO);
        qemu_aio_release(acb);
        s->qemu_aio_count--;
        close(s->fds[GLUSTER_FD_READ]);
        close(s->fds[GLUSTER_FD_WRITE]);
        qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL,
            NULL);
        bs->drv = NULL; /* Make the disk inaccessible */
        qemu_mutex_unlock_iothread();
    }
}
static BlockDriverAIOCB *qemu_gluster_aio_rw(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque, int write)
{
    int ret;
    GlusterAIOCB *acb;
    BDRVGlusterState *s = bs->opaque;
    size_t size;
    off_t offset;

    offset = sector_num * BDRV_SECTOR_SIZE;
    size = nb_sectors * BDRV_SECTOR_SIZE;
    s->qemu_aio_count++;

    acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
    acb->size = size;
    acb->ret = 0;
    acb->finished = NULL;

    if (write) {
        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    } else {
        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    }

    if (ret < 0) {
        goto out;
    }
    return &acb->common;

out:
    s->qemu_aio_count--;
    qemu_aio_release(acb);
    return NULL;
}
static BlockDriverAIOCB *qemu_gluster_aio_readv(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return qemu_gluster_aio_rw(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}
static BlockDriverAIOCB *qemu_gluster_aio_writev(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return qemu_gluster_aio_rw(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}
static BlockDriverAIOCB *qemu_gluster_aio_flush(BlockDriverState *bs,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    int ret;
    GlusterAIOCB *acb;
    BDRVGlusterState *s = bs->opaque;

    acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
    acb->size = 0;
    acb->ret = 0;
    acb->finished = NULL;
    s->qemu_aio_count++;

    ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        goto out;
    }
    return &acb->common;

out:
    s->qemu_aio_count--;
    qemu_aio_release(acb);
    return NULL;
}
static int64_t qemu_gluster_getlength(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    int64_t ret;

    ret = glfs_lseek(s->fd, 0, SEEK_END);
    if (ret < 0) {
        return -errno;
    } else {
        return ret;
    }
}
static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    struct stat st;
    int ret;

    ret = glfs_fstat(s->fd, &st);
    if (ret < 0) {
        return -errno;
    } else {
        return st.st_blocks * 512;
    }
}
static void qemu_gluster_close(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;

    close(s->fds[GLUSTER_FD_READ]);
    close(s->fds[GLUSTER_FD_WRITE]);
    qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL, NULL);

    if (s->fd) {
        glfs_close(s->fd);
        s->fd = NULL;
    }
    glfs_fini(s->glfs);
}
static QEMUOptionParameter qemu_gluster_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
    { NULL }
};
static BlockDriver bdrv_gluster = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_aio_readv               = qemu_gluster_aio_readv,
    .bdrv_aio_writev              = qemu_gluster_aio_writev,
    .bdrv_aio_flush               = qemu_gluster_aio_flush,
    .create_options               = qemu_gluster_create_options,
};
static BlockDriver bdrv_gluster_tcp = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+tcp",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_aio_readv               = qemu_gluster_aio_readv,
    .bdrv_aio_writev              = qemu_gluster_aio_writev,
    .bdrv_aio_flush               = qemu_gluster_aio_flush,
    .create_options               = qemu_gluster_create_options,
};
static BlockDriver bdrv_gluster_unix = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+unix",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_aio_readv               = qemu_gluster_aio_readv,
    .bdrv_aio_writev              = qemu_gluster_aio_writev,
    .bdrv_aio_flush               = qemu_gluster_aio_flush,
    .create_options               = qemu_gluster_create_options,
};
static BlockDriver bdrv_gluster_rdma = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+rdma",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_aio_readv               = qemu_gluster_aio_readv,
    .bdrv_aio_writev              = qemu_gluster_aio_writev,
    .bdrv_aio_flush               = qemu_gluster_aio_flush,
    .create_options               = qemu_gluster_create_options,
};
static void bdrv_gluster_init(void)
{
    bdrv_register(&bdrv_gluster_rdma);
    bdrv_register(&bdrv_gluster_unix);
    bdrv_register(&bdrv_gluster_tcp);
    bdrv_register(&bdrv_gluster);
}

block_init(bdrv_gluster_init);
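/*
 * Typical usage (assuming the same volume as in the URI examples above):
 *
 *   qemu-system-x86_64 -drive file=gluster://1.2.3.4/testvol/a.img,if=virtio
 *
 * The gluster[+transport] URI is routed to one of the BlockDrivers
 * registered above via its protocol_name, and from there through
 * qemu_gluster_open().
 */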