/*
 * GlusterFS backend for QEMU
 *
 * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
 *
 * Pipe handling mechanism in AIO implementation is derived from
 * block/rbd.c. Hence,
 *
 * Copyright (C) 2010-2011 Christian Brunner <chb@muc.de>,
 *                         Josh Durgin <josh.durgin@dreamhost.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include <glusterfs/api/glfs.h>
#include "block_int.h"
#include "qemu_socket.h"
#include "uri.h"

typedef struct GlusterAIOCB {
    BlockDriverAIOCB common;
    int64_t size;
    int ret;
    bool *finished;
} GlusterAIOCB;

typedef struct BDRVGlusterState {
    struct glfs *glfs;
    int fds[2];
    struct glfs_fd *fd;
    int qemu_aio_count;
    int event_reader_pos;
    GlusterAIOCB *event_acb;
} BDRVGlusterState;

#define GLUSTER_FD_READ  0
#define GLUSTER_FD_WRITE 1

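/*
 * AIO completion notification: glfs_*_async() callbacks run in a gluster
 * thread, so the callback writes the GlusterAIOCB pointer to a pipe and the
 * QEMU iothread picks it up via an fd handler on the read end (see
 * gluster_finish_aiocb() and qemu_gluster_aio_event_reader() below).
 */
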
typedef struct GlusterConf {
    char *server;
    int port;
    char *volname;
    char *image;
    char *transport;
} GlusterConf;

static void qemu_gluster_gconf_free(GlusterConf *gconf)
{
    g_free(gconf->server);
    g_free(gconf->volname);
    g_free(gconf->image);
    g_free(gconf->transport);
    g_free(gconf);
}

static int parse_volume_options(GlusterConf *gconf, char *path)
{
    char *p, *q;

    if (!path) {
        return -EINVAL;
    }

    /* volume name is the first path component */
    p = q = path + strspn(path, "/");
    p += strcspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->volname = g_strndup(q, p - q);

    /* the rest of the path is the image */
    p += strspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->image = g_strdup(p);
    return 0;
}

/*
 * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
 *
 * 'gluster' is the protocol.
 *
 * 'transport' specifies the transport type used to connect to the gluster
 * management daemon (glusterd). Valid transport types are
 * tcp, unix and rdma. If a transport type isn't specified, then tcp
 * type is assumed.
 *
 * 'server' specifies the server where the volume file specification for
 * the given volume resides. This can be either a hostname, an ipv4 address
 * or an ipv6 address. An ipv6 address needs to be within square brackets [ ].
 * If the transport type is 'unix', then the 'server' field should not be
 * specified. Instead, the 'socket' field needs to be populated with the path
 * to the unix domain socket.
 *
 * 'port' is the port number on which glusterd is listening. This is optional
 * and if not specified, QEMU will send 0, which makes gluster use the
 * default port. If the transport type is unix, then 'port' should not be
 * specified.
 *
 * 'volname' is the name of the gluster volume which contains the VM image.
 *
 * 'image' is the path to the actual VM image that resides on the gluster
 * volume.
 *
 * Examples:
 *
 * file=gluster://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
 * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
 * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
 */
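/*
 * As an illustration, gluster+tcp://1.2.3.4:24007/testvol/dir/a.img from the
 * examples above is parsed into transport="tcp", server="1.2.3.4",
 * port=24007, volname="testvol" and image="dir/a.img" by
 * qemu_gluster_parseuri() and parse_volume_options() below.
 */
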
static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
{
    URI *uri;
    QueryParams *qp = NULL;
    bool is_unix = false;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!strcmp(uri->scheme, "gluster")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+unix")) {
        gconf->transport = g_strdup("unix");
        is_unix = true;
    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
        gconf->transport = g_strdup("rdma");
    } else {
        ret = -EINVAL;
        goto out;
    }

    ret = parse_volume_options(gconf, uri->path);
    if (ret < 0) {
        goto out;
    }

    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        if (uri->server || uri->port) {
            ret = -EINVAL;
            goto out;
        }
        if (strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        gconf->server = g_strdup(qp->p[0].value);
    } else {
        gconf->server = g_strdup(uri->server);
        gconf->port = uri->port;
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}

static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename)
{
    struct glfs *glfs = NULL;
    int ret;
    int old_errno;

    ret = qemu_gluster_parseuri(gconf, filename);
    if (ret < 0) {
        error_report("Usage: file=gluster[+transport]://[server[:port]]/"
            "volname/image[?socket=...]");
        errno = -ret;
        goto out;
    }

    glfs = glfs_new(gconf->volname);
    if (!glfs) {
        goto out;
    }

    ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
            gconf->port);
    if (ret < 0) {
        goto out;
    }

    /*
     * TODO: Use GF_LOG_ERROR instead of hard code value of 4 here when
     * GlusterFS makes GF_LOG_* macros available to libgfapi users.
     */
    ret = glfs_set_logging(glfs, "-", 4);
    if (ret < 0) {
        goto out;
    }

    ret = glfs_init(glfs);
    if (ret) {
        error_report("Gluster connection failed for server=%s port=%d "
            "volume=%s image=%s transport=%s", gconf->server, gconf->port,
            gconf->volname, gconf->image, gconf->transport);
        goto out;
    }
    return glfs;

out:
    if (glfs) {
        old_errno = errno;
        glfs_fini(glfs);
        errno = old_errno;
    }
    return NULL;
}

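/*
 * Called from the QEMU iothread once a completed GlusterAIOCB pointer has
 * been read back from the notification pipe: translate the gfapi result
 * into 0/-errno and invoke the block layer completion callback.
 */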
static void qemu_gluster_complete_aio(GlusterAIOCB *acb, BDRVGlusterState *s)
{
    int ret;
    bool *finished = acb->finished;
    BlockDriverCompletionFunc *cb = acb->common.cb;
    void *opaque = acb->common.opaque;

    if (!acb->ret || acb->ret == acb->size) {
        ret = 0; /* Success */
    } else if (acb->ret < 0) {
        ret = acb->ret; /* Read/Write failed */
    } else {
        ret = -EIO; /* Partial read/write - fail it */
    }

    s->qemu_aio_count--;
    qemu_aio_release(acb);
    cb(opaque, ret);
    if (finished) {
        *finished = true;
    }
}

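/*
 * fd handler for the read end of the notification pipe. Runs in the QEMU
 * iothread whenever the gluster callback thread has written an acb pointer.
 */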
static void qemu_gluster_aio_event_reader(void *opaque)
{
    BDRVGlusterState *s = opaque;
    ssize_t ret;

    do {
        char *p = (char *)&s->event_acb;

        /* the acb pointer may arrive in several pieces; reassemble it */
        ret = read(s->fds[GLUSTER_FD_READ], p + s->event_reader_pos,
                   sizeof(s->event_acb) - s->event_reader_pos);
        if (ret > 0) {
            s->event_reader_pos += ret;
            if (s->event_reader_pos == sizeof(s->event_acb)) {
                s->event_reader_pos = 0;
                qemu_gluster_complete_aio(s->event_acb, s);
            }
        }
    } while (ret < 0 && errno == EINTR);
}

static int qemu_gluster_aio_flush_cb(void *opaque)
{
    BDRVGlusterState *s = opaque;

    return (s->qemu_aio_count > 0);
}

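/*
 * Open the image on the gluster volume: establish the glfs connection,
 * translate the QEMU open flags into POSIX ones, open the image with
 * glfs_open() and set up the completion notification pipe.
 */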
static int qemu_gluster_open(BlockDriverState *bs, const char *filename,
    int bdrv_flags)
{
    BDRVGlusterState *s = bs->opaque;
    int open_flags = O_BINARY;
    int ret = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));

    s->glfs = qemu_gluster_init(gconf, filename);
    if (!s->glfs) {
        ret = -errno;
        goto out;
    }

    if (bdrv_flags & BDRV_O_RDWR) {
        open_flags |= O_RDWR;
    } else {
        open_flags |= O_RDONLY;
    }

    if ((bdrv_flags & BDRV_O_NOCACHE)) {
        open_flags |= O_DIRECT;
    }

    s->fd = glfs_open(s->glfs, gconf->image, open_flags);
    if (!s->fd) {
        ret = -errno;
        goto out;
    }

    ret = qemu_pipe(s->fds);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }
    fcntl(s->fds[GLUSTER_FD_READ], F_SETFL, O_NONBLOCK);
    qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ],
        qemu_gluster_aio_event_reader, NULL, qemu_gluster_aio_flush_cb, s);

out:
    qemu_gluster_gconf_free(gconf);
    if (!ret) {
        return ret;
    }
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }
    return ret;
}

static int qemu_gluster_create(const char *filename,
        QEMUOptionParameter *options)
{
    struct glfs *glfs;
    struct glfs_fd *fd;
    int ret = 0;
    int64_t total_size = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));

    glfs = qemu_gluster_init(gconf, filename);
    if (!glfs) {
        ret = -errno;
        goto out;
    }

    /* Read out options */
    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            total_size = options->value.n / BDRV_SECTOR_SIZE;
        }
        options++;
    }

    fd = glfs_creat(glfs, gconf->image,
        O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
    if (!fd) {
        ret = -errno;
    } else {
        if (glfs_ftruncate(fd, total_size * BDRV_SECTOR_SIZE) != 0) {
            ret = -errno;
        }
        if (glfs_close(fd) != 0) {
            ret = -errno;
        }
    }
out:
    qemu_gluster_gconf_free(gconf);
    if (glfs) {
        glfs_fini(glfs);
    }
    return ret;
}

static void qemu_gluster_aio_cancel(BlockDriverAIOCB *blockacb)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)blockacb;
    bool finished = false;

    acb->finished = &finished;
    while (!finished) {
        qemu_aio_wait();
    }
}

static AIOPool gluster_aio_pool = {
    .aiocb_size = sizeof(GlusterAIOCB),
    .cancel = qemu_gluster_aio_cancel,
};

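/*
 * gfapi completion callback; runs in gluster thread context. Record the
 * result in the acb and notify the QEMU iothread by writing the acb pointer
 * to the pipe. If that write fails, complete the request with -EIO here and
 * take the disk offline, since completions can no longer be delivered.
 */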
static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)arg;
    BlockDriverState *bs = acb->common.bs;
    BDRVGlusterState *s = bs->opaque;
    int retval;

    acb->ret = ret;
    retval = qemu_write_full(s->fds[GLUSTER_FD_WRITE], &acb, sizeof(acb));
    if (retval != sizeof(acb)) {
        /*
         * Gluster AIO callback thread failed to notify the waiting
         * QEMU thread about IO completion.
         *
         * Complete this IO request and make the disk inaccessible for
         * subsequent reads and writes.
         */
        error_report("Gluster failed to notify QEMU about IO completion");

        qemu_mutex_lock_iothread(); /* We are in gluster thread context */
        acb->common.cb(acb->common.opaque, -EIO);
        qemu_aio_release(acb);
        s->qemu_aio_count--;
        close(s->fds[GLUSTER_FD_READ]);
        close(s->fds[GLUSTER_FD_WRITE]);
        qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL,
            NULL);
        bs->drv = NULL; /* Make the disk inaccessible */
        qemu_mutex_unlock_iothread();
    }
}

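/*
 * Common helper behind .bdrv_aio_readv and .bdrv_aio_writev: convert the
 * sector-based request into a byte offset/size and submit it to gfapi with
 * glfs_preadv_async()/glfs_pwritev_async().
 */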
static BlockDriverAIOCB *qemu_gluster_aio_rw(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque, int write)
{
    int ret;
    GlusterAIOCB *acb;
    BDRVGlusterState *s = bs->opaque;
    size_t size;
    off_t offset;

    offset = sector_num * BDRV_SECTOR_SIZE;
    size = nb_sectors * BDRV_SECTOR_SIZE;
    s->qemu_aio_count++;

    acb = qemu_aio_get(&gluster_aio_pool, bs, cb, opaque);
    acb->size = size;
    acb->ret = 0;
    acb->finished = NULL;

    if (write) {
        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    } else {
        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    }

    if (ret < 0) {
        goto out;
    }
    return &acb->common;

out:
    s->qemu_aio_count--;
    qemu_aio_release(acb);
    return NULL;
}

static BlockDriverAIOCB *qemu_gluster_aio_readv(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return qemu_gluster_aio_rw(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *qemu_gluster_aio_writev(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return qemu_gluster_aio_rw(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}

static BlockDriverAIOCB *qemu_gluster_aio_flush(BlockDriverState *bs,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    int ret;
    GlusterAIOCB *acb;
    BDRVGlusterState *s = bs->opaque;

    acb = qemu_aio_get(&gluster_aio_pool, bs, cb, opaque);
    acb->size = 0;
    acb->ret = 0;
    acb->finished = NULL;
    s->qemu_aio_count++;

    ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        goto out;
    }
    return &acb->common;

out:
    s->qemu_aio_count--;
    qemu_aio_release(acb);
    return NULL;
}

static int64_t qemu_gluster_getlength(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    int64_t ret;

    ret = glfs_lseek(s->fd, 0, SEEK_END);
    if (ret < 0) {
        return -errno;
    } else {
        return ret;
    }
}

static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    struct stat st;
    int ret;

    ret = glfs_fstat(s->fd, &st);
    if (ret < 0) {
        return -errno;
    } else {
        return st.st_blocks * 512;
    }
}

static void qemu_gluster_close(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;

    close(s->fds[GLUSTER_FD_READ]);
    close(s->fds[GLUSTER_FD_WRITE]);
    qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL, NULL);

    if (s->fd) {
        glfs_close(s->fd);
        s->fd = NULL;
    }
    glfs_fini(s->glfs);
}

static QEMUOptionParameter qemu_gluster_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
    { NULL }
};

static BlockDriver bdrv_gluster = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_aio_readv               = qemu_gluster_aio_readv,
    .bdrv_aio_writev              = qemu_gluster_aio_writev,
    .bdrv_aio_flush               = qemu_gluster_aio_flush,
    .create_options               = qemu_gluster_create_options,
};

static BlockDriver bdrv_gluster_tcp = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+tcp",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_aio_readv               = qemu_gluster_aio_readv,
    .bdrv_aio_writev              = qemu_gluster_aio_writev,
    .bdrv_aio_flush               = qemu_gluster_aio_flush,
    .create_options               = qemu_gluster_create_options,
};

static BlockDriver bdrv_gluster_unix = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+unix",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_aio_readv               = qemu_gluster_aio_readv,
    .bdrv_aio_writev              = qemu_gluster_aio_writev,
    .bdrv_aio_flush               = qemu_gluster_aio_flush,
    .create_options               = qemu_gluster_create_options,
};

static BlockDriver bdrv_gluster_rdma = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+rdma",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_aio_readv               = qemu_gluster_aio_readv,
    .bdrv_aio_writev              = qemu_gluster_aio_writev,
    .bdrv_aio_flush               = qemu_gluster_aio_flush,
    .create_options               = qemu_gluster_create_options,
};

static void bdrv_gluster_init(void)
{
    bdrv_register(&bdrv_gluster_rdma);
    bdrv_register(&bdrv_gluster_unix);
    bdrv_register(&bdrv_gluster_tcp);
    bdrv_register(&bdrv_gluster);
}

block_init(bdrv_gluster_init);