quorum: Create quorum.c, add QuorumChildRequest and QuorumAIOCB.
[qemu/cris-port.git] / block / gluster.c
blob14d390b4c7f47946862857242947fa12e3964f4f
/*
 * GlusterFS backend for QEMU
 *
 * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
 *
 * Pipe handling mechanism in AIO implementation is derived from
 * block/rbd.c. Hence,
 *
 * Copyright (C) 2010-2011 Christian Brunner <chb@muc.de>,
 *                         Josh Durgin <josh.durgin@dreamhost.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
18 #include <glusterfs/api/glfs.h>
19 #include "block/block_int.h"
20 #include "qemu/sockets.h"
21 #include "qemu/uri.h"
/*
 * Per-request state for one asynchronous gluster I/O operation.  Filled in
 * by the issuing coroutine, completed from gluster_finish_aiocb() and
 * qemu_gluster_complete_aio().
 */
typedef struct GlusterAIOCB {
    int64_t size;         /* expected transfer size; compared against the
                           * libgfapi result to detect short transfers */
    int ret;              /* completion status: 0, negative errno, or -EIO */
    QEMUBH *bh;           /* bottom half used to re-enter the coroutine */
    Coroutine *coroutine; /* coroutine waiting for this request */
} GlusterAIOCB;
/* Per-BlockDriverState driver state for a gluster-backed image. */
typedef struct BDRVGlusterState {
    struct glfs *glfs;  /* libgfapi connection to the gluster volume */
    struct glfs_fd *fd; /* open handle for the image file on the volume */
} BDRVGlusterState;
/*
 * NOTE(review): these two constants appear unused in this file; they look
 * like leftovers from the pipe-based AIO scheme mentioned in the file
 * header (derived from block/rbd.c) -- confirm before removing.
 */
#define GLUSTER_FD_READ 0
#define GLUSTER_FD_WRITE 1

/* Parsed form of a gluster[+transport]://... image specification. */
typedef struct GlusterConf {
    char *server;    /* glusterd host, or unix socket path for "unix" */
    int port;        /* glusterd port; 0 lets gluster pick the default */
    char *volname;   /* name of the gluster volume holding the image */
    char *image;     /* path of the image inside the volume */
    char *transport; /* "tcp", "unix" or "rdma" */
} GlusterConf;
46 static void qemu_gluster_gconf_free(GlusterConf *gconf)
48 g_free(gconf->server);
49 g_free(gconf->volname);
50 g_free(gconf->image);
51 g_free(gconf->transport);
52 g_free(gconf);
55 static int parse_volume_options(GlusterConf *gconf, char *path)
57 char *p, *q;
59 if (!path) {
60 return -EINVAL;
63 /* volume */
64 p = q = path + strspn(path, "/");
65 p += strcspn(p, "/");
66 if (*p == '\0') {
67 return -EINVAL;
69 gconf->volname = g_strndup(q, p - q);
71 /* image */
72 p += strspn(p, "/");
73 if (*p == '\0') {
74 return -EINVAL;
76 gconf->image = g_strdup(p);
77 return 0;
/*
 * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
 *
 * 'gluster' is the protocol.
 *
 * 'transport' specifies the transport type used to connect to gluster
 * management daemon (glusterd). Valid transport types are
 * tcp, unix and rdma. If a transport type isn't specified, then tcp
 * type is assumed.
 *
 * 'server' specifies the server where the volume file specification for
 * the given volume resides. This can be either hostname, ipv4 address
 * or ipv6 address. ipv6 address needs to be within square brackets [ ].
 * If transport type is 'unix', then 'server' field should not be specified.
 * The 'socket' field needs to be populated with the path to unix domain
 * socket.
 *
 * 'port' is the port number on which glusterd is listening. This is optional
 * and if not specified, QEMU will send 0 which will make gluster to use the
 * default port. If the transport type is unix, then 'port' should not be
 * specified.
 *
 * 'volname' is the name of the gluster volume which contains the VM image.
 *
 * 'image' is the path to the actual VM image that resides on gluster volume.
 *
 * Examples:
 *
 * file=gluster://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
 * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
 * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
 */
117 static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
119 URI *uri;
120 QueryParams *qp = NULL;
121 bool is_unix = false;
122 int ret = 0;
124 uri = uri_parse(filename);
125 if (!uri) {
126 return -EINVAL;
129 /* transport */
130 if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
131 gconf->transport = g_strdup("tcp");
132 } else if (!strcmp(uri->scheme, "gluster+tcp")) {
133 gconf->transport = g_strdup("tcp");
134 } else if (!strcmp(uri->scheme, "gluster+unix")) {
135 gconf->transport = g_strdup("unix");
136 is_unix = true;
137 } else if (!strcmp(uri->scheme, "gluster+rdma")) {
138 gconf->transport = g_strdup("rdma");
139 } else {
140 ret = -EINVAL;
141 goto out;
144 ret = parse_volume_options(gconf, uri->path);
145 if (ret < 0) {
146 goto out;
149 qp = query_params_parse(uri->query);
150 if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
151 ret = -EINVAL;
152 goto out;
155 if (is_unix) {
156 if (uri->server || uri->port) {
157 ret = -EINVAL;
158 goto out;
160 if (strcmp(qp->p[0].name, "socket")) {
161 ret = -EINVAL;
162 goto out;
164 gconf->server = g_strdup(qp->p[0].value);
165 } else {
166 gconf->server = g_strdup(uri->server ? uri->server : "localhost");
167 gconf->port = uri->port;
170 out:
171 if (qp) {
172 query_params_free(qp);
174 uri_free(uri);
175 return ret;
/*
 * Parse @filename, connect to the gluster volume it names and return an
 * initialized glfs object.  On failure returns NULL with errno indicating
 * the error; *errp is additionally set on the paths that have error
 * detail available (bad URI, failed glfs_init).
 */
static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
    Error **errp)
{
    struct glfs *glfs = NULL;
    int ret;
    int old_errno;

    ret = qemu_gluster_parseuri(gconf, filename);
    if (ret < 0) {
        error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
                   "volname/image[?socket=...]");
        /* convert the negative return code into errno for the caller */
        errno = -ret;
        goto out;
    }

    glfs = glfs_new(gconf->volname);
    if (!glfs) {
        goto out;
    }

    ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
                                  gconf->port);
    if (ret < 0) {
        goto out;
    }

    /*
     * TODO: Use GF_LOG_ERROR instead of hard code value of 4 here when
     * GlusterFS makes GF_LOG_* macros available to libgfapi users.
     */
    ret = glfs_set_logging(glfs, "-", 4);
    if (ret < 0) {
        goto out;
    }

    ret = glfs_init(glfs);
    if (ret) {
        error_setg_errno(errp, errno,
                         "Gluster connection failed for server=%s port=%d "
                         "volume=%s image=%s transport=%s", gconf->server,
                         gconf->port, gconf->volname, gconf->image,
                         gconf->transport);
        goto out;
    }
    return glfs;

out:
    if (glfs) {
        /* glfs_fini() may clobber errno; preserve the original failure */
        old_errno = errno;
        glfs_fini(glfs);
        errno = old_errno;
    }
    return NULL;
}
233 static void qemu_gluster_complete_aio(void *opaque)
235 GlusterAIOCB *acb = (GlusterAIOCB *)opaque;
237 qemu_bh_delete(acb->bh);
238 acb->bh = NULL;
239 qemu_coroutine_enter(acb->coroutine, NULL);
/*
 * AIO callback routine called from GlusterFS thread.
 *
 * Translates the raw libgfapi result into acb->ret (treating a transfer
 * shorter than acb->size as -EIO) and schedules a bottom half so the
 * waiting coroutine is resumed from the QEMU main loop.
 */
static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)arg;

    if (!ret || ret == acb->size) {
        acb->ret = 0; /* Success */
    } else if (ret < 0) {
        acb->ret = ret; /* Read/Write failed */
    } else {
        acb->ret = -EIO; /* Partial read/write - fail it */
    }

    acb->bh = qemu_bh_new(qemu_gluster_complete_aio, acb);
    qemu_bh_schedule(acb->bh);
}
/* TODO Convert to fine grained options */
/* Runtime options accepted by qemu_gluster_open(). */
static QemuOptsList runtime_opts = {
    .name = "gluster",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the gluster image",
        },
        { /* end of list */ }
    },
};
/*
 * Open a gluster image: connect to the volume named by the "filename"
 * option and open the image file with flags derived from @bdrv_flags.
 * Returns 0 on success, negative errno on failure (with partial
 * initialization undone).
 */
static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
    int bdrv_flags, Error **errp)
{
    BDRVGlusterState *s = bs->opaque;
    int open_flags = O_BINARY;
    int ret = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    filename = qemu_opt_get(opts, "filename");

    s->glfs = qemu_gluster_init(gconf, filename, errp);
    if (!s->glfs) {
        /* qemu_gluster_init() reports failure via errno */
        ret = -errno;
        goto out;
    }

    /* map block-layer flags onto POSIX open flags */
    if (bdrv_flags & BDRV_O_RDWR) {
        open_flags |= O_RDWR;
    } else {
        open_flags |= O_RDONLY;
    }

    if ((bdrv_flags & BDRV_O_NOCACHE)) {
        open_flags |= O_DIRECT;
    }

    s->fd = glfs_open(s->glfs, gconf->image, open_flags);
    if (!s->fd) {
        ret = -errno;
    }

out:
    qemu_opts_del(opts);
    qemu_gluster_gconf_free(gconf);
    if (!ret) {
        return ret;
    }
    /* error path: tear down whatever was set up before the failure */
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }
    return ret;
}
332 #ifdef CONFIG_GLUSTERFS_ZEROFILL
333 static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
334 int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
336 int ret;
337 GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
338 BDRVGlusterState *s = bs->opaque;
339 off_t size = nb_sectors * BDRV_SECTOR_SIZE;
340 off_t offset = sector_num * BDRV_SECTOR_SIZE;
342 acb->size = size;
343 acb->ret = 0;
344 acb->coroutine = qemu_coroutine_self();
346 ret = glfs_zerofill_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
347 if (ret < 0) {
348 ret = -errno;
349 goto out;
352 qemu_coroutine_yield();
353 ret = acb->ret;
355 out:
356 g_slice_free(GlusterAIOCB, acb);
357 return ret;
360 static inline bool gluster_supports_zerofill(void)
362 return 1;
365 static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
366 int64_t size)
368 return glfs_zerofill(fd, offset, size);
371 #else
372 static inline bool gluster_supports_zerofill(void)
374 return 0;
377 static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
378 int64_t size)
380 return 0;
382 #endif
/*
 * Create a new image on a gluster volume: connect using @filename,
 * create the file, truncate it to the requested size and optionally
 * preallocate it with zerofill.  Returns 0 on success, negative errno
 * (or -EINVAL for option/connection errors) on failure.
 */
static int qemu_gluster_create(const char *filename,
    QEMUOptionParameter *options, Error **errp)
{
    struct glfs *glfs;
    struct glfs_fd *fd;
    int ret = 0;
    int prealloc = 0;
    int64_t total_size = 0;  /* in sectors */
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));

    glfs = qemu_gluster_init(gconf, filename, errp);
    if (!glfs) {
        ret = -EINVAL;
        goto out;
    }

    /* scan the creation options for size and preallocation mode */
    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            total_size = options->value.n / BDRV_SECTOR_SIZE;
        } else if (!strcmp(options->name, BLOCK_OPT_PREALLOC)) {
            if (!options->value.s || !strcmp(options->value.s, "off")) {
                prealloc = 0;
            } else if (!strcmp(options->value.s, "full") &&
                    gluster_supports_zerofill()) {
                prealloc = 1;
            } else {
                error_setg(errp, "Invalid preallocation mode: '%s'"
                    " or GlusterFS doesn't support zerofill API",
                    options->value.s);
                ret = -EINVAL;
                goto out;
            }
        }
        options++;
    }

    fd = glfs_creat(glfs, gconf->image,
        O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
    if (!fd) {
        ret = -errno;
    } else {
        /* glfs_ftruncate() returns 0 on success */
        if (!glfs_ftruncate(fd, total_size * BDRV_SECTOR_SIZE)) {
            if (prealloc && qemu_gluster_zerofill(fd, 0,
                    total_size * BDRV_SECTOR_SIZE)) {
                ret = -errno;
            }
        } else {
            ret = -errno;
        }

        /* a failed close overrides any earlier error code */
        if (glfs_close(fd) != 0) {
            ret = -errno;
        }
    }
out:
    qemu_gluster_gconf_free(gconf);
    if (glfs) {
        glfs_fini(glfs);
    }
    return ret;
}
/*
 * Common coroutine implementation for reads and writes: issue the
 * asynchronous libgfapi call, yield until gluster_finish_aiocb()
 * resumes us, then return the completion status (0, -errno or -EIO).
 * @write selects glfs_pwritev_async() over glfs_preadv_async().
 */
static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = size;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    if (write) {
        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    } else {
        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    }

    if (ret < 0) {
        /* submission failed; the completion callback will not run */
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
480 static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
482 int ret;
483 BDRVGlusterState *s = bs->opaque;
485 ret = glfs_ftruncate(s->fd, offset);
486 if (ret < 0) {
487 return -errno;
490 return 0;
493 static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
494 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
496 return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
499 static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
500 int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
502 return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
505 static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
507 int ret;
508 GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
509 BDRVGlusterState *s = bs->opaque;
511 acb->size = 0;
512 acb->ret = 0;
513 acb->coroutine = qemu_coroutine_self();
515 ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
516 if (ret < 0) {
517 ret = -errno;
518 goto out;
521 qemu_coroutine_yield();
522 ret = acb->ret;
524 out:
525 g_slice_free(GlusterAIOCB, acb);
526 return ret;
529 #ifdef CONFIG_GLUSTERFS_DISCARD
/*
 * Discard (trim) a sector range via glfs_discard_async(); same
 * submit/yield pattern as qemu_gluster_co_rw().
 */
static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    /*
     * size is deliberately 0: gluster_finish_aiocb() treats ret == acb->size
     * as success, and a successful discard completes with ret == 0.
     */
    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
556 #endif
558 static int64_t qemu_gluster_getlength(BlockDriverState *bs)
560 BDRVGlusterState *s = bs->opaque;
561 int64_t ret;
563 ret = glfs_lseek(s->fd, 0, SEEK_END);
564 if (ret < 0) {
565 return -errno;
566 } else {
567 return ret;
571 static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
573 BDRVGlusterState *s = bs->opaque;
574 struct stat st;
575 int ret;
577 ret = glfs_fstat(s->fd, &st);
578 if (ret < 0) {
579 return -errno;
580 } else {
581 return st.st_blocks * 512;
585 static void qemu_gluster_close(BlockDriverState *bs)
587 BDRVGlusterState *s = bs->opaque;
589 if (s->fd) {
590 glfs_close(s->fd);
591 s->fd = NULL;
593 glfs_fini(s->glfs);
/*
 * Report that newly created images cannot be assumed to read as zeroes.
 */
static int qemu_gluster_has_zero_init(BlockDriverState *bs)
{
    /* GlusterFS volume could be backed by a block device */
    return 0;
}
/* Creation options accepted by qemu_gluster_create(). */
static QEMUOptionParameter qemu_gluster_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
    {
        .name = BLOCK_OPT_PREALLOC,
        .type = OPT_STRING,
        .help = "Preallocation mode (allowed values: off, full)"
    },
    { NULL }
};
/* Driver for the plain "gluster" scheme (defaults to the tcp transport). */
static BlockDriver bdrv_gluster = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};
/* Driver for the explicit "gluster+tcp" scheme; same callbacks as above. */
static BlockDriver bdrv_gluster_tcp = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+tcp",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};
/* Driver for the "gluster+unix" scheme (unix domain socket transport). */
static BlockDriver bdrv_gluster_unix = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+unix",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};
/* Driver for the "gluster+rdma" scheme (RDMA transport). */
static BlockDriver bdrv_gluster_rdma = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+rdma",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};
/* Register every gluster protocol driver variant with the block layer. */
static void bdrv_gluster_init(void)
{
    bdrv_register(&bdrv_gluster_rdma);
    bdrv_register(&bdrv_gluster_unix);
    bdrv_register(&bdrv_gluster_tcp);
    bdrv_register(&bdrv_gluster);
}

block_init(bdrv_gluster_init);