/*
 * GlusterFS backend for QEMU
 *
 * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
 *
 * Pipe handling mechanism in AIO implementation is derived from
 * block/rbd.c. Hence,
 *
 * Copyright (C) 2010-2011 Christian Brunner <chb@muc.de>,
 *                         Josh Durgin <josh.durgin@dreamhost.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include <glusterfs/api/glfs.h>
#include "block/block_int.h"
#include "qemu/sockets.h"
#include "qemu/uri.h"
typedef struct GlusterAIOCB {
    int64_t size;
    int ret;
    QEMUBH *bh;
    Coroutine *coroutine;
} GlusterAIOCB;

typedef struct BDRVGlusterState {
    struct glfs *glfs;
    struct glfs_fd *fd;
} BDRVGlusterState;

#define GLUSTER_FD_READ  0
#define GLUSTER_FD_WRITE 1
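
/*
 * Components parsed out of a gluster URI (see the format description above
 * qemu_gluster_parseuri() below).  'transport' defaults to "tcp" and a
 * 'port' of 0 makes gluster fall back to its default port.
 */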
typedef struct GlusterConf {
    char *server;
    int port;
    char *volname;
    char *image;
    char *transport;
} GlusterConf;
static void qemu_gluster_gconf_free(GlusterConf *gconf)
{
    if (gconf) {
        g_free(gconf->server);
        g_free(gconf->volname);
        g_free(gconf->image);
        g_free(gconf->transport);
        g_free(gconf);
    }
}
static int parse_volume_options(GlusterConf *gconf, char *path)
{
    char *p, *q;

    if (!path) {
        return -EINVAL;
    }

    /* volume */
    p = q = path + strspn(path, "/");
    p += strcspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->volname = g_strndup(q, p - q);

    /* image */
    p += strspn(p, "/");
    if (*p == '\0') {
        return -EINVAL;
    }
    gconf->image = g_strdup(p);
    return 0;
}
/*
 * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
 *
 * 'gluster' is the protocol.
 *
 * 'transport' specifies the transport type used to connect to the gluster
 * management daemon (glusterd). Valid transport types are tcp, unix and
 * rdma. If no transport type is specified, tcp is assumed.
 *
 * 'server' specifies the server where the volume file specification for
 * the given volume resides. This can be a hostname, an IPv4 address or an
 * IPv6 address; an IPv6 address must be enclosed in square brackets [ ].
 * If the transport type is 'unix', the 'server' field should not be
 * specified; instead, the 'socket' field must be populated with the path
 * to the unix domain socket.
 *
 * 'port' is the port number on which glusterd is listening. It is optional;
 * if not specified, QEMU sends 0, which makes gluster use its default port.
 * If the transport type is unix, 'port' should not be specified.
 *
 * 'volname' is the name of the gluster volume which contains the VM image.
 *
 * 'image' is the path to the actual VM image that resides on the gluster
 * volume.
 *
 * Examples:
 *
 * file=gluster://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
 * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
 * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
 */
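
/*
 * Illustrative command-line usage (assumed invocation, not part of the
 * original sources):
 *   qemu-system-x86_64 -drive file=gluster://1.2.3.4/testvol/a.img,if=virtio
 */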
static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
{
    URI *uri;
    QueryParams *qp = NULL;
    bool is_unix = false;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!uri->scheme || !strcmp(uri->scheme, "gluster")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+unix")) {
        gconf->transport = g_strdup("unix");
        is_unix = true;
    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
        gconf->transport = g_strdup("rdma");
    } else {
        ret = -EINVAL;
        goto out;
    }

    ret = parse_volume_options(gconf, uri->path);
    if (ret < 0) {
        goto out;
    }

    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        if (uri->server || uri->port) {
            ret = -EINVAL;
            goto out;
        }
        if (strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        gconf->server = g_strdup(qp->p[0].value);
    } else {
        gconf->server = g_strdup(uri->server ? uri->server : "localhost");
        gconf->port = uri->port;
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}
static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename,
                                      Error **errp)
{
    struct glfs *glfs = NULL;
    int ret;
    int old_errno;

    ret = qemu_gluster_parseuri(gconf, filename);
    if (ret < 0) {
        error_setg(errp, "Usage: file=gluster[+transport]://[server[:port]]/"
                   "volname/image[?socket=...]");
        errno = -ret;
        goto out;
    }

    glfs = glfs_new(gconf->volname);
    if (!glfs) {
        goto out;
    }

    ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
                                  gconf->port);
    if (ret < 0) {
        goto out;
    }

    /*
     * TODO: Use GF_LOG_ERROR instead of the hard-coded value of 4 here when
     * GlusterFS makes GF_LOG_* macros available to libgfapi users.
     */
    ret = glfs_set_logging(glfs, "-", 4);
    if (ret < 0) {
        goto out;
    }

    ret = glfs_init(glfs);
    if (ret) {
        error_setg_errno(errp, errno,
                         "Gluster connection failed for server=%s port=%d "
                         "volume=%s image=%s transport=%s", gconf->server,
                         gconf->port, gconf->volname, gconf->image,
                         gconf->transport);
        goto out;
    }
    return glfs;

out:
    if (glfs) {
        old_errno = errno;
        glfs_fini(glfs);
        errno = old_errno;
    }
    return NULL;
}
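
/*
 * Completion path shared by all async libgfapi calls below: the callback
 * runs in a GlusterFS thread, so it only records the result and schedules
 * a bottom half, which then re-enters the waiting coroutine from the QEMU
 * main loop.
 */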
static void qemu_gluster_complete_aio(void *opaque)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)opaque;

    qemu_bh_delete(acb->bh);
    acb->bh = NULL;
    qemu_coroutine_enter(acb->coroutine, NULL);
}
/*
 * AIO callback routine called from GlusterFS thread.
 */
static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)arg;

    if (!ret || ret == acb->size) {
        acb->ret = 0; /* Success */
    } else if (ret < 0) {
        acb->ret = ret; /* Read/Write failed */
    } else {
        acb->ret = -EIO; /* Partial read/write - fail it */
    }

    acb->bh = qemu_bh_new(qemu_gluster_complete_aio, acb);
    qemu_bh_schedule(acb->bh);
}
/* TODO Convert to fine grained options */
static QemuOptsList runtime_opts = {
    .name = "gluster",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the gluster image",
        },
        { /* end of list */ }
    },
};
static void qemu_gluster_parse_flags(int bdrv_flags, int *open_flags)
{
    assert(open_flags != NULL);

    *open_flags |= O_BINARY;

    if (bdrv_flags & BDRV_O_RDWR) {
        *open_flags |= O_RDWR;
    } else {
        *open_flags |= O_RDONLY;
    }

    if ((bdrv_flags & BDRV_O_NOCACHE)) {
        *open_flags |= O_DIRECT;
    }
}
static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
                             int bdrv_flags, Error **errp)
{
    BDRVGlusterState *s = bs->opaque;
    int open_flags = 0;
    int ret = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename;

    opts = qemu_opts_create(&runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto out;
    }

    filename = qemu_opt_get(opts, "filename");

    s->glfs = qemu_gluster_init(gconf, filename, errp);
    if (!s->glfs) {
        ret = -errno;
        goto out;
    }

    qemu_gluster_parse_flags(bdrv_flags, &open_flags);

    s->fd = glfs_open(s->glfs, gconf->image, open_flags);
    if (!s->fd) {
        ret = -errno;
    }

out:
    qemu_opts_del(opts);
    qemu_gluster_gconf_free(gconf);
    if (!ret) {
        return ret;
    }
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }
    return ret;
}
typedef struct BDRVGlusterReopenState {
    struct glfs *glfs;
    struct glfs_fd *fd;
} BDRVGlusterReopenState;
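
/*
 * bdrv_reopen is a three-step protocol: _prepare opens a fresh gluster
 * connection and fd alongside the existing ones, _commit swaps them in and
 * tears down the old pair, and _abort discards the new pair on failure.
 */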
static int qemu_gluster_reopen_prepare(BDRVReopenState *state,
                                       BlockReopenQueue *queue, Error **errp)
{
    int ret = 0;
    BDRVGlusterReopenState *reop_s;
    GlusterConf *gconf = NULL;
    int open_flags = 0;

    assert(state != NULL);
    assert(state->bs != NULL);

    state->opaque = g_malloc0(sizeof(BDRVGlusterReopenState));
    reop_s = state->opaque;

    qemu_gluster_parse_flags(state->flags, &open_flags);

    gconf = g_malloc0(sizeof(GlusterConf));

    reop_s->glfs = qemu_gluster_init(gconf, state->bs->filename, errp);
    if (reop_s->glfs == NULL) {
        ret = -errno;
        goto exit;
    }

    reop_s->fd = glfs_open(reop_s->glfs, gconf->image, open_flags);
    if (reop_s->fd == NULL) {
        /* reop_s->glfs will be cleaned up in _abort */
        ret = -errno;
        goto exit;
    }

exit:
    /* state->opaque will be freed in either the _abort or _commit */
    qemu_gluster_gconf_free(gconf);
    return ret;
}
static void qemu_gluster_reopen_commit(BDRVReopenState *state)
{
    BDRVGlusterReopenState *reop_s = state->opaque;
    BDRVGlusterState *s = state->bs->opaque;

    /* close the old */
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }

    /* use the newly opened image / connection */
    s->fd = reop_s->fd;
    s->glfs = reop_s->glfs;

    g_free(state->opaque);
    state->opaque = NULL;

    return;
}
static void qemu_gluster_reopen_abort(BDRVReopenState *state)
{
    BDRVGlusterReopenState *reop_s = state->opaque;

    if (reop_s == NULL) {
        return;
    }

    if (reop_s->fd) {
        glfs_close(reop_s->fd);
    }

    if (reop_s->glfs) {
        glfs_fini(reop_s->glfs);
    }

    g_free(state->opaque);
    state->opaque = NULL;

    return;
}
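
/*
 * CONFIG_GLUSTERFS_ZEROFILL is set by configure when libgfapi provides
 * glfs_zerofill().  With it, write_zeroes is offloaded via the async
 * variant and image creation can preallocate with the synchronous call;
 * without it, zerofill support is reported as absent and the write_zeroes
 * callback is simply not registered.
 */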
#ifdef CONFIG_GLUSTERFS_ZEROFILL
static coroutine_fn int qemu_gluster_co_write_zeroes(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    off_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = size;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_zerofill_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}

static inline bool gluster_supports_zerofill(void)
{
    return 1;
}

static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return glfs_zerofill(fd, offset, size);
}

#else
static inline bool gluster_supports_zerofill(void)
{
    return 0;
}

static inline int qemu_gluster_zerofill(struct glfs_fd *fd, int64_t offset,
        int64_t size)
{
    return 0;
}
#endif
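
/*
 * Illustrative creation command (assumed invocation, not part of the
 * original sources):
 *   qemu-img create -o preallocation=full gluster://1.2.3.4/testvol/a.img 10G
 */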
static int qemu_gluster_create(const char *filename,
        QEMUOptionParameter *options, Error **errp)
{
    struct glfs *glfs;
    struct glfs_fd *fd;
    int ret = 0;
    int prealloc = 0;
    int64_t total_size = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));

    glfs = qemu_gluster_init(gconf, filename, errp);
    if (!glfs) {
        ret = -EINVAL;
        goto out;
    }

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            total_size = options->value.n / BDRV_SECTOR_SIZE;
        } else if (!strcmp(options->name, BLOCK_OPT_PREALLOC)) {
            if (!options->value.s || !strcmp(options->value.s, "off")) {
                prealloc = 0;
            } else if (!strcmp(options->value.s, "full") &&
                    gluster_supports_zerofill()) {
                prealloc = 1;
            } else {
                error_setg(errp, "Invalid preallocation mode: '%s'"
                    " or GlusterFS doesn't support zerofill API",
                    options->value.s);
                ret = -EINVAL;
                goto out;
            }
        }
        options++;
    }

    fd = glfs_creat(glfs, gconf->image,
        O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
    if (!fd) {
        ret = -errno;
    } else {
        if (!glfs_ftruncate(fd, total_size * BDRV_SECTOR_SIZE)) {
            if (prealloc && qemu_gluster_zerofill(fd, 0,
                    total_size * BDRV_SECTOR_SIZE)) {
                ret = -errno;
            }
        } else {
            ret = -errno;
        }

        if (glfs_close(fd) != 0) {
            ret = -errno;
        }
    }
out:
    qemu_gluster_gconf_free(gconf);
    if (glfs) {
        glfs_fini(glfs);
    }
    return ret;
}
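
/*
 * Common coroutine body for reads and writes: issue the async libgfapi
 * request, yield until gluster_finish_aiocb() resumes us via the bottom
 * half, then return the recorded result.
 */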
static coroutine_fn int qemu_gluster_co_rw(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov, int write)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = size;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    if (write) {
        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    } else {
        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    }

    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
{
    int ret;
    BDRVGlusterState *s = bs->opaque;

    ret = glfs_ftruncate(s->fd, offset);
    if (ret < 0) {
        return -errno;
    }

    return 0;
}
static coroutine_fn int qemu_gluster_co_readv(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 0);
}

static coroutine_fn int qemu_gluster_co_writev(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
{
    return qemu_gluster_co_rw(bs, sector_num, nb_sectors, qiov, 1);
}
static coroutine_fn int qemu_gluster_co_flush_to_disk(BlockDriverState *bs)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;

    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
#ifdef CONFIG_GLUSTERFS_DISCARD
static coroutine_fn int qemu_gluster_co_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors)
{
    int ret;
    GlusterAIOCB *acb = g_slice_new(GlusterAIOCB);
    BDRVGlusterState *s = bs->opaque;
    size_t size = nb_sectors * BDRV_SECTOR_SIZE;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;

    acb->size = 0;
    acb->ret = 0;
    acb->coroutine = qemu_coroutine_self();

    ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }

    qemu_coroutine_yield();
    ret = acb->ret;

out:
    g_slice_free(GlusterAIOCB, acb);
    return ret;
}
#endif
static int64_t qemu_gluster_getlength(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    int64_t ret;

    ret = glfs_lseek(s->fd, 0, SEEK_END);
    if (ret < 0) {
        return -errno;
    } else {
        return ret;
    }
}
static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    struct stat st;
    int ret;

    ret = glfs_fstat(s->fd, &st);
    if (ret < 0) {
        return -errno;
    } else {
        return st.st_blocks * 512;
    }
}
static void qemu_gluster_close(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;

    if (s->fd) {
        glfs_close(s->fd);
        s->fd = NULL;
    }
    glfs_fini(s->glfs);
}
static int qemu_gluster_has_zero_init(BlockDriverState *bs)
{
    /* GlusterFS volume could be backed by a block device */
    return 0;
}
static QEMUOptionParameter qemu_gluster_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
    {
        .name = BLOCK_OPT_PREALLOC,
        .type = OPT_STRING,
        .help = "Preallocation mode (allowed values: off, full)"
    },
    { NULL }
};
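
/*
 * Four BlockDriver definitions are registered below, one per supported
 * transport scheme (gluster, gluster+tcp, gluster+unix, gluster+rdma).
 * They share every callback and differ only in .protocol_name.
 */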
static BlockDriver bdrv_gluster = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};
static BlockDriver bdrv_gluster_tcp = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+tcp",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};
static BlockDriver bdrv_gluster_unix = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+unix",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};
static BlockDriver bdrv_gluster_rdma = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+rdma",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_needs_filename          = true,
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_reopen_prepare          = qemu_gluster_reopen_prepare,
    .bdrv_reopen_commit           = qemu_gluster_reopen_commit,
    .bdrv_reopen_abort            = qemu_gluster_reopen_abort,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_co_readv                = qemu_gluster_co_readv,
    .bdrv_co_writev               = qemu_gluster_co_writev,
    .bdrv_co_flush_to_disk        = qemu_gluster_co_flush_to_disk,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_co_discard              = qemu_gluster_co_discard,
#endif
#ifdef CONFIG_GLUSTERFS_ZEROFILL
    .bdrv_co_write_zeroes         = qemu_gluster_co_write_zeroes,
#endif
    .create_options               = qemu_gluster_create_options,
};
static void bdrv_gluster_init(void)
{
    bdrv_register(&bdrv_gluster_rdma);
    bdrv_register(&bdrv_gluster_unix);
    bdrv_register(&bdrv_gluster_tcp);
    bdrv_register(&bdrv_gluster);
}

block_init(bdrv_gluster_init);