/*
 * Provenance: qemu/ar7.git — block/gluster.c (blob 6de418c0bd32bc28ed9df6651ea800fdb923ceb7)
 */
/*
 * GlusterFS backend for QEMU
 *
 * Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
 *
 * Pipe handling mechanism in AIO implementation is derived from
 * block/rbd.c. Hence,
 *
 * Copyright (C) 2010-2011 Christian Brunner <chb@muc.de>,
 *                         Josh Durgin <josh.durgin@dreamhost.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
18 #include <glusterfs/api/glfs.h>
19 #include "block/block_int.h"
20 #include "qemu/sockets.h"
21 #include "qemu/uri.h"
23 typedef struct GlusterAIOCB {
24 BlockDriverAIOCB common;
25 int64_t size;
26 int ret;
27 bool *finished;
28 QEMUBH *bh;
29 } GlusterAIOCB;
31 typedef struct BDRVGlusterState {
32 struct glfs *glfs;
33 int fds[2];
34 struct glfs_fd *fd;
35 int qemu_aio_count;
36 int event_reader_pos;
37 GlusterAIOCB *event_acb;
38 } BDRVGlusterState;
40 #define GLUSTER_FD_READ 0
41 #define GLUSTER_FD_WRITE 1
/*
 * Parsed form of a gluster[+transport]://[server[:port]]/volname/image
 * specification.  All strings are g_malloc'ed; freed by
 * qemu_gluster_gconf_free().
 */
typedef struct GlusterConf {
    char *server;
    int port;
    char *volname;
    char *image;
    char *transport;
} GlusterConf;
51 static void qemu_gluster_gconf_free(GlusterConf *gconf)
53 g_free(gconf->server);
54 g_free(gconf->volname);
55 g_free(gconf->image);
56 g_free(gconf->transport);
57 g_free(gconf);
60 static int parse_volume_options(GlusterConf *gconf, char *path)
62 char *p, *q;
64 if (!path) {
65 return -EINVAL;
68 /* volume */
69 p = q = path + strspn(path, "/");
70 p += strcspn(p, "/");
71 if (*p == '\0') {
72 return -EINVAL;
74 gconf->volname = g_strndup(q, p - q);
76 /* image */
77 p += strspn(p, "/");
78 if (*p == '\0') {
79 return -EINVAL;
81 gconf->image = g_strdup(p);
82 return 0;
/*
 * file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
 *
 * 'gluster' is the protocol.
 *
 * 'transport' specifies the transport type used to connect to gluster
 * management daemon (glusterd). Valid transport types are
 * tcp, unix and rdma. If a transport type isn't specified, then tcp
 * type is assumed.
 *
 * 'server' specifies the server where the volume file specification for
 * the given volume resides. This can be either hostname, ipv4 address
 * or ipv6 address. ipv6 address needs to be within square brackets [ ].
 * If transport type is 'unix', then 'server' field should not be specified.
 * The 'socket' field needs to be populated with the path to unix domain
 * socket.
 *
 * 'port' is the port number on which glusterd is listening. This is optional
 * and if not specified, QEMU will send 0 which will make gluster to use the
 * default port. If the transport type is unix, then 'port' should not be
 * specified.
 *
 * 'volname' is the name of the gluster volume which contains the VM image.
 *
 * 'image' is the path to the actual VM image that resides on gluster volume.
 *
 * Examples:
 *
 * file=gluster://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4/testvol/a.img
 * file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
 * file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
 * file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
 * file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
 * file=gluster+rdma://1.2.3.4:24007/testvol/a.img
 */
122 static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
124 URI *uri;
125 QueryParams *qp = NULL;
126 bool is_unix = false;
127 int ret = 0;
129 uri = uri_parse(filename);
130 if (!uri) {
131 return -EINVAL;
134 /* transport */
135 if (!strcmp(uri->scheme, "gluster")) {
136 gconf->transport = g_strdup("tcp");
137 } else if (!strcmp(uri->scheme, "gluster+tcp")) {
138 gconf->transport = g_strdup("tcp");
139 } else if (!strcmp(uri->scheme, "gluster+unix")) {
140 gconf->transport = g_strdup("unix");
141 is_unix = true;
142 } else if (!strcmp(uri->scheme, "gluster+rdma")) {
143 gconf->transport = g_strdup("rdma");
144 } else {
145 ret = -EINVAL;
146 goto out;
149 ret = parse_volume_options(gconf, uri->path);
150 if (ret < 0) {
151 goto out;
154 qp = query_params_parse(uri->query);
155 if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
156 ret = -EINVAL;
157 goto out;
160 if (is_unix) {
161 if (uri->server || uri->port) {
162 ret = -EINVAL;
163 goto out;
165 if (strcmp(qp->p[0].name, "socket")) {
166 ret = -EINVAL;
167 goto out;
169 gconf->server = g_strdup(qp->p[0].value);
170 } else {
171 gconf->server = g_strdup(uri->server);
172 gconf->port = uri->port;
175 out:
176 if (qp) {
177 query_params_free(qp);
179 uri_free(uri);
180 return ret;
183 static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename)
185 struct glfs *glfs = NULL;
186 int ret;
187 int old_errno;
189 ret = qemu_gluster_parseuri(gconf, filename);
190 if (ret < 0) {
191 error_report("Usage: file=gluster[+transport]://[server[:port]]/"
192 "volname/image[?socket=...]");
193 errno = -ret;
194 goto out;
197 glfs = glfs_new(gconf->volname);
198 if (!glfs) {
199 goto out;
202 ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
203 gconf->port);
204 if (ret < 0) {
205 goto out;
209 * TODO: Use GF_LOG_ERROR instead of hard code value of 4 here when
210 * GlusterFS makes GF_LOG_* macros available to libgfapi users.
212 ret = glfs_set_logging(glfs, "-", 4);
213 if (ret < 0) {
214 goto out;
217 ret = glfs_init(glfs);
218 if (ret) {
219 error_report("Gluster connection failed for server=%s port=%d "
220 "volume=%s image=%s transport=%s", gconf->server, gconf->port,
221 gconf->volname, gconf->image, gconf->transport);
222 goto out;
224 return glfs;
226 out:
227 if (glfs) {
228 old_errno = errno;
229 glfs_fini(glfs);
230 errno = old_errno;
232 return NULL;
235 static void qemu_gluster_complete_aio(GlusterAIOCB *acb, BDRVGlusterState *s)
237 int ret;
238 bool *finished = acb->finished;
239 BlockDriverCompletionFunc *cb = acb->common.cb;
240 void *opaque = acb->common.opaque;
242 if (!acb->ret || acb->ret == acb->size) {
243 ret = 0; /* Success */
244 } else if (acb->ret < 0) {
245 ret = acb->ret; /* Read/Write failed */
246 } else {
247 ret = -EIO; /* Partial read/write - fail it */
250 s->qemu_aio_count--;
251 qemu_aio_release(acb);
252 cb(opaque, ret);
253 if (finished) {
254 *finished = true;
258 static void qemu_gluster_aio_event_reader(void *opaque)
260 BDRVGlusterState *s = opaque;
261 ssize_t ret;
263 do {
264 char *p = (char *)&s->event_acb;
266 ret = read(s->fds[GLUSTER_FD_READ], p + s->event_reader_pos,
267 sizeof(s->event_acb) - s->event_reader_pos);
268 if (ret > 0) {
269 s->event_reader_pos += ret;
270 if (s->event_reader_pos == sizeof(s->event_acb)) {
271 s->event_reader_pos = 0;
272 qemu_gluster_complete_aio(s->event_acb, s);
275 } while (ret < 0 && errno == EINTR);
278 static int qemu_gluster_aio_flush_cb(void *opaque)
280 BDRVGlusterState *s = opaque;
282 return (s->qemu_aio_count > 0);
285 /* TODO Convert to fine grained options */
286 static QemuOptsList runtime_opts = {
287 .name = "gluster",
288 .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
289 .desc = {
291 .name = "filename",
292 .type = QEMU_OPT_STRING,
293 .help = "URL to the gluster image",
295 { /* end of list */ }
299 static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
300 int bdrv_flags)
302 BDRVGlusterState *s = bs->opaque;
303 int open_flags = O_BINARY;
304 int ret = 0;
305 GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));
306 QemuOpts *opts;
307 Error *local_err = NULL;
308 const char *filename;
310 opts = qemu_opts_create_nofail(&runtime_opts);
311 qemu_opts_absorb_qdict(opts, options, &local_err);
312 if (error_is_set(&local_err)) {
313 qerror_report_err(local_err);
314 error_free(local_err);
315 ret = -EINVAL;
316 goto out;
319 filename = qemu_opt_get(opts, "filename");
322 s->glfs = qemu_gluster_init(gconf, filename);
323 if (!s->glfs) {
324 ret = -errno;
325 goto out;
328 if (bdrv_flags & BDRV_O_RDWR) {
329 open_flags |= O_RDWR;
330 } else {
331 open_flags |= O_RDONLY;
334 if ((bdrv_flags & BDRV_O_NOCACHE)) {
335 open_flags |= O_DIRECT;
338 s->fd = glfs_open(s->glfs, gconf->image, open_flags);
339 if (!s->fd) {
340 ret = -errno;
341 goto out;
344 ret = qemu_pipe(s->fds);
345 if (ret < 0) {
346 ret = -errno;
347 goto out;
349 fcntl(s->fds[GLUSTER_FD_READ], F_SETFL, O_NONBLOCK);
350 qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ],
351 qemu_gluster_aio_event_reader, NULL, qemu_gluster_aio_flush_cb, s);
353 out:
354 qemu_opts_del(opts);
355 qemu_gluster_gconf_free(gconf);
356 if (!ret) {
357 return ret;
359 if (s->fd) {
360 glfs_close(s->fd);
362 if (s->glfs) {
363 glfs_fini(s->glfs);
365 return ret;
368 static int qemu_gluster_create(const char *filename,
369 QEMUOptionParameter *options)
371 struct glfs *glfs;
372 struct glfs_fd *fd;
373 int ret = 0;
374 int64_t total_size = 0;
375 GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));
377 glfs = qemu_gluster_init(gconf, filename);
378 if (!glfs) {
379 ret = -errno;
380 goto out;
383 while (options && options->name) {
384 if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
385 total_size = options->value.n / BDRV_SECTOR_SIZE;
387 options++;
390 fd = glfs_creat(glfs, gconf->image,
391 O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
392 if (!fd) {
393 ret = -errno;
394 } else {
395 if (glfs_ftruncate(fd, total_size * BDRV_SECTOR_SIZE) != 0) {
396 ret = -errno;
398 if (glfs_close(fd) != 0) {
399 ret = -errno;
402 out:
403 qemu_gluster_gconf_free(gconf);
404 if (glfs) {
405 glfs_fini(glfs);
407 return ret;
410 static void qemu_gluster_aio_cancel(BlockDriverAIOCB *blockacb)
412 GlusterAIOCB *acb = (GlusterAIOCB *)blockacb;
413 bool finished = false;
415 acb->finished = &finished;
416 while (!finished) {
417 qemu_aio_wait();
421 static const AIOCBInfo gluster_aiocb_info = {
422 .aiocb_size = sizeof(GlusterAIOCB),
423 .cancel = qemu_gluster_aio_cancel,
426 static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg)
428 GlusterAIOCB *acb = (GlusterAIOCB *)arg;
429 BlockDriverState *bs = acb->common.bs;
430 BDRVGlusterState *s = bs->opaque;
431 int retval;
433 acb->ret = ret;
434 retval = qemu_write_full(s->fds[GLUSTER_FD_WRITE], &acb, sizeof(acb));
435 if (retval != sizeof(acb)) {
437 * Gluster AIO callback thread failed to notify the waiting
438 * QEMU thread about IO completion.
440 * Complete this IO request and make the disk inaccessible for
441 * subsequent reads and writes.
443 error_report("Gluster failed to notify QEMU about IO completion");
445 qemu_mutex_lock_iothread(); /* We are in gluster thread context */
446 acb->common.cb(acb->common.opaque, -EIO);
447 qemu_aio_release(acb);
448 s->qemu_aio_count--;
449 close(s->fds[GLUSTER_FD_READ]);
450 close(s->fds[GLUSTER_FD_WRITE]);
451 qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL,
452 NULL);
453 bs->drv = NULL; /* Make the disk inaccessible */
454 qemu_mutex_unlock_iothread();
458 static BlockDriverAIOCB *qemu_gluster_aio_rw(BlockDriverState *bs,
459 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
460 BlockDriverCompletionFunc *cb, void *opaque, int write)
462 int ret;
463 GlusterAIOCB *acb;
464 BDRVGlusterState *s = bs->opaque;
465 size_t size;
466 off_t offset;
468 offset = sector_num * BDRV_SECTOR_SIZE;
469 size = nb_sectors * BDRV_SECTOR_SIZE;
470 s->qemu_aio_count++;
472 acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
473 acb->size = size;
474 acb->ret = 0;
475 acb->finished = NULL;
477 if (write) {
478 ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
479 &gluster_finish_aiocb, acb);
480 } else {
481 ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
482 &gluster_finish_aiocb, acb);
485 if (ret < 0) {
486 goto out;
488 return &acb->common;
490 out:
491 s->qemu_aio_count--;
492 qemu_aio_release(acb);
493 return NULL;
496 static BlockDriverAIOCB *qemu_gluster_aio_readv(BlockDriverState *bs,
497 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
498 BlockDriverCompletionFunc *cb, void *opaque)
500 return qemu_gluster_aio_rw(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
503 static BlockDriverAIOCB *qemu_gluster_aio_writev(BlockDriverState *bs,
504 int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
505 BlockDriverCompletionFunc *cb, void *opaque)
507 return qemu_gluster_aio_rw(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
510 static BlockDriverAIOCB *qemu_gluster_aio_flush(BlockDriverState *bs,
511 BlockDriverCompletionFunc *cb, void *opaque)
513 int ret;
514 GlusterAIOCB *acb;
515 BDRVGlusterState *s = bs->opaque;
517 acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
518 acb->size = 0;
519 acb->ret = 0;
520 acb->finished = NULL;
521 s->qemu_aio_count++;
523 ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb);
524 if (ret < 0) {
525 goto out;
527 return &acb->common;
529 out:
530 s->qemu_aio_count--;
531 qemu_aio_release(acb);
532 return NULL;
#ifdef CONFIG_GLUSTERFS_DISCARD
/*
 * Asynchronous discard: submit glfs_discard_async for the byte range,
 * with an ACB of size 0 so any non-negative result completes as success.
 * Returns the ACB, or NULL if submission failed.
 */
static BlockDriverAIOCB *qemu_gluster_aio_discard(BlockDriverState *bs,
        int64_t sector_num, int nb_sectors, BlockDriverCompletionFunc *cb,
        void *opaque)
{
    int ret;
    GlusterAIOCB *acb;
    BDRVGlusterState *s = bs->opaque;
    size_t size;
    off_t offset;

    offset = sector_num * BDRV_SECTOR_SIZE;
    size = nb_sectors * BDRV_SECTOR_SIZE;

    acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
    acb->size = 0;
    acb->ret = 0;
    acb->finished = NULL;
    s->qemu_aio_count++;

    ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb);
    if (ret < 0) {
        goto out;
    }
    return &acb->common;

out:
    s->qemu_aio_count--;
    qemu_aio_release(acb);
    return NULL;
}
#endif
568 static int64_t qemu_gluster_getlength(BlockDriverState *bs)
570 BDRVGlusterState *s = bs->opaque;
571 int64_t ret;
573 ret = glfs_lseek(s->fd, 0, SEEK_END);
574 if (ret < 0) {
575 return -errno;
576 } else {
577 return ret;
581 static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
583 BDRVGlusterState *s = bs->opaque;
584 struct stat st;
585 int ret;
587 ret = glfs_fstat(s->fd, &st);
588 if (ret < 0) {
589 return -errno;
590 } else {
591 return st.st_blocks * 512;
595 static void qemu_gluster_close(BlockDriverState *bs)
597 BDRVGlusterState *s = bs->opaque;
599 close(s->fds[GLUSTER_FD_READ]);
600 close(s->fds[GLUSTER_FD_WRITE]);
601 qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL, NULL);
603 if (s->fd) {
604 glfs_close(s->fd);
605 s->fd = NULL;
607 glfs_fini(s->glfs);
610 static int qemu_gluster_has_zero_init(BlockDriverState *bs)
612 /* GlusterFS volume could be backed by a block device */
613 return 0;
616 static QEMUOptionParameter qemu_gluster_create_options[] = {
618 .name = BLOCK_OPT_SIZE,
619 .type = OPT_SIZE,
620 .help = "Virtual disk size"
622 { NULL }
625 static BlockDriver bdrv_gluster = {
626 .format_name = "gluster",
627 .protocol_name = "gluster",
628 .instance_size = sizeof(BDRVGlusterState),
629 .bdrv_file_open = qemu_gluster_open,
630 .bdrv_close = qemu_gluster_close,
631 .bdrv_create = qemu_gluster_create,
632 .bdrv_getlength = qemu_gluster_getlength,
633 .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
634 .bdrv_aio_readv = qemu_gluster_aio_readv,
635 .bdrv_aio_writev = qemu_gluster_aio_writev,
636 .bdrv_aio_flush = qemu_gluster_aio_flush,
637 .bdrv_has_zero_init = qemu_gluster_has_zero_init,
638 #ifdef CONFIG_GLUSTERFS_DISCARD
639 .bdrv_aio_discard = qemu_gluster_aio_discard,
640 #endif
641 .create_options = qemu_gluster_create_options,
644 static BlockDriver bdrv_gluster_tcp = {
645 .format_name = "gluster",
646 .protocol_name = "gluster+tcp",
647 .instance_size = sizeof(BDRVGlusterState),
648 .bdrv_file_open = qemu_gluster_open,
649 .bdrv_close = qemu_gluster_close,
650 .bdrv_create = qemu_gluster_create,
651 .bdrv_getlength = qemu_gluster_getlength,
652 .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
653 .bdrv_aio_readv = qemu_gluster_aio_readv,
654 .bdrv_aio_writev = qemu_gluster_aio_writev,
655 .bdrv_aio_flush = qemu_gluster_aio_flush,
656 .bdrv_has_zero_init = qemu_gluster_has_zero_init,
657 #ifdef CONFIG_GLUSTERFS_DISCARD
658 .bdrv_aio_discard = qemu_gluster_aio_discard,
659 #endif
660 .create_options = qemu_gluster_create_options,
663 static BlockDriver bdrv_gluster_unix = {
664 .format_name = "gluster",
665 .protocol_name = "gluster+unix",
666 .instance_size = sizeof(BDRVGlusterState),
667 .bdrv_file_open = qemu_gluster_open,
668 .bdrv_close = qemu_gluster_close,
669 .bdrv_create = qemu_gluster_create,
670 .bdrv_getlength = qemu_gluster_getlength,
671 .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
672 .bdrv_aio_readv = qemu_gluster_aio_readv,
673 .bdrv_aio_writev = qemu_gluster_aio_writev,
674 .bdrv_aio_flush = qemu_gluster_aio_flush,
675 .bdrv_has_zero_init = qemu_gluster_has_zero_init,
676 #ifdef CONFIG_GLUSTERFS_DISCARD
677 .bdrv_aio_discard = qemu_gluster_aio_discard,
678 #endif
679 .create_options = qemu_gluster_create_options,
682 static BlockDriver bdrv_gluster_rdma = {
683 .format_name = "gluster",
684 .protocol_name = "gluster+rdma",
685 .instance_size = sizeof(BDRVGlusterState),
686 .bdrv_file_open = qemu_gluster_open,
687 .bdrv_close = qemu_gluster_close,
688 .bdrv_create = qemu_gluster_create,
689 .bdrv_getlength = qemu_gluster_getlength,
690 .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
691 .bdrv_aio_readv = qemu_gluster_aio_readv,
692 .bdrv_aio_writev = qemu_gluster_aio_writev,
693 .bdrv_aio_flush = qemu_gluster_aio_flush,
694 .bdrv_has_zero_init = qemu_gluster_has_zero_init,
695 #ifdef CONFIG_GLUSTERFS_DISCARD
696 .bdrv_aio_discard = qemu_gluster_aio_discard,
697 #endif
698 .create_options = qemu_gluster_create_options,
701 static void bdrv_gluster_init(void)
703 bdrv_register(&bdrv_gluster_rdma);
704 bdrv_register(&bdrv_gluster_unix);
705 bdrv_register(&bdrv_gluster_tcp);
706 bdrv_register(&bdrv_gluster);
709 block_init(bdrv_gluster_init);