/*
 * QEMU Block driver for NBD
 *
 * Copyright (C) 2008 Bull S.A.S.
 *     Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
29 #include "qemu-common.h"
30 #include "block/nbd.h"
32 #include "block/block_int.h"
33 #include "qemu/module.h"
34 #include "qemu/sockets.h"
35 #include "qapi/qmp/qjson.h"
36 #include "qapi/qmp/qint.h"
38 #include <sys/types.h>
#define EN_OPTSTR ":exportname="

/* #define DEBUG_NBD */

#if defined(DEBUG_NBD)
#define logout(fmt, ...) \
    fprintf(stderr, "nbd\t%-24s" fmt, __func__, ##__VA_ARGS__)
#else
#define logout(fmt, ...) ((void)0)
#endif

#define MAX_NBD_REQUESTS    16
#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ ((uint64_t)(intptr_t)bs))
#define INDEX_TO_HANDLE(bs, index)  ((index)  ^ ((uint64_t)(intptr_t)bs))
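
/*
 * Worked example of the two macros above: for slot index i, the handle put
 * on the wire is i ^ (uint64_t)(intptr_t)bs, and because XOR with the same
 * value is its own inverse, HANDLE_TO_INDEX(bs, INDEX_TO_HANDLE(bs, i)) == i.
 * Mixing in the BlockDriverState pointer keeps handles of different devices
 * distinct while still letting a reply be mapped straight back to its
 * recv_coroutine slot.
 */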
typedef struct BDRVNBDState {
    int sock;
    uint32_t nbdflags;
    off_t size;
    size_t blocksize;

    CoMutex send_mutex;
    CoMutex free_sema;
    Coroutine *send_coroutine;
    int in_flight;

    Coroutine *recv_coroutine[MAX_NBD_REQUESTS];
    struct nbd_reply reply;

    bool is_unix;
    QemuOpts *socket_opts;

    char *export_name; /* An NBD server may export several devices */
} BDRVNBDState;
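
/*
 * How the fields above cooperate (a summary of the functions that follow):
 * send_mutex serializes senders so request headers and payloads are not
 * interleaved on the socket; free_sema together with in_flight caps the
 * number of outstanding requests at MAX_NBD_REQUESTS; and recv_coroutine[]
 * maps each in-flight request slot to the coroutine that must be woken when
 * its reply arrives.
 */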
static int nbd_parse_uri(const char *filename, QDict *options)
{
    URI *uri;
    const char *p;
    QueryParams *qp = NULL;
    int ret = 0;
    bool is_unix;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!strcmp(uri->scheme, "nbd")) {
        is_unix = false;
    } else if (!strcmp(uri->scheme, "nbd+tcp")) {
        is_unix = false;
    } else if (!strcmp(uri->scheme, "nbd+unix")) {
        is_unix = true;
    } else {
        ret = -EINVAL;
        goto out;
    }

    p = uri->path ? uri->path : "/";
    p += strspn(p, "/");
    if (p[0]) {
        qdict_put(options, "export", qstring_from_str(p));
    }

    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* nbd+unix:///export?socket=path */
        if (uri->server || uri->port || strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        qdict_put(options, "path", qstring_from_str(qp->p[0].value));
    } else {
        /* nbd[+tcp]://host[:port]/export */
        if (!uri->server) {
            ret = -EINVAL;
            goto out;
        }
        qdict_put(options, "host", qstring_from_str(uri->server));
        if (uri->port) {
            char *port_str = g_strdup_printf("%d", uri->port);
            qdict_put(options, "port", qstring_from_str(port_str));
            g_free(port_str);
        }
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}
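
/*
 * Illustrative inputs for nbd_parse_uri() (host names, ports, socket paths
 * and export names are placeholders, not defaults):
 *
 *   nbd://example.org:10809/my-export
 *       -> options: host=example.org, port=10809, export=my-export
 *   nbd+unix:///my-export?socket=/tmp/nbd.sock
 *       -> options: path=/tmp/nbd.sock, export=my-export
 */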
static void nbd_parse_filename(const char *filename, QDict *options,
                               Error **errp)
{
    char *file;
    char *export_name;
    const char *host_spec;
    const char *unixpath;

    if (qdict_haskey(options, "host")
        || qdict_haskey(options, "port")
        || qdict_haskey(options, "path"))
    {
        error_setg(errp, "host/port/path and a file name may not be specified "
                         "at the same time");
        return;
    }

    if (strstr(filename, "://")) {
        int ret = nbd_parse_uri(filename, options);
        if (ret < 0) {
            error_setg(errp, "No valid URL specified");
        }
        return;
    }

    file = g_strdup(filename);

    export_name = strstr(file, EN_OPTSTR);
    if (export_name) {
        if (export_name[strlen(EN_OPTSTR)] == 0) {
            goto out;
        }
        export_name[0] = 0; /* truncate 'file' */
        export_name += strlen(EN_OPTSTR);

        qdict_put(options, "export", qstring_from_str(export_name));
    }

    /* extract the host_spec - fail if it's not nbd:... */
    if (!strstart(file, "nbd:", &host_spec)) {
        error_setg(errp, "File name string for NBD must start with 'nbd:'");
        goto out;
    }

    if (!*host_spec) {
        goto out;
    }

    /* are we a UNIX or TCP socket? */
    if (strstart(host_spec, "unix:", &unixpath)) {
        qdict_put(options, "path", qstring_from_str(unixpath));
    } else {
        InetSocketAddress *addr = NULL;

        addr = inet_parse(host_spec, errp);
        if (error_is_set(errp)) {
            goto out;
        }

        qdict_put(options, "host", qstring_from_str(addr->host));
        qdict_put(options, "port", qstring_from_str(addr->port));
        qapi_free_InetSocketAddress(addr);
    }

out:
    g_free(file);
}
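
/*
 * Illustrative inputs for the legacy (non-URI) syntax handled above
 * (values are placeholders):
 *
 *   nbd:example.org:10809                         -> host/port
 *   nbd:example.org:10809:exportname=my-export    -> host/port plus export
 *   nbd:unix:/tmp/nbd.sock:exportname=my-export   -> UNIX socket plus export
 */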
static int nbd_config(BDRVNBDState *s, QDict *options)
{
    Error *local_err = NULL;

    if (qdict_haskey(options, "path")) {
        if (qdict_haskey(options, "host")) {
            qerror_report(ERROR_CLASS_GENERIC_ERROR, "path and host may not "
                          "be used at the same time.");
            return -EINVAL;
        }
        s->is_unix = true;
    } else if (qdict_haskey(options, "host")) {
        s->is_unix = false;
    } else {
        return -EINVAL;
    }

    s->socket_opts = qemu_opts_create_nofail(&socket_optslist);

    qemu_opts_absorb_qdict(s->socket_opts, options, &local_err);
    if (error_is_set(&local_err)) {
        qerror_report_err(local_err);
        error_free(local_err);
        return -EINVAL;
    }

    if (!qemu_opt_get(s->socket_opts, "port")) {
        qemu_opt_set_number(s->socket_opts, "port", NBD_DEFAULT_PORT);
    }

    s->export_name = g_strdup(qdict_get_try_str(options, "export"));
    if (s->export_name) {
        qdict_del(options, "export");
    }

    return 0;
}
static void nbd_coroutine_start(BDRVNBDState *s, struct nbd_request *request)
{
    int i;

    /* Poor man's semaphore.  The free_sema is locked when no other request
     * can be accepted, and unlocked after receiving one reply. */
    if (s->in_flight >= MAX_NBD_REQUESTS - 1) {
        qemu_co_mutex_lock(&s->free_sema);
        assert(s->in_flight < MAX_NBD_REQUESTS);
    }
    s->in_flight++;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->recv_coroutine[i] == NULL) {
            s->recv_coroutine[i] = qemu_coroutine_self();
            break;
        }
    }

    assert(i < MAX_NBD_REQUESTS);
    request->handle = INDEX_TO_HANDLE(s, i);
}
static int nbd_have_request(void *opaque)
{
    BDRVNBDState *s = opaque;

    return s->in_flight > 0;
}
static void nbd_reply_ready(void *opaque)
{
    BDRVNBDState *s = opaque;
    uint64_t i;
    int ret;

    if (s->reply.handle == 0) {
        /* No reply already in flight.  Fetch a header.  It is possible
         * that another thread has done the same thing in parallel, so
         * the socket is not readable anymore.
         */
        ret = nbd_receive_reply(s->sock, &s->reply);
        if (ret == -EAGAIN) {
            return;
        }
        if (ret < 0) {
            s->reply.handle = 0;
            goto fail;
        }
    }

    /* There's no need for a mutex on the receive side, because the
     * handler acts as a synchronization point and ensures that only
     * one coroutine is called until the reply finishes.
     */
    i = HANDLE_TO_INDEX(s, s->reply.handle);
    if (i >= MAX_NBD_REQUESTS) {
        goto fail;
    }

    if (s->recv_coroutine[i]) {
        qemu_coroutine_enter(s->recv_coroutine[i], NULL);
        return;
    }

fail:
    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->recv_coroutine[i]) {
            qemu_coroutine_enter(s->recv_coroutine[i], NULL);
        }
    }
}
static void nbd_restart_write(void *opaque)
{
    BDRVNBDState *s = opaque;
    qemu_coroutine_enter(s->send_coroutine, NULL);
}
static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
                               QEMUIOVector *qiov, int offset)
{
    int rc, ret;

    qemu_co_mutex_lock(&s->send_mutex);
    s->send_coroutine = qemu_coroutine_self();
    qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write,
                            nbd_have_request, s);
    if (qiov) {
        socket_set_cork(s->sock, 1);
        rc = nbd_send_request(s->sock, request);
        if (rc >= 0) {
            ret = qemu_co_sendv(s->sock, qiov->iov, qiov->niov,
                                offset, request->len);
            if (ret != request->len) {
                rc = -EIO;
            }
        }
        socket_set_cork(s->sock, 0);
    } else {
        rc = nbd_send_request(s->sock, request);
    }
    qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, NULL,
                            nbd_have_request, s);
    s->send_coroutine = NULL;
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}
static void nbd_co_receive_reply(BDRVNBDState *s, struct nbd_request *request,
                                 struct nbd_reply *reply,
                                 QEMUIOVector *qiov, int offset)
{
    int ret;

    /* Wait until we're woken up by the read handler.  TODO: perhaps
     * peek at the next reply and avoid yielding if it's ours?  */
    qemu_coroutine_yield();
    *reply = s->reply;
    if (reply->handle != request->handle) {
        reply->error = EIO;
    } else {
        if (qiov && reply->error == 0) {
            ret = qemu_co_recvv(s->sock, qiov->iov, qiov->niov,
                                offset, request->len);
            if (ret != request->len) {
                reply->error = EIO;
            }
        }

        /* Tell the read handler to read another header.  */
        s->reply.handle = 0;
    }
}
static void nbd_coroutine_end(BDRVNBDState *s, struct nbd_request *request)
{
    int i = HANDLE_TO_INDEX(s, request->handle);
    s->recv_coroutine[i] = NULL;
    if (s->in_flight-- == MAX_NBD_REQUESTS) {
        qemu_co_mutex_unlock(&s->free_sema);
    }
}
static int nbd_establish_connection(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;
    int sock;
    int ret;
    off_t size;
    size_t blocksize;

    if (s->is_unix) {
        sock = unix_socket_outgoing(qemu_opt_get(s->socket_opts, "path"));
    } else {
        sock = tcp_socket_outgoing_opts(s->socket_opts);
        if (sock >= 0) {
            socket_set_nodelay(sock);
        }
    }

    /* Failed to establish connection */
    if (sock < 0) {
        logout("Failed to establish connection to NBD server\n");
        return -errno;
    }

    /* NBD handshake */
    ret = nbd_receive_negotiate(sock, s->export_name, &s->nbdflags, &size,
                                &blocksize);
    if (ret < 0) {
        logout("Failed to negotiate with the NBD server\n");
        closesocket(sock);
        return ret;
    }

    /* Now that we're connected, set the socket to be non-blocking and
     * kick the reply mechanism. */
    qemu_set_nonblock(sock);
    qemu_aio_set_fd_handler(sock, nbd_reply_ready, NULL,
                            nbd_have_request, s);

    s->sock = sock;
    s->size = size;
    s->blocksize = blocksize;

    logout("Established connection with NBD server\n");
    return 0;
}
static void nbd_teardown_connection(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;
    struct nbd_request request;

    request.type = NBD_CMD_DISC;
    request.from = 0;
    request.len = 0;
    nbd_send_request(s->sock, &request);

    qemu_aio_set_fd_handler(s->sock, NULL, NULL, NULL, NULL);
    closesocket(s->sock);
}
static int nbd_open(BlockDriverState *bs, QDict *options, int flags)
{
    BDRVNBDState *s = bs->opaque;
    int result;

    qemu_co_mutex_init(&s->send_mutex);
    qemu_co_mutex_init(&s->free_sema);

    /* Pop the config into our state object.  Exit if invalid. */
    result = nbd_config(s, options);
    if (result != 0) {
        return result;
    }

    /* establish TCP connection, return error if it fails
     * TODO: Configurable retry-until-timeout behaviour.
     */
    result = nbd_establish_connection(bs);

    return result;
}
static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
                          int nb_sectors, QEMUIOVector *qiov,
                          int offset)
{
    BDRVNBDState *s = bs->opaque;
    struct nbd_request request;
    struct nbd_reply reply;
    ssize_t ret;

    request.type = NBD_CMD_READ;
    request.from = sector_num * 512;
    request.len = nb_sectors * 512;

    nbd_coroutine_start(s, &request);
    ret = nbd_co_send_request(s, &request, NULL, 0);
    if (ret < 0) {
        reply.error = -ret;
    } else {
        nbd_co_receive_reply(s, &request, &reply, qiov, offset);
    }
    nbd_coroutine_end(s, &request);
    return -reply.error;
}
static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
                           int nb_sectors, QEMUIOVector *qiov,
                           int offset)
{
    BDRVNBDState *s = bs->opaque;
    struct nbd_request request;
    struct nbd_reply reply;
    ssize_t ret;

    request.type = NBD_CMD_WRITE;
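    /* In writethrough mode (guest write cache disabled) each completed write
     * must already be on stable storage.  If the server advertises FUA
     * support, tagging the write itself with NBD_CMD_FLAG_FUA achieves that
     * without issuing a separate NBD_CMD_FLUSH afterwards. */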
    if (!bdrv_enable_write_cache(bs) && (s->nbdflags & NBD_FLAG_SEND_FUA)) {
        request.type |= NBD_CMD_FLAG_FUA;
    }

    request.from = sector_num * 512;
    request.len = nb_sectors * 512;

    nbd_coroutine_start(s, &request);
    ret = nbd_co_send_request(s, &request, qiov, offset);
    if (ret < 0) {
        reply.error = -ret;
    } else {
        nbd_co_receive_reply(s, &request, &reply, NULL, 0);
    }
    nbd_coroutine_end(s, &request);
    return -reply.error;
}
/* qemu-nbd has a limit of slightly less than 1M per request.  Try to
 * remain aligned to 4K. */
#define NBD_MAX_SECTORS 2040
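
/*
 * The arithmetic behind the value: 2040 sectors * 512 bytes = 1,044,480
 * bytes, i.e. 255 * 4 KiB, so each split request stays 4K-aligned while
 * remaining below the ~1 MiB (1,048,576 byte) per-request ceiling noted
 * above.
 */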
static int nbd_co_readv(BlockDriverState *bs, int64_t sector_num,
                        int nb_sectors, QEMUIOVector *qiov)
{
    int offset = 0;
    int ret;

    while (nb_sectors > NBD_MAX_SECTORS) {
        ret = nbd_co_readv_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
        if (ret < 0) {
            return ret;
        }
        offset += NBD_MAX_SECTORS * 512;
        sector_num += NBD_MAX_SECTORS;
        nb_sectors -= NBD_MAX_SECTORS;
    }
    return nbd_co_readv_1(bs, sector_num, nb_sectors, qiov, offset);
}
static int nbd_co_writev(BlockDriverState *bs, int64_t sector_num,
                         int nb_sectors, QEMUIOVector *qiov)
{
    int offset = 0;
    int ret;

    while (nb_sectors > NBD_MAX_SECTORS) {
        ret = nbd_co_writev_1(bs, sector_num, NBD_MAX_SECTORS, qiov, offset);
        if (ret < 0) {
            return ret;
        }
        offset += NBD_MAX_SECTORS * 512;
        sector_num += NBD_MAX_SECTORS;
        nb_sectors -= NBD_MAX_SECTORS;
    }
    return nbd_co_writev_1(bs, sector_num, nb_sectors, qiov, offset);
}
static int nbd_co_flush(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;
    struct nbd_request request;
    struct nbd_reply reply;
    ssize_t ret;

    if (!(s->nbdflags & NBD_FLAG_SEND_FLUSH)) {
        return 0;
    }

    request.type = NBD_CMD_FLUSH;
    if (s->nbdflags & NBD_FLAG_SEND_FUA) {
        request.type |= NBD_CMD_FLAG_FUA;
    }

    request.from = 0;
    request.len = 0;

    nbd_coroutine_start(s, &request);
    ret = nbd_co_send_request(s, &request, NULL, 0);
    if (ret < 0) {
        reply.error = -ret;
    } else {
        nbd_co_receive_reply(s, &request, &reply, NULL, 0);
    }
    nbd_coroutine_end(s, &request);
    return -reply.error;
}
static int nbd_co_discard(BlockDriverState *bs, int64_t sector_num,
                          int nb_sectors)
{
    BDRVNBDState *s = bs->opaque;
    struct nbd_request request;
    struct nbd_reply reply;
    ssize_t ret;

    if (!(s->nbdflags & NBD_FLAG_SEND_TRIM)) {
        return 0;
    }
    request.type = NBD_CMD_TRIM;
    request.from = sector_num * 512;
    request.len = nb_sectors * 512;

    nbd_coroutine_start(s, &request);
    ret = nbd_co_send_request(s, &request, NULL, 0);
    if (ret < 0) {
        reply.error = -ret;
    } else {
        nbd_co_receive_reply(s, &request, &reply, NULL, 0);
    }
    nbd_coroutine_end(s, &request);
    return -reply.error;
}
static void nbd_close(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;
    g_free(s->export_name);
    qemu_opts_del(s->socket_opts);

    nbd_teardown_connection(bs);
}
static int64_t nbd_getlength(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    return s->size;
}
static BlockDriver bdrv_nbd = {
    .format_name         = "nbd",
    .protocol_name       = "nbd",
    .instance_size       = sizeof(BDRVNBDState),
    .bdrv_parse_filename = nbd_parse_filename,
    .bdrv_file_open      = nbd_open,
    .bdrv_co_readv       = nbd_co_readv,
    .bdrv_co_writev      = nbd_co_writev,
    .bdrv_close          = nbd_close,
    .bdrv_co_flush_to_os = nbd_co_flush,
    .bdrv_co_discard     = nbd_co_discard,
    .bdrv_getlength      = nbd_getlength,
};

static BlockDriver bdrv_nbd_tcp = {
    .format_name         = "nbd",
    .protocol_name       = "nbd+tcp",
    .instance_size       = sizeof(BDRVNBDState),
    .bdrv_parse_filename = nbd_parse_filename,
    .bdrv_file_open      = nbd_open,
    .bdrv_co_readv       = nbd_co_readv,
    .bdrv_co_writev      = nbd_co_writev,
    .bdrv_close          = nbd_close,
    .bdrv_co_flush_to_os = nbd_co_flush,
    .bdrv_co_discard     = nbd_co_discard,
    .bdrv_getlength      = nbd_getlength,
};

static BlockDriver bdrv_nbd_unix = {
    .format_name         = "nbd",
    .protocol_name       = "nbd+unix",
    .instance_size       = sizeof(BDRVNBDState),
    .bdrv_parse_filename = nbd_parse_filename,
    .bdrv_file_open      = nbd_open,
    .bdrv_co_readv       = nbd_co_readv,
    .bdrv_co_writev      = nbd_co_writev,
    .bdrv_close          = nbd_close,
    .bdrv_co_flush_to_os = nbd_co_flush,
    .bdrv_co_discard     = nbd_co_discard,
    .bdrv_getlength      = nbd_getlength,
};
static void bdrv_nbd_init(void)
{
    bdrv_register(&bdrv_nbd);
    bdrv_register(&bdrv_nbd_tcp);
    bdrv_register(&bdrv_nbd_unix);
}

block_init(bdrv_nbd_init);