/*
 * QEMU Block driver for NBD
 *
 * Copyright (C) 2016 Red Hat, Inc.
 * Copyright (C) 2008 Bull S.A.S.
 *     Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * Some parts:
 *    Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"

#include "trace.h"
#include "qemu/uri.h"
#include "qemu/option.h"
#include "qemu/cutils.h"

#include "qapi/qapi-visit-sockets.h"
#include "qapi/qmp/qstring.h"

#include "block/qdict.h"
#include "block/nbd.h"
#include "block/block_int.h"
#define EN_OPTSTR ":exportname="
#define MAX_NBD_REQUESTS    16

#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
#define INDEX_TO_HANDLE(bs, index)  ((index)  ^ (uint64_t)(intptr_t)(bs))
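/*
 * Illustrative example (not part of the driver logic): XOR-ing the request
 * slot index with the BlockDriverState pointer yields a per-client handle
 * that is trivially reversible, because XOR is its own inverse:
 *
 *     uint64_t handle = INDEX_TO_HANDLE(bs, 5);
 *     assert(HANDLE_TO_INDEX(bs, handle) == 5);
 *
 * A reply whose handle does not round-trip to a live request slot is
 * treated as a protocol error (see nbd_connection_entry below).
 */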
typedef struct {
    Coroutine *coroutine;
    uint64_t offset;        /* original offset of the request */
    bool receiving;         /* waiting for connection_co? */
} NBDClientRequest;
typedef struct NBDClientSession {
    QIOChannelSocket *sioc; /* The master data channel */
    QIOChannel *ioc; /* The current I/O channel which may differ (eg TLS) */
    NBDExportInfo info;

    CoMutex send_mutex;
    CoQueue free_sema;
    Coroutine *connection_co;
    int in_flight;

    NBDClientRequest requests[MAX_NBD_REQUESTS];
    NBDReply reply;
    BlockDriverState *bs;
    bool quit;
} NBDClientSession;
typedef struct BDRVNBDState {
    NBDClientSession client;

    /* For nbd_refresh_filename() */
    SocketAddress *saddr;
    char *export, *tlscredsid;
} BDRVNBDState;
static NBDClientSession *nbd_get_client_session(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    return &s->client;
}
static void nbd_recv_coroutines_wake_all(NBDClientSession *s)
{
    int i;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        NBDClientRequest *req = &s->requests[i];

        if (req->coroutine && req->receiving) {
            aio_co_wake(req->coroutine);
        }
    }
}
static void nbd_client_detach_aio_context(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    qio_channel_detach_aio_context(QIO_CHANNEL(client->ioc));
}
static void nbd_client_attach_aio_context_bh(void *opaque)
{
    BlockDriverState *bs = opaque;
    NBDClientSession *client = nbd_get_client_session(bs);

    /*
     * The node is still drained, so we know the coroutine has yielded in
     * nbd_read_eof(), the only place where bs->in_flight can reach 0, or it is
     * entered for the first time. Both places are safe for entering the
     * coroutine.
     */
    qemu_aio_coroutine_enter(bs->aio_context, client->connection_co);
    bdrv_dec_in_flight(bs);
}
static void nbd_client_attach_aio_context(BlockDriverState *bs,
                                          AioContext *new_context)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    qio_channel_attach_aio_context(QIO_CHANNEL(client->ioc), new_context);

    bdrv_inc_in_flight(bs);

    /*
     * Need to wait here for the BH to run because the BH must run while the
     * node is still drained.
     */
    aio_wait_bh_oneshot(new_context, nbd_client_attach_aio_context_bh, bs);
}
static void nbd_teardown_connection(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);

    assert(client->ioc);

    /* finish any pending coroutines */
    qio_channel_shutdown(client->ioc,
                         QIO_CHANNEL_SHUTDOWN_BOTH,
                         NULL);
    BDRV_POLL_WHILE(bs, client->connection_co);

    nbd_client_detach_aio_context(bs);
    object_unref(OBJECT(client->sioc));
    client->sioc = NULL;
    object_unref(OBJECT(client->ioc));
    client->ioc = NULL;
}
static coroutine_fn void nbd_connection_entry(void *opaque)
{
    NBDClientSession *s = opaque;
    uint64_t i;
    int ret = 0;
    Error *local_err = NULL;

    while (!s->quit) {
        /*
         * The NBD client can only really be considered idle when it has
         * yielded from qio_channel_readv_all_eof(), waiting for data. This is
         * the point where the additional scheduled coroutine entry happens
         * after nbd_client_attach_aio_context().
         *
         * Therefore we keep an additional in_flight reference all the time and
         * only drop it temporarily here.
         */
        assert(s->reply.handle == 0);
        ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, &local_err);

        if (local_err) {
            trace_nbd_read_reply_entry_fail(ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }
        if (ret <= 0) {
            break;
        }

        /*
         * There's no need for a mutex on the receive side, because the
         * handler acts as a synchronization point and ensures that only
         * one coroutine is called until the reply finishes.
         */
        i = HANDLE_TO_INDEX(s, s->reply.handle);
        if (i >= MAX_NBD_REQUESTS ||
            !s->requests[i].coroutine ||
            !s->requests[i].receiving ||
            (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply))
        {
            break;
        }

        /*
         * We're woken up again by the request itself.  Note that there
         * is no race between yielding and reentering connection_co.  This
         * is because:
         *
         * - if the request runs on the same AioContext, it is only
         *   entered after we yield
         *
         * - if the request runs on a different AioContext, reentering
         *   connection_co happens through a bottom half, which can only
         *   run after we yield.
         */
        aio_co_wake(s->requests[i].coroutine);
        qemu_coroutine_yield();
    }

    s->quit = true;
    nbd_recv_coroutines_wake_all(s);
    bdrv_dec_in_flight(s->bs);

    s->connection_co = NULL;
    aio_wait_kick();
}
static int nbd_co_send_request(BlockDriverState *bs,
                               NBDRequest *request,
                               QEMUIOVector *qiov)
{
    NBDClientSession *s = nbd_get_client_session(bs);
    int rc, i;

    qemu_co_mutex_lock(&s->send_mutex);
    while (s->in_flight == MAX_NBD_REQUESTS) {
        qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
    }
    s->in_flight++;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->requests[i].coroutine == NULL) {
            break;
        }
    }

    g_assert(qemu_in_coroutine());
    assert(i < MAX_NBD_REQUESTS);

    s->requests[i].coroutine = qemu_coroutine_self();
    s->requests[i].offset = request->from;
    s->requests[i].receiving = false;

    request->handle = INDEX_TO_HANDLE(s, i);

    if (s->quit) {
        rc = -EIO;
        goto err;
    }
    assert(s->ioc);

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0 && !s->quit) {
            if (qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
                                       NULL) < 0) {
                rc = -EIO;
            }
        } else if (rc >= 0) {
            rc = -EIO;
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }

err:
    if (rc < 0) {
        s->quit = true;
        s->requests[i].coroutine = NULL;
        s->in_flight--;
        qemu_co_queue_next(&s->free_sema);
    }
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}
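/*
 * Request lifecycle sketch (illustrative, not additional driver code): a
 * typical caller pairs nbd_co_send_request() with one of the receive
 * helpers defined below, e.g.
 *
 *     ret = nbd_co_send_request(bs, &request, NULL);
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     ret = nbd_co_receive_return_code(client, request.handle,
 *                                      &request_ret, &local_err);
 *
 * The receive side yields until nbd_connection_entry() reads the reply
 * header and wakes the coroutine owning the matching request slot.
 */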
static inline uint16_t payload_advance16(uint8_t **payload)
{
    *payload += 2;
    return lduw_be_p(*payload - 2);
}

static inline uint32_t payload_advance32(uint8_t **payload)
{
    *payload += 4;
    return ldl_be_p(*payload - 4);
}

static inline uint64_t payload_advance64(uint8_t **payload)
{
    *payload += 8;
    return ldq_be_p(*payload - 8);
}
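/*
 * Usage sketch (illustrative): parsing a structured reply payload is a
 * sequence of cursor advances over a byte buffer, e.g. for the
 * NBD_REPLY_TYPE_OFFSET_HOLE payload handled below:
 *
 *     uint64_t offset = payload_advance64(&payload);     // 8-byte BE field
 *     uint32_t hole_size = payload_advance32(&payload);  // 4-byte BE field
 *
 * Each helper advances *payload past the field and returns the value
 * converted from network (big-endian) byte order.
 */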
static int nbd_parse_offset_hole_payload(NBDClientSession *client,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_offset,
                                         QEMUIOVector *qiov, Error **errp)
{
    uint64_t offset;
    uint32_t hole_size;

    if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_HOLE");
        return -EINVAL;
    }

    offset = payload_advance64(&payload);
    hole_size = payload_advance32(&payload);

    if (!hole_size || offset < orig_offset || hole_size > qiov->size ||
        offset > orig_offset + qiov->size - hole_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (client->info.min_block &&
        !QEMU_IS_ALIGNED(hole_size, client->info.min_block)) {
        trace_nbd_structured_read_compliance("hole");
    }

    qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);

    return 0;
}
/*
 * nbd_parse_blockstatus_payload
 * Based on our request, we expect only one extent in reply, for the
 * base:allocation context.
 */
static int nbd_parse_blockstatus_payload(NBDClientSession *client,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_length,
                                         NBDExtent *extent, Error **errp)
{
    uint32_t context_id;

    /* The server succeeded, so it must have sent [at least] one extent */
    if (chunk->length < sizeof(context_id) + sizeof(*extent)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS");
        return -EINVAL;
    }

    context_id = payload_advance32(&payload);
    if (client->info.context_id != context_id) {
        error_setg(errp, "Protocol error: unexpected context id %d for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS, when negotiated context "
                         "id is %d", context_id,
                   client->info.context_id);
        return -EINVAL;
    }

    extent->length = payload_advance32(&payload);
    extent->flags = payload_advance32(&payload);

    if (extent->length == 0) {
        error_setg(errp, "Protocol error: server sent status chunk with "
                   "zero length");
        return -EINVAL;
    }

    /*
     * A server sending unaligned block status is in violation of the
     * protocol, but as qemu-nbd 3.1 is such a server (at least for
     * POSIX files that are not a multiple of 512 bytes, since qemu
     * rounds files up to 512-byte multiples but lseek(SEEK_HOLE)
     * still sees an implicit hole beyond the real EOF), it's nicer to
     * work around the misbehaving server. If the request included
     * more than the final unaligned block, truncate it back to an
     * aligned result; if the request was only the final block, round
     * up to the full block and change the status to fully-allocated
     * (always a safe status, even if it loses information).
     */
    if (client->info.min_block && !QEMU_IS_ALIGNED(extent->length,
                                                   client->info.min_block)) {
        trace_nbd_parse_blockstatus_compliance("extent length is unaligned");
        if (extent->length > client->info.min_block) {
            extent->length = QEMU_ALIGN_DOWN(extent->length,
                                             client->info.min_block);
        } else {
            extent->length = client->info.min_block;
            extent->flags = 0;
        }
    }

    /*
     * We used NBD_CMD_FLAG_REQ_ONE, so the server should not have
     * sent us any more than one extent, nor should it have included
     * status beyond our request in that extent. However, it's easy
     * enough to ignore the server's noncompliance without killing the
     * connection; just ignore trailing extents, and clamp things to
     * the length of our request.
     */
    if (chunk->length > sizeof(context_id) + sizeof(*extent)) {
        trace_nbd_parse_blockstatus_compliance("more than one extent");
    }
    if (extent->length > orig_length) {
        extent->length = orig_length;
        trace_nbd_parse_blockstatus_compliance("extent length too large");
    }

    return 0;
}
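/*
 * Worked example of the alignment workaround above (illustrative numbers):
 * with min_block == 512, an extent of length 1000 is truncated to
 * QEMU_ALIGN_DOWN(1000, 512) == 512; an extent of length 300, which is
 * shorter than one block and therefore must be the final one, is rounded
 * up to 512 and its flags cleared to 0, i.e. reported as fully allocated.
 */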
/*
 * nbd_parse_error_payload
 * on success @errp contains message describing nbd error reply
 */
static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
                                   uint8_t *payload, int *request_ret,
                                   Error **errp)
{
    uint32_t error;
    uint16_t message_size;

    assert(chunk->type & (1 << 15));

    if (chunk->length < sizeof(error) + sizeof(message_size)) {
        error_setg(errp,
                   "Protocol error: invalid payload for structured error");
        return -EINVAL;
    }

    error = nbd_errno_to_system_errno(payload_advance32(&payload));
    if (error == 0) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with error = 0");
        return -EINVAL;
    }

    *request_ret = -error;
    message_size = payload_advance16(&payload);

    if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with incorrect message size");
        return -EINVAL;
    }

    /* TODO: Add a trace point to mention the server complaint */

    /* TODO handle ERROR_OFFSET */

    return 0;
}
static int nbd_co_receive_offset_data_payload(NBDClientSession *s,
                                              uint64_t orig_offset,
                                              QEMUIOVector *qiov, Error **errp)
{
    QEMUIOVector sub_qiov;
    uint64_t offset;
    size_t data_size;
    int ret;
    NBDStructuredReplyChunk *chunk = &s->reply.structured;

    assert(nbd_reply_is_structured(&s->reply));

    /* The NBD spec requires at least one byte of payload */
    if (chunk->length <= sizeof(offset)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_DATA");
        return -EINVAL;
    }

    if (nbd_read64(s->ioc, &offset, "OFFSET_DATA offset", errp) < 0) {
        return -EIO;
    }

    data_size = chunk->length - sizeof(offset);
    assert(data_size);
    if (offset < orig_offset || data_size > qiov->size ||
        offset > orig_offset + qiov->size - data_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (s->info.min_block && !QEMU_IS_ALIGNED(data_size, s->info.min_block)) {
        trace_nbd_structured_read_compliance("data");
    }

    qemu_iovec_init(&sub_qiov, qiov->niov);
    qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
    ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
    qemu_iovec_destroy(&sub_qiov);

    return ret < 0 ? -EIO : 0;
}
#define NBD_MAX_MALLOC_PAYLOAD 1000
static coroutine_fn int nbd_co_receive_structured_payload(
        NBDClientSession *s, void **payload, Error **errp)
{
    int ret;
    uint32_t len;

    assert(nbd_reply_is_structured(&s->reply));

    len = s->reply.structured.length;

    if (len == 0) {
        return 0;
    }

    if (payload == NULL) {
        error_setg(errp, "Unexpected structured payload");
        return -EINVAL;
    }

    if (len > NBD_MAX_MALLOC_PAYLOAD) {
        error_setg(errp, "Payload too large");
        return -EINVAL;
    }

    *payload = g_new(char, len);
    ret = nbd_read(s->ioc, *payload, len, "structured payload", errp);
    if (ret < 0) {
        g_free(*payload);
        *payload = NULL;
        return ret;
    }

    return 0;
}
/*
 * nbd_co_do_receive_one_chunk
 * for simple reply:
 *   set request_ret to received reply error
 *   if qiov is not NULL: read payload to @qiov
 * for structured reply chunk:
 *   if error chunk: read payload, set @request_ret, do not set @payload
 *   else if offset_data chunk: read payload data to @qiov, do not set @payload
 *   else: read payload to @payload
 *
 * If function fails, @errp contains corresponding error message, and the
 * connection with the server is suspect.  If it returns 0, then the
 * transaction succeeded (although @request_ret may be a negative errno
 * corresponding to the server's error reply), and errp is unchanged.
 */
static coroutine_fn int nbd_co_do_receive_one_chunk(
        NBDClientSession *s, uint64_t handle, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
{
    int ret;
    int i = HANDLE_TO_INDEX(s, handle);
    void *local_payload = NULL;
    NBDStructuredReplyChunk *chunk;

    if (payload) {
        *payload = NULL;
    }
    *request_ret = 0;

    /* Wait until we're woken up by nbd_connection_entry.  */
    s->requests[i].receiving = true;
    qemu_coroutine_yield();
    s->requests[i].receiving = false;
    if (!s->ioc || s->quit) {
        error_setg(errp, "Connection closed");
        return -EIO;
    }

    assert(s->reply.handle == handle);

    if (nbd_reply_is_simple(&s->reply)) {
        if (only_structured) {
            error_setg(errp, "Protocol error: simple reply when structured "
                             "reply chunk was expected");
            return -EINVAL;
        }

        *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
        if (*request_ret < 0 || !qiov) {
            return 0;
        }

        return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
                                     errp) < 0 ? -EIO : 0;
    }

    /* handle structured reply chunk */
    assert(s->info.structured_reply);
    chunk = &s->reply.structured;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without"
                       " NBD_REPLY_FLAG_DONE flag set");
            return -EINVAL;
        }
        if (chunk->length) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk with"
                       " nonzero length");
            return -EINVAL;
        }
        return 0;
    }

    if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
        if (!qiov) {
            error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk");
            return -EINVAL;
        }

        return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
                                                  qiov, errp);
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        payload = &local_payload;
    }

    ret = nbd_co_receive_structured_payload(s, payload, errp);
    if (ret < 0) {
        return ret;
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp);
        g_free(local_payload);
        return ret;
    }

    return 0;
}
/*
 * nbd_co_receive_one_chunk
 * Read reply, wake up connection_co and set s->quit if needed.
 * Return value is a fatal error code or normal nbd reply error code
 */
static coroutine_fn int nbd_co_receive_one_chunk(
        NBDClientSession *s, uint64_t handle, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, NBDReply *reply, void **payload,
        Error **errp)
{
    int ret = nbd_co_do_receive_one_chunk(s, handle, only_structured,
                                          request_ret, qiov, payload, errp);

    if (ret < 0) {
        s->quit = true;
    } else {
        /* For assert at loop start in nbd_connection_entry */
        if (reply) {
            *reply = s->reply;
        }
        s->reply.handle = 0;
    }

    if (s->connection_co) {
        aio_co_wake(s->connection_co);
    }

    return ret;
}
typedef struct NBDReplyChunkIter {
    int ret;
    int request_ret;
    Error *err;
    bool done, only_structured;
} NBDReplyChunkIter;

static void nbd_iter_channel_error(NBDReplyChunkIter *iter,
                                   int ret, Error **local_err)
{
    assert(ret < 0);

    if (!iter->ret) {
        iter->ret = ret;
        error_propagate(&iter->err, *local_err);
    } else {
        error_free(*local_err);
    }

    *local_err = NULL;
}

/* Save @request_ret, if it is the first error */
static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
{
    assert(ret < 0);

    if (!iter->request_ret) {
        iter->request_ret = ret;
    }
}
/*
 * NBD_FOREACH_REPLY_CHUNK
 * The pointer stored in @payload requires g_free() to free it.
 */
#define NBD_FOREACH_REPLY_CHUNK(s, iter, handle, structured, \
                                qiov, reply, payload) \
    for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
         nbd_reply_chunk_iter_receive(s, &iter, handle, qiov, reply, payload);)
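/*
 * Expansion sketch (illustrative): the macro turns reply processing into a
 * plain for loop whose condition both receives the next chunk and decides
 * whether the body runs, e.g.
 *
 *     NBDReplyChunkIter iter;
 *     NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, &reply, &payload) {
 *         ...per-chunk handling...
 *     }
 *
 * nbd_co_receive_return_code() below is the simplest real user: its loop
 * body is empty because the iterator already accumulates errors.
 */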
/*
 * nbd_reply_chunk_iter_receive
 * The pointer stored in @payload requires g_free() to free it.
 */
static bool nbd_reply_chunk_iter_receive(NBDClientSession *s,
                                         NBDReplyChunkIter *iter,
                                         uint64_t handle,
                                         QEMUIOVector *qiov, NBDReply *reply,
                                         void **payload)
{
    int ret, request_ret;
    NBDReply local_reply;
    NBDStructuredReplyChunk *chunk;
    Error *local_err = NULL;
    if (s->quit) {
        error_setg(&local_err, "Connection closed");
        nbd_iter_channel_error(iter, -EIO, &local_err);
        goto break_loop;
    }

    if (iter->done) {
        /* Previous iteration was last. */
        goto break_loop;
    }

    if (reply == NULL) {
        reply = &local_reply;
    }

    ret = nbd_co_receive_one_chunk(s, handle, iter->only_structured,
                                   &request_ret, qiov, reply, payload,
                                   &local_err);
    if (ret < 0) {
        nbd_iter_channel_error(iter, ret, &local_err);
    } else if (request_ret < 0) {
        nbd_iter_request_error(iter, request_ret);
    }

    /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
    if (nbd_reply_is_simple(reply) || s->quit) {
        goto break_loop;
    }

    chunk = &reply->structured;
    iter->only_structured = true;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        /* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */
        assert(chunk->flags & NBD_REPLY_FLAG_DONE);
        goto break_loop;
    }

    if (chunk->flags & NBD_REPLY_FLAG_DONE) {
        /* This iteration is last. */
        iter->done = true;
    }

    /* Execute the loop body */
    return true;

break_loop:
    s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL;

    qemu_co_mutex_lock(&s->send_mutex);
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_co_mutex_unlock(&s->send_mutex);

    return false;
}
static int nbd_co_receive_return_code(NBDClientSession *s, uint64_t handle,
                                      int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) {
        /* nbd_reply_chunk_iter_receive does all the work */
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}
static int nbd_co_receive_cmdread_reply(NBDClientSession *s, uint64_t handle,
                                        uint64_t offset, QEMUIOVector *qiov,
                                        int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, s->info.structured_reply,
                            qiov, &reply, &payload)
    {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_OFFSET_DATA:
            /*
             * special cased in nbd_co_receive_one_chunk, data is already
             * in qiov
             */
            break;
        case NBD_REPLY_TYPE_OFFSET_HOLE:
            ret = nbd_parse_offset_hole_payload(s, &reply.structured, payload,
                                                offset, qiov, &local_err);
            if (ret < 0) {
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                /* not allowed reply type */
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) for CMD_READ",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}
static int nbd_co_receive_blockstatus_reply(NBDClientSession *s,
                                            uint64_t handle, uint64_t length,
                                            NBDExtent *extent,
                                            int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;
    bool received = false;

    assert(!extent->length);
    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, &reply, &payload) {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_BLOCK_STATUS:
            if (received) {
                error_setg(&local_err, "Several BLOCK_STATUS chunks in reply");
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
            received = true;

            ret = nbd_parse_blockstatus_payload(s, &reply.structured,
                                                payload, length, extent,
                                                &local_err);
            if (ret < 0) {
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) "
                           "for CMD_BLOCK_STATUS",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    if (!extent->length && !iter.request_ret) {
        error_setg(&local_err, "Server did not reply with any status extents");
        nbd_iter_channel_error(&iter, -EIO, &local_err);
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}
static int nbd_co_request(BlockDriverState *bs, NBDRequest *request,
                          QEMUIOVector *write_qiov)
{
    int ret, request_ret;
    Error *local_err = NULL;
    NBDClientSession *client = nbd_get_client_session(bs);

    assert(request->type != NBD_CMD_READ);
    if (write_qiov) {
        assert(request->type == NBD_CMD_WRITE);
        assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
    } else {
        assert(request->type != NBD_CMD_WRITE);
    }
    ret = nbd_co_send_request(bs, request, write_qiov);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_return_code(client, request->handle,
                                     &request_ret, &local_err);
    if (local_err) {
        trace_nbd_co_request_fail(request->from, request->len, request->handle,
                                  request->flags, request->type,
                                  nbd_cmd_lookup(request->type),
                                  ret, error_get_pretty(local_err));
        error_free(local_err);
    }
    return ret ? ret : request_ret;
}
static int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
                                uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    int ret, request_ret;
    Error *local_err = NULL;
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_READ,
        .from = offset,
        .len = bytes,
    };

    assert(bytes <= NBD_MAX_BUFFER_SIZE);
    assert(!flags);

    if (!bytes) {
        return 0;
    }
    /*
     * Work around the fact that the block layer doesn't do
     * byte-accurate sizing yet - if the read exceeds the server's
     * advertised size because the block layer rounded size up, then
     * truncate the request to the server and tail-pad with zero.
     */
    if (offset >= client->info.size) {
        assert(bytes < BDRV_SECTOR_SIZE);
        qemu_iovec_memset(qiov, 0, 0, bytes);
        return 0;
    }
    if (offset + bytes > client->info.size) {
        uint64_t slop = offset + bytes - client->info.size;

        assert(slop < BDRV_SECTOR_SIZE);
        qemu_iovec_memset(qiov, bytes - slop, 0, slop);
        request.len -= slop;
    }

    ret = nbd_co_send_request(bs, &request, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_cmdread_reply(client, request.handle, offset, qiov,
                                       &request_ret, &local_err);
    if (local_err) {
        trace_nbd_co_request_fail(request.from, request.len, request.handle,
                                  request.flags, request.type,
                                  nbd_cmd_lookup(request.type),
                                  ret, error_get_pretty(local_err));
        error_free(local_err);
    }
    return ret ? ret : request_ret;
}
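/*
 * Worked example of the tail padding above (illustrative numbers): for a
 * server-advertised size of 1000 bytes and a read of 512 bytes at offset
 * 512, slop is 512 + 512 - 1000 == 24, so the wire request shrinks to 488
 * bytes and the last 24 bytes of @qiov are zero-filled locally.
 */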
static int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
                                 uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_WRITE,
        .from = offset,
        .len = bytes,
    };

    assert(!(client->info.flags & NBD_FLAG_READ_ONLY));
    if (flags & BDRV_REQ_FUA) {
        assert(client->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }

    assert(bytes <= NBD_MAX_BUFFER_SIZE);

    if (!bytes) {
        return 0;
    }
    return nbd_co_request(bs, &request, qiov);
}
static int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
                                       int bytes, BdrvRequestFlags flags)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_WRITE_ZEROES,
        .from = offset,
        .len = bytes,
    };

    assert(!(client->info.flags & NBD_FLAG_READ_ONLY));
    if (!(client->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
        return -ENOTSUP;
    }

    if (flags & BDRV_REQ_FUA) {
        assert(client->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }
    if (!(flags & BDRV_REQ_MAY_UNMAP)) {
        request.flags |= NBD_CMD_FLAG_NO_HOLE;
    }

    if (!bytes) {
        return 0;
    }
    return nbd_co_request(bs, &request, NULL);
}
static int nbd_client_co_flush(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = { .type = NBD_CMD_FLUSH };

    if (!(client->info.flags & NBD_FLAG_SEND_FLUSH)) {
        return 0;
    }

    request.from = 0;
    request.len = 0;

    return nbd_co_request(bs, &request, NULL);
}
static int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset,
                                  int bytes)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_TRIM,
        .from = offset,
        .len = bytes,
    };

    assert(!(client->info.flags & NBD_FLAG_READ_ONLY));
    if (!(client->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
        return 0;
    }

    return nbd_co_request(bs, &request, NULL);
}
static int coroutine_fn nbd_client_co_block_status(
        BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
        int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    int ret, request_ret;
    NBDExtent extent = { 0 };
    NBDClientSession *client = nbd_get_client_session(bs);
    Error *local_err = NULL;

    NBDRequest request = {
        .type = NBD_CMD_BLOCK_STATUS,
        .from = offset,
        .len = MIN(MIN_NON_ZERO(QEMU_ALIGN_DOWN(INT_MAX,
                                                bs->bl.request_alignment),
                                client->info.max_block),
                   MIN(bytes, client->info.size - offset)),
        .flags = NBD_CMD_FLAG_REQ_ONE,
    };

    if (!client->info.base_allocation) {
        *pnum = bytes;
        *map = offset;
        *file = bs;
        return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
    }

    /*
     * Work around the fact that the block layer doesn't do
     * byte-accurate sizing yet - if the status request exceeds the
     * server's advertised size because the block layer rounded size
     * up, we truncated the request to the server (above), or are
     * called on just the hole.
     */
    if (offset >= client->info.size) {
        *pnum = bytes;
        assert(bytes < BDRV_SECTOR_SIZE);
        /* Intentionally don't report offset_valid for the hole */
        return BDRV_BLOCK_ZERO;
    }

    if (client->info.min_block) {
        assert(QEMU_IS_ALIGNED(request.len, client->info.min_block));
    }

    ret = nbd_co_send_request(bs, &request, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_blockstatus_reply(client, request.handle, bytes,
                                           &extent, &request_ret, &local_err);
    if (local_err) {
        trace_nbd_co_request_fail(request.from, request.len, request.handle,
                                  request.flags, request.type,
                                  nbd_cmd_lookup(request.type),
                                  ret, error_get_pretty(local_err));
        error_free(local_err);
    }
    if (ret < 0 || request_ret < 0) {
        return ret ? ret : request_ret;
    }

    assert(extent.length);
    *pnum = extent.length;
    *map = offset;
    *file = bs;
    return (extent.flags & NBD_STATE_HOLE ? 0 : BDRV_BLOCK_DATA) |
           (extent.flags & NBD_STATE_ZERO ? BDRV_BLOCK_ZERO : 0) |
           BDRV_BLOCK_OFFSET_VALID;
}
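/*
 * Flag mapping sketch (illustrative): the NBD extent flags translate to
 * block layer status bits as
 *
 *     NBD_STATE_HOLE clear -> BDRV_BLOCK_DATA
 *     NBD_STATE_ZERO set   -> BDRV_BLOCK_ZERO
 *
 * so e.g. a fully allocated, non-zero extent is reported as
 * BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID.
 */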
static void nbd_client_close(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = { .type = NBD_CMD_DISC };

    assert(client->ioc);

    nbd_send_request(client->ioc, &request);

    nbd_teardown_connection(bs);
}
static QIOChannelSocket *nbd_establish_connection(SocketAddress *saddr,
                                                  Error **errp)
{
    QIOChannelSocket *sioc;
    Error *local_err = NULL;

    sioc = qio_channel_socket_new();
    qio_channel_set_name(QIO_CHANNEL(sioc), "nbd-client");

    qio_channel_socket_connect_sync(sioc, saddr, &local_err);
    if (local_err) {
        object_unref(OBJECT(sioc));
        error_propagate(errp, local_err);
        return NULL;
    }

    qio_channel_set_delay(QIO_CHANNEL(sioc), false);

    return sioc;
}
static int nbd_client_connect(BlockDriverState *bs,
                              SocketAddress *saddr,
                              const char *export,
                              QCryptoTLSCreds *tlscreds,
                              const char *hostname,
                              const char *x_dirty_bitmap,
                              Error **errp)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    int ret;

    /*
     * establish TCP connection, return error if it fails
     * TODO: Configurable retry-until-timeout behaviour.
     */
    QIOChannelSocket *sioc = nbd_establish_connection(saddr, errp);

    if (!sioc) {
        return -ECONNREFUSED;
    }

    /* NBD handshake */
    trace_nbd_client_connect(export);
    qio_channel_set_blocking(QIO_CHANNEL(sioc), true, NULL);

    client->info.request_sizes = true;
    client->info.structured_reply = true;
    client->info.base_allocation = true;
    client->info.x_dirty_bitmap = g_strdup(x_dirty_bitmap);
    client->info.name = g_strdup(export ?: "");
    ret = nbd_receive_negotiate(QIO_CHANNEL(sioc), tlscreds, hostname,
                                &client->ioc, &client->info, errp);
    g_free(client->info.x_dirty_bitmap);
    g_free(client->info.name);
    if (ret < 0) {
        object_unref(OBJECT(sioc));
        return ret;
    }
    if (x_dirty_bitmap && !client->info.base_allocation) {
        error_setg(errp, "requested x-dirty-bitmap %s not found",
                   x_dirty_bitmap);
        ret = -EINVAL;
        goto fail;
    }
    if (client->info.flags & NBD_FLAG_READ_ONLY) {
        ret = bdrv_apply_auto_read_only(bs, "NBD export is read-only", errp);
        if (ret < 0) {
            goto fail;
        }
    }
    if (client->info.flags & NBD_FLAG_SEND_FUA) {
        bs->supported_write_flags = BDRV_REQ_FUA;
        bs->supported_zero_flags |= BDRV_REQ_FUA;
    }
    if (client->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
        bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
    }

    client->sioc = sioc;

    if (!client->ioc) {
        client->ioc = QIO_CHANNEL(sioc);
        object_ref(OBJECT(client->ioc));
    }

    /*
     * Now that we're connected, set the socket to be non-blocking and
     * kick the reply mechanism.
     */
    qio_channel_set_blocking(QIO_CHANNEL(sioc), false, NULL);
    client->connection_co = qemu_coroutine_create(nbd_connection_entry, client);
    bdrv_inc_in_flight(bs);
    nbd_client_attach_aio_context(bs, bdrv_get_aio_context(bs));

    trace_nbd_client_connect_success(export);

    return 0;

 fail:
    /*
     * We have connected, but must fail for other reasons. The
     * connection is still blocking; send NBD_CMD_DISC as a courtesy
     * to the server.
     */
    {
        NBDRequest request = { .type = NBD_CMD_DISC };

        nbd_send_request(client->ioc ?: QIO_CHANNEL(sioc), &request);

        object_unref(OBJECT(sioc));

        return ret;
    }
}
static int nbd_client_init(BlockDriverState *bs,
                           SocketAddress *saddr,
                           const char *export,
                           QCryptoTLSCreds *tlscreds,
                           const char *hostname,
                           const char *x_dirty_bitmap,
                           Error **errp)
{
    NBDClientSession *client = nbd_get_client_session(bs);

    client->bs = bs;
    qemu_co_mutex_init(&client->send_mutex);
    qemu_co_queue_init(&client->free_sema);

    return nbd_client_connect(bs, saddr, export, tlscreds, hostname,
                              x_dirty_bitmap, errp);
}
static int nbd_parse_uri(const char *filename, QDict *options)
{
    URI *uri;
    const char *p;
    QueryParams *qp = NULL;
    int ret = 0;
    bool is_unix;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!g_strcmp0(uri->scheme, "nbd")) {
        is_unix = false;
    } else if (!g_strcmp0(uri->scheme, "nbd+tcp")) {
        is_unix = false;
    } else if (!g_strcmp0(uri->scheme, "nbd+unix")) {
        is_unix = true;
    } else {
        ret = -EINVAL;
        goto out;
    }

    p = uri->path ? uri->path : "/";
    p += strspn(p, "/");
    if (p[0]) {
        qdict_put_str(options, "export", p);
    }

    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* nbd+unix:///export?socket=path */
        if (uri->server || uri->port || strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        qdict_put_str(options, "server.type", "unix");
        qdict_put_str(options, "server.path", qp->p[0].value);
    } else {
        QString *host;
        char *port_str;

        /* nbd[+tcp]://host[:port]/export */
        if (!uri->server) {
            ret = -EINVAL;
            goto out;
        }

        /* strip braces from literal IPv6 address */
        if (uri->server[0] == '[') {
            host = qstring_from_substr(uri->server, 1,
                                       strlen(uri->server) - 1);
        } else {
            host = qstring_from_str(uri->server);
        }

        qdict_put_str(options, "server.type", "inet");
        qdict_put(options, "server.host", host);

        port_str = g_strdup_printf("%d", uri->port ?: NBD_DEFAULT_PORT);
        qdict_put_str(options, "server.port", port_str);
        g_free(port_str);
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}
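/*
 * Accepted pseudo-URL forms (illustrative examples):
 *
 *     nbd://localhost:10809/export       -> server.type=inet, export=export
 *     nbd+tcp://[::1]/export             -> braces stripped from IPv6 host
 *     nbd+unix:///export?socket=/tmp/sk  -> server.type=unix,
 *                                           server.path=/tmp/sk
 *
 * TCP URIs must not carry a query string; Unix URIs require exactly the
 * "socket" query parameter.
 */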
static bool nbd_has_filename_options_conflict(QDict *options, Error **errp)
{
    const QDictEntry *e;

    for (e = qdict_first(options); e; e = qdict_next(options, e)) {
        if (!strcmp(e->key, "host") ||
            !strcmp(e->key, "port") ||
            !strcmp(e->key, "path") ||
            !strcmp(e->key, "export") ||
            strstart(e->key, "server.", NULL))
        {
            error_setg(errp, "Option '%s' cannot be used with a file name",
                       e->key);
            return true;
        }
    }

    return false;
}
static void nbd_parse_filename(const char *filename, QDict *options,
                               Error **errp)
{
    char *file;
    char *export_name;
    const char *host_spec;
    const char *unixpath;

    if (nbd_has_filename_options_conflict(options, errp)) {
        return;
    }

    if (strstr(filename, "://")) {
        int ret = nbd_parse_uri(filename, options);
        if (ret < 0) {
            error_setg(errp, "No valid URL specified");
        }
        return;
    }

    file = g_strdup(filename);

    export_name = strstr(file, EN_OPTSTR);
    if (export_name) {
        if (export_name[strlen(EN_OPTSTR)] == 0) {
            /* empty export name */
            goto out;
        }
        export_name[0] = 0; /* truncate 'file' */
        export_name += strlen(EN_OPTSTR);

        qdict_put_str(options, "export", export_name);
    }

    /* extract the host_spec - fail if it's not nbd:... */
    if (!strstart(file, "nbd:", &host_spec)) {
        error_setg(errp, "File name string for NBD must start with 'nbd:'");
        goto out;
    }

    if (!*host_spec) {
        goto out;
    }

    /* are we a UNIX or TCP socket? */
    if (strstart(host_spec, "unix:", &unixpath)) {
        qdict_put_str(options, "server.type", "unix");
        qdict_put_str(options, "server.path", unixpath);
    } else {
        InetSocketAddress *addr = g_new(InetSocketAddress, 1);

        if (inet_parse(addr, host_spec, errp)) {
            goto out_inet;
        }

        qdict_put_str(options, "server.type", "inet");
        qdict_put_str(options, "server.host", addr->host);
        qdict_put_str(options, "server.port", addr->port);
    out_inet:
        qapi_free_InetSocketAddress(addr);
    }

out:
    g_free(file);
}
static bool nbd_process_legacy_socket_options(QDict *output_options,
                                              QemuOpts *legacy_opts,
                                              Error **errp)
{
    const char *path = qemu_opt_get(legacy_opts, "path");
    const char *host = qemu_opt_get(legacy_opts, "host");
    const char *port = qemu_opt_get(legacy_opts, "port");
    const QDictEntry *e;

    if (!path && !host && !port) {
        return true;
    }

    for (e = qdict_first(output_options); e; e = qdict_next(output_options, e))
    {
        if (strstart(e->key, "server.", NULL)) {
            error_setg(errp, "Cannot use 'server' and path/host/port at the "
                       "same time");
            return false;
        }
    }

    if (path && host) {
        error_setg(errp, "path and host may not be used at the same time");
        return false;
    } else if (path) {
        if (port) {
            error_setg(errp, "port may not be used without host");
            return false;
        }

        qdict_put_str(output_options, "server.type", "unix");
        qdict_put_str(output_options, "server.path", path);
    } else if (host) {
        qdict_put_str(output_options, "server.type", "inet");
        qdict_put_str(output_options, "server.host", host);
        qdict_put_str(output_options, "server.port",
                      port ?: stringify(NBD_DEFAULT_PORT));
    }

    return true;
}
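/*
 * Translation example (illustrative): the legacy form
 *
 *     -drive driver=nbd,host=localhost,port=10809
 *
 * becomes the modern flattened form
 *
 *     server.type=inet, server.host=localhost, server.port=10809
 *
 * Mixing "server.*" keys with the legacy host/port/path keys is rejected
 * above.
 */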
static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options,
                                 Error **errp)
{
    SocketAddress *saddr = NULL;
    QDict *addr = NULL;
    Visitor *iv = NULL;
    Error *local_err = NULL;

    qdict_extract_subqdict(options, &addr, "server.");
    if (!qdict_size(addr)) {
        error_setg(errp, "NBD server address missing");
        goto done;
    }

    iv = qobject_input_visitor_new_flat_confused(addr, errp);
    if (!iv) {
        goto done;
    }

    visit_type_SocketAddress(iv, NULL, &saddr, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto done;
    }

done:
    qobject_unref(addr);
    visit_free(iv);
    return saddr;
}
static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp)
{
    Object *obj;
    QCryptoTLSCreds *creds;

    obj = object_resolve_path_component(
        object_get_objects_root(), id);
    if (!obj) {
        error_setg(errp, "No TLS credentials with id '%s'",
                   id);
        return NULL;
    }
    creds = (QCryptoTLSCreds *)
        object_dynamic_cast(obj, TYPE_QCRYPTO_TLS_CREDS);
    if (!creds) {
        error_setg(errp, "Object with id '%s' is not TLS credentials",
                   id);
        return NULL;
    }

    if (creds->endpoint != QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT) {
        error_setg(errp,
                   "Expecting TLS credentials with a client endpoint");
        return NULL;
    }
    object_ref(obj);
    return creds;
}
static QemuOptsList nbd_runtime_opts = {
    .name = "nbd",
    .head = QTAILQ_HEAD_INITIALIZER(nbd_runtime_opts.head),
    .desc = {
        {
            .name = "host",
            .type = QEMU_OPT_STRING,
            .help = "TCP host to connect to",
        },
        {
            .name = "port",
            .type = QEMU_OPT_STRING,
            .help = "TCP port to connect to",
        },
        {
            .name = "path",
            .type = QEMU_OPT_STRING,
            .help = "Unix socket path to connect to",
        },
        {
            .name = "export",
            .type = QEMU_OPT_STRING,
            .help = "Name of the NBD export to open",
        },
        {
            .name = "tls-creds",
            .type = QEMU_OPT_STRING,
            .help = "ID of the TLS credentials to use",
        },
        {
            .name = "x-dirty-bitmap",
            .type = QEMU_OPT_STRING,
            .help = "experimental: expose named dirty bitmap in place of "
                    "data",
        },
        { /* end of list */ }
    },
};
static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    int ret = -EINVAL;
    BDRVNBDState *s = bs->opaque;
    QemuOpts *opts = NULL;
    Error *local_err = NULL;
    QCryptoTLSCreds *tlscreds = NULL;
    const char *hostname = NULL;

    opts = qemu_opts_create(&nbd_runtime_opts, NULL, 0, &error_abort);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error;
    }

    /* Translate @host, @port, and @path to a SocketAddress */
    if (!nbd_process_legacy_socket_options(options, opts, errp)) {
        goto error;
    }

    /* Pop the config into our state object. Exit if invalid. */
    s->saddr = nbd_config(s, options, errp);
    if (!s->saddr) {
        goto error;
    }

    s->export = g_strdup(qemu_opt_get(opts, "export"));

    s->tlscredsid = g_strdup(qemu_opt_get(opts, "tls-creds"));
    if (s->tlscredsid) {
        tlscreds = nbd_get_tls_creds(s->tlscredsid, errp);
        if (!tlscreds) {
            goto error;
        }

        /* TODO SOCKET_ADDRESS_KIND_FD where fd has AF_INET or AF_INET6 */
        if (s->saddr->type != SOCKET_ADDRESS_TYPE_INET) {
            error_setg(errp, "TLS only supported over IP sockets");
            goto error;
        }
        hostname = s->saddr->u.inet.host;
    }

    /* NBD handshake */
    ret = nbd_client_init(bs, s->saddr, s->export, tlscreds, hostname,
                          qemu_opt_get(opts, "x-dirty-bitmap"), errp);

 error:
    if (tlscreds) {
        object_unref(OBJECT(tlscreds));
    }
    if (ret < 0) {
        qapi_free_SocketAddress(s->saddr);
        g_free(s->export);
        g_free(s->tlscredsid);
    }
    qemu_opts_del(opts);
    return ret;
}
static int nbd_co_flush(BlockDriverState *bs)
{
    return nbd_client_co_flush(bs);
}
static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
{
    NBDClientSession *s = nbd_get_client_session(bs);
    uint32_t min = s->info.min_block;
    uint32_t max = MIN_NON_ZERO(NBD_MAX_BUFFER_SIZE, s->info.max_block);

    /*
     * If the server did not advertise an alignment:
     * - a size that is not sector-aligned implies that an alignment
     *   of 1 can be used to access those tail bytes
     * - advertisement of block status requires an alignment of 1, so
     *   that we don't violate block layer constraints that block
     *   status is always aligned (as we can't control whether the
     *   server will report sub-sector extents, such as a hole at EOF
     *   on an unaligned POSIX file)
     * - otherwise, assume the server is so old that we are safer avoiding
     *   sub-sector requests
     */
    if (!min) {
        min = (!QEMU_IS_ALIGNED(s->info.size, BDRV_SECTOR_SIZE) ||
               s->info.base_allocation) ? 1 : BDRV_SECTOR_SIZE;
    }

    bs->bl.request_alignment = min;
    bs->bl.max_pdiscard = max;
    bs->bl.max_pwrite_zeroes = max;
    bs->bl.max_transfer = max;

    if (s->info.opt_block &&
        s->info.opt_block > bs->bl.opt_transfer) {
        bs->bl.opt_transfer = s->info.opt_block;
    }
}
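/*
 * Worked example (illustrative numbers): a server that advertises no
 * min_block but exposes a 1000-byte export gets request_alignment 1,
 * because the unaligned tail must stay byte-addressable; a server with a
 * sector-aligned size and no block status support falls back to
 * BDRV_SECTOR_SIZE (512).
 */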
static void nbd_close(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    nbd_client_close(bs);

    qapi_free_SocketAddress(s->saddr);
    g_free(s->export);
    g_free(s->tlscredsid);
}
static int64_t nbd_getlength(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    return s->client.info.size;
}
static void nbd_refresh_filename(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;
    const char *host = NULL, *port = NULL, *path = NULL;

    if (s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
        const InetSocketAddress *inet = &s->saddr->u.inet;
        if (!inet->has_ipv4 && !inet->has_ipv6 && !inet->has_to) {
            host = inet->host;
            port = inet->port;
        }
    } else if (s->saddr->type == SOCKET_ADDRESS_TYPE_UNIX) {
        path = s->saddr->u.q_unix.path;
    } /* else can't represent as pseudo-filename */

    if (path && s->export) {
        snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                 "nbd+unix:///%s?socket=%s", s->export, path);
    } else if (path && !s->export) {
        snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                 "nbd+unix://?socket=%s", path);
    } else if (host && s->export) {
        snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                 "nbd://%s:%s/%s", host, port, s->export);
    } else if (host && !s->export) {
        snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                 "nbd://%s:%s", host, port);
    }
}
static char *nbd_dirname(BlockDriverState *bs, Error **errp)
{
    /* The generic bdrv_dirname() implementation is able to work out some
     * directory name for NBD nodes, but that would be wrong. So far there is
     * no specification for how "export paths" would work, so NBD does not
     * have directory names. */
    error_setg(errp, "Cannot generate a base directory for NBD nodes");
    return NULL;
}
static const char *const nbd_strong_runtime_opts[] = {
    "path",
    "host",
    "port",
    "export",
    "tls-creds",
    "server.",

    NULL
};
static BlockDriver bdrv_nbd = {
    .format_name                = "nbd",
    .protocol_name              = "nbd",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_file_open             = nbd_open,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_getlength             = nbd_getlength,
    .bdrv_detach_aio_context    = nbd_client_detach_aio_context,
    .bdrv_attach_aio_context    = nbd_client_attach_aio_context,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
};
static BlockDriver bdrv_nbd_tcp = {
    .format_name                = "nbd",
    .protocol_name              = "nbd+tcp",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_file_open             = nbd_open,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_getlength             = nbd_getlength,
    .bdrv_detach_aio_context    = nbd_client_detach_aio_context,
    .bdrv_attach_aio_context    = nbd_client_attach_aio_context,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
};
static BlockDriver bdrv_nbd_unix = {
    .format_name                = "nbd",
    .protocol_name              = "nbd+unix",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_file_open             = nbd_open,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_getlength             = nbd_getlength,
    .bdrv_detach_aio_context    = nbd_client_detach_aio_context,
    .bdrv_attach_aio_context    = nbd_client_attach_aio_context,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
};
static void bdrv_nbd_init(void)
{
    bdrv_register(&bdrv_nbd);
    bdrv_register(&bdrv_nbd_tcp);
    bdrv_register(&bdrv_nbd_unix);
}

block_init(bdrv_nbd_init);