/*
 * QEMU Block driver for NBD
 *
 * Copyright (c) 2019 Virtuozzo International GmbH.
 * Copyright (C) 2016 Red Hat, Inc.
 * Copyright (C) 2008 Bull S.A.S.
 *     Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * Some parts:
 *    Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"

#include "qemu/option.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"

#include "qapi/qapi-visit-sockets.h"
#include "qapi/qmp/qstring.h"
#include "qapi/clone-visitor.h"

#include "block/qdict.h"
#include "block/nbd.h"
#include "block/block_int.h"
#include "block/coroutines.h"

#include "qemu/yank.h"
#define EN_OPTSTR ":exportname="
#define MAX_NBD_REQUESTS    16

#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
#define INDEX_TO_HANDLE(bs, index)  ((index)  ^ (uint64_t)(intptr_t)(bs))
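/*
 * Note: HANDLE_TO_INDEX() and INDEX_TO_HANDLE() are inverses of each other:
 * XOR-ing a request slot index with the BDS pointer yields a per-device
 * request handle, and XOR-ing the same value again recovers the index. A
 * reply carrying a handle that was not generated for this BDS therefore
 * decodes to an out-of-range index and is rejected in nbd_receive_replies().
 */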
typedef struct {
    Coroutine *coroutine;
    uint64_t offset;        /* original offset of the request */
    bool receiving;         /* sleeping in the yield in nbd_receive_replies */
    bool reply_possible;    /* reply header not yet received */
} NBDClientRequest;
typedef enum NBDClientState {
    NBD_CLIENT_CONNECTING_WAIT,
    NBD_CLIENT_CONNECTING_NOWAIT,
    NBD_CLIENT_CONNECTED,
    NBD_CLIENT_QUIT
} NBDClientState;
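/*
 * Client state machine as used throughout this file: the client starts in
 * CONNECTING_WAIT during nbd_open(), moves to CONNECTED once negotiation
 * succeeds, and drops back to CONNECTING_WAIT (requests are queued while
 * reconnecting) or CONNECTING_NOWAIT (requests fail immediately) on a
 * channel error, depending on whether @reconnect-delay was configured.
 * QUIT is terminal: it is entered on fatal protocol errors, on yank, and
 * on teardown.
 */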
typedef struct BDRVNBDState {
    QIOChannel *ioc; /* The current I/O channel */
    NBDExportInfo info;

    CoMutex send_mutex;
    CoQueue free_sema;

    CoMutex receive_mutex;
    int in_flight;
    NBDClientState state;

    QEMUTimer *reconnect_delay_timer;
    QEMUTimer *open_timer;

    NBDClientRequest requests[MAX_NBD_REQUESTS];
    NBDReply reply;
    BlockDriverState *bs;

    /* Connection parameters */
    uint32_t reconnect_delay;
    uint32_t open_timeout;
    SocketAddress *saddr;
    char *export, *tlscredsid;
    QCryptoTLSCreds *tlscreds;
    const char *hostname;
    char *x_dirty_bitmap;
    bool alloc_depth;

    NBDClientConnection *conn;
} BDRVNBDState;
static void nbd_yank(void *opaque);
static void nbd_clear_bdrvstate(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    nbd_client_connection_release(s->conn);
    s->conn = NULL;

    yank_unregister_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name));

    /* Must not leave timers behind that would access freed data */
    assert(!s->reconnect_delay_timer);
    assert(!s->open_timer);

    object_unref(OBJECT(s->tlscreds));
    qapi_free_SocketAddress(s->saddr);
    s->saddr = NULL;
    g_free(s->export);
    s->export = NULL;
    g_free(s->tlscredsid);
    s->tlscredsid = NULL;
    g_free(s->x_dirty_bitmap);
    s->x_dirty_bitmap = NULL;
}
static bool nbd_client_connected(BDRVNBDState *s)
{
    return qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTED;
}
static bool nbd_recv_coroutine_wake_one(NBDClientRequest *req)
{
    if (req->receiving) {
        req->receiving = false;
        aio_co_wake(req->coroutine);
        return true;
    }

    return false;
}
static void nbd_recv_coroutines_wake(BDRVNBDState *s, bool all)
{
    int i;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (nbd_recv_coroutine_wake_one(&s->requests[i]) && !all) {
            return;
        }
    }
}
static void nbd_channel_error(BDRVNBDState *s, int ret)
{
    if (nbd_client_connected(s)) {
        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
    }

    if (ret == -EIO) {
        if (nbd_client_connected(s)) {
            s->state = s->reconnect_delay ? NBD_CLIENT_CONNECTING_WAIT :
                                            NBD_CLIENT_CONNECTING_NOWAIT;
        }
    } else {
        s->state = NBD_CLIENT_QUIT;
    }

    nbd_recv_coroutines_wake(s, true);
}
static void reconnect_delay_timer_del(BDRVNBDState *s)
{
    if (s->reconnect_delay_timer) {
        timer_free(s->reconnect_delay_timer);
        s->reconnect_delay_timer = NULL;
    }
}
static void reconnect_delay_timer_cb(void *opaque)
{
    BDRVNBDState *s = opaque;

    if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) {
        s->state = NBD_CLIENT_CONNECTING_NOWAIT;
        nbd_co_establish_connection_cancel(s->conn);
        while (qemu_co_enter_next(&s->free_sema, NULL)) {
            /* Resume all queued requests */
        }
    }

    reconnect_delay_timer_del(s);
}
static void reconnect_delay_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
{
    if (qatomic_load_acquire(&s->state) != NBD_CLIENT_CONNECTING_WAIT) {
        return;
    }

    assert(!s->reconnect_delay_timer);
    s->reconnect_delay_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
                                             QEMU_CLOCK_REALTIME,
                                             SCALE_NS,
                                             reconnect_delay_timer_cb, s);
    timer_mod(s->reconnect_delay_timer, expire_time_ns);
}
static void nbd_teardown_connection(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(!s->in_flight);

    if (s->ioc) {
        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, s->bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;
    }

    s->state = NBD_CLIENT_QUIT;
}
static void open_timer_del(BDRVNBDState *s)
{
    if (s->open_timer) {
        timer_free(s->open_timer);
        s->open_timer = NULL;
    }
}
static void open_timer_cb(void *opaque)
{
    BDRVNBDState *s = opaque;

    nbd_co_establish_connection_cancel(s->conn);
    open_timer_del(s);
}
static void open_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
{
    assert(!s->open_timer);
    s->open_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
                                  QEMU_CLOCK_REALTIME,
                                  SCALE_NS,
                                  open_timer_cb, s);
    timer_mod(s->open_timer, expire_time_ns);
}
static bool nbd_client_connecting(BDRVNBDState *s)
{
    NBDClientState state = qatomic_load_acquire(&s->state);
    return state == NBD_CLIENT_CONNECTING_WAIT ||
           state == NBD_CLIENT_CONNECTING_NOWAIT;
}
static bool nbd_client_connecting_wait(BDRVNBDState *s)
{
    return qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT;
}
/*
 * Update @bs with information learned during a completed negotiation process.
 * Return failure if the server's advertised options are incompatible with the
 * client's needs.
 */
static int nbd_handle_updated_info(BlockDriverState *bs, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int ret;

    if (s->x_dirty_bitmap) {
        if (!s->info.base_allocation) {
            error_setg(errp, "requested x-dirty-bitmap %s not found",
                       s->x_dirty_bitmap);
            return -EINVAL;
        }
        if (strcmp(s->x_dirty_bitmap, "qemu:allocation-depth") == 0) {
            s->alloc_depth = true;
        }
    }

    if (s->info.flags & NBD_FLAG_READ_ONLY) {
        ret = bdrv_apply_auto_read_only(bs, "NBD export is read-only", errp);
        if (ret < 0) {
            return ret;
        }
    }

    if (s->info.flags & NBD_FLAG_SEND_FUA) {
        bs->supported_write_flags = BDRV_REQ_FUA;
        bs->supported_zero_flags |= BDRV_REQ_FUA;
    }

    if (s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
        bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
        if (s->info.flags & NBD_FLAG_SEND_FAST_ZERO) {
            bs->supported_zero_flags |= BDRV_REQ_NO_FALLBACK;
        }
    }

    trace_nbd_client_handshake_success(s->export);

    return 0;
}
int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
                                                Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int ret;
    bool blocking = nbd_client_connecting_wait(s);

    assert(!s->ioc);

    s->ioc = nbd_co_establish_connection(s->conn, &s->info, blocking, errp);
    if (!s->ioc) {
        return -ECONNREFUSED;
    }

    yank_register_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name), nbd_yank,
                           bs);

    ret = nbd_handle_updated_info(s->bs, NULL);
    if (ret < 0) {
        /*
         * We have connected, but must fail for other reasons.
         * Send NBD_CMD_DISC as a courtesy to the server.
         */
        NBDRequest request = { .type = NBD_CMD_DISC };

        nbd_send_request(s->ioc, &request);

        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;

        return ret;
    }

    qio_channel_set_blocking(s->ioc, false, NULL);
    qio_channel_attach_aio_context(s->ioc, bdrv_get_aio_context(bs));

    /* successfully connected */
    s->state = NBD_CLIENT_CONNECTED;
    qemu_co_queue_restart_all(&s->free_sema);

    return 0;
}
/* called under s->send_mutex */
static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s)
{
    assert(nbd_client_connecting(s));
    assert(s->in_flight == 0);

    if (nbd_client_connecting_wait(s) && s->reconnect_delay &&
        !s->reconnect_delay_timer)
    {
        /*
         * It's the first reconnect attempt after switching to
         * NBD_CLIENT_CONNECTING_WAIT
         */
        reconnect_delay_timer_init(s,
                                   qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
                                   s->reconnect_delay * NANOSECONDS_PER_SECOND);
    }

    /*
     * Now we are sure that nobody is accessing the channel, and no one will
     * try until we set the state to CONNECTED.
     */

    /* Finalize previous connection if any */
    if (s->ioc) {
        qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc));
        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, s->bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;
    }

    nbd_co_do_establish_connection(s->bs, NULL);

    /*
     * The reconnect attempt is done (maybe successfully, maybe not), so
     * we no longer need this timer. Delete it so it will not outlive
     * this I/O request (so draining removes all timers).
     */
    reconnect_delay_timer_del(s);
}
static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle)
{
    int ret;
    uint64_t ind = HANDLE_TO_INDEX(s, handle), ind2;
    QEMU_LOCK_GUARD(&s->receive_mutex);

    while (true) {
        if (s->reply.handle == handle) {
            /* We are done */
            return 0;
        }

        if (!nbd_client_connected(s)) {
            return -EIO;
        }

        if (s->reply.handle != 0) {
            /*
             * Some other request is being handled now. It should already be
             * woken by whoever set s->reply.handle (or never wait in this
             * yield). So, we should not wake it here.
             */
            ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
            assert(!s->requests[ind2].receiving);

            s->requests[ind].receiving = true;
            qemu_co_mutex_unlock(&s->receive_mutex);

            qemu_coroutine_yield();
            /*
             * We may be woken for 3 reasons:
             * 1. From this function, executing in parallel coroutine, when our
             *    handle is received.
             * 2. From nbd_channel_error(), when connection is lost.
             * 3. From nbd_co_receive_one_chunk(), when previous request is
             *    finished and s->reply.handle set to 0.
             * Anyway, it's OK to lock the mutex and go to the next iteration.
             */

            qemu_co_mutex_lock(&s->receive_mutex);
            assert(!s->requests[ind].receiving);
            continue;
        }

        /* We are under mutex and handle is 0. We have to do the dirty work. */
        assert(s->reply.handle == 0);
        ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, NULL);
        if (ret <= 0) {
            ret = ret ? ret : -EIO;
            nbd_channel_error(s, ret);
            return ret;
        }
        if (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply) {
            nbd_channel_error(s, -EINVAL);
            return -EINVAL;
        }
        if (s->reply.handle == handle) {
            /* We are done */
            return 0;
        }
        ind2 = HANDLE_TO_INDEX(s, s->reply.handle);
        if (ind2 >= MAX_NBD_REQUESTS || !s->requests[ind2].reply_possible) {
            nbd_channel_error(s, -EINVAL);
            return -EINVAL;
        }
        nbd_recv_coroutine_wake_one(&s->requests[ind2]);
    }
}
static int nbd_co_send_request(BlockDriverState *bs,
                               NBDRequest *request,
                               QEMUIOVector *qiov)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int rc, i = -1;

    qemu_co_mutex_lock(&s->send_mutex);

    while (s->in_flight == MAX_NBD_REQUESTS ||
           (!nbd_client_connected(s) && s->in_flight > 0))
    {
        qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
    }

    if (nbd_client_connecting(s)) {
        nbd_reconnect_attempt(s);
    }

    if (!nbd_client_connected(s)) {
        rc = -EIO;
        goto err;
    }

    s->in_flight++;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->requests[i].coroutine == NULL) {
            break;
        }
    }

    g_assert(qemu_in_coroutine());
    assert(i < MAX_NBD_REQUESTS);

    s->requests[i].coroutine = qemu_coroutine_self();
    s->requests[i].offset = request->from;
    s->requests[i].receiving = false;
    s->requests[i].reply_possible = true;

    request->handle = INDEX_TO_HANDLE(s, i);

    assert(s->ioc);

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (nbd_client_connected(s) && rc >= 0) {
            if (qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
                                       NULL) < 0) {
                rc = -EIO;
            }
        } else if (rc >= 0) {
            rc = -EIO;
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }

err:
    if (rc < 0) {
        nbd_channel_error(s, rc);
        if (i != -1) {
            s->requests[i].coroutine = NULL;
            s->in_flight--;
            qemu_co_queue_next(&s->free_sema);
        }
    }
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}
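/*
 * Request lifecycle note: nbd_co_send_request() claims one of the
 * MAX_NBD_REQUESTS (16) slots under send_mutex, stamps the request with a
 * handle derived from the slot index, and transmits header (plus, for
 * writes, the corked payload) in one step. The slot is released either on
 * a send failure above or by nbd_reply_chunk_iter_receive() once the
 * reply has been fully consumed.
 */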
static inline uint16_t payload_advance16(uint8_t **payload)
{
    *payload += 2;
    return lduw_be_p(*payload - 2);
}

static inline uint32_t payload_advance32(uint8_t **payload)
{
    *payload += 4;
    return ldl_be_p(*payload - 4);
}

static inline uint64_t payload_advance64(uint8_t **payload)
{
    *payload += 8;
    return ldq_be_p(*payload - 8);
}
static int nbd_parse_offset_hole_payload(BDRVNBDState *s,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_offset,
                                         QEMUIOVector *qiov, Error **errp)
{
    uint64_t offset;
    uint32_t hole_size;

    if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_HOLE");
        return -EINVAL;
    }

    offset = payload_advance64(&payload);
    hole_size = payload_advance32(&payload);

    if (!hole_size || offset < orig_offset || hole_size > qiov->size ||
        offset > orig_offset + qiov->size - hole_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (s->info.min_block &&
        !QEMU_IS_ALIGNED(hole_size, s->info.min_block)) {
        trace_nbd_structured_read_compliance("hole");
    }

    qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);

    return 0;
}
/*
 * nbd_parse_blockstatus_payload
 * Based on our request, we expect only one extent in reply, for the
 * base:allocation context.
 */
static int nbd_parse_blockstatus_payload(BDRVNBDState *s,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_length,
                                         NBDExtent *extent, Error **errp)
{
    uint32_t context_id;

    /* The server succeeded, so it must have sent [at least] one extent */
    if (chunk->length < sizeof(context_id) + sizeof(*extent)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS");
        return -EINVAL;
    }

    context_id = payload_advance32(&payload);
    if (s->info.context_id != context_id) {
        error_setg(errp, "Protocol error: unexpected context id %d for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS, when negotiated context "
                         "id is %d", context_id, s->info.context_id);
        return -EINVAL;
    }

    extent->length = payload_advance32(&payload);
    extent->flags = payload_advance32(&payload);

    if (extent->length == 0) {
        error_setg(errp, "Protocol error: server sent status chunk with "
                         "zero length");
        return -EINVAL;
    }

    /*
     * A server sending unaligned block status is in violation of the
     * protocol, but as qemu-nbd 3.1 is such a server (at least for
     * POSIX files that are not a multiple of 512 bytes, since qemu
     * rounds files up to 512-byte multiples but lseek(SEEK_HOLE)
     * still sees an implicit hole beyond the real EOF), it's nicer to
     * work around the misbehaving server. If the request included
     * more than the final unaligned block, truncate it back to an
     * aligned result; if the request was only the final block, round
     * up to the full block and change the status to fully-allocated
     * (always a safe status, even if it loses information).
     */
    if (s->info.min_block && !QEMU_IS_ALIGNED(extent->length,
                                              s->info.min_block)) {
        trace_nbd_parse_blockstatus_compliance("extent length is unaligned");
        if (extent->length > s->info.min_block) {
            extent->length = QEMU_ALIGN_DOWN(extent->length,
                                             s->info.min_block);
        } else {
            extent->length = s->info.min_block;
            extent->flags = 0;
        }
    }

    /*
     * We used NBD_CMD_FLAG_REQ_ONE, so the server should not have
     * sent us any more than one extent, nor should it have included
     * status beyond our request in that extent. However, it's easy
     * enough to ignore the server's noncompliance without killing the
     * connection; just ignore trailing extents, and clamp things to
     * the length of our request.
     */
    if (chunk->length > sizeof(context_id) + sizeof(*extent)) {
        trace_nbd_parse_blockstatus_compliance("more than one extent");
    }
    if (extent->length > orig_length) {
        extent->length = orig_length;
        trace_nbd_parse_blockstatus_compliance("extent length too large");
    }

    /*
     * HACK: if we are using x-dirty-bitmaps to access
     * qemu:allocation-depth, treat all depths > 2 the same as 2,
     * since nbd_client_co_block_status is only expecting the low two
     * bits to be set.
     */
    if (s->alloc_depth && extent->flags > 2) {
        extent->flags = 2;
    }

    return 0;
}
/*
 * nbd_parse_error_payload
 * on success @errp contains message describing nbd error reply
 */
static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
                                   uint8_t *payload, int *request_ret,
                                   Error **errp)
{
    uint32_t error;
    uint16_t message_size;

    assert(chunk->type & (1 << 15));

    if (chunk->length < sizeof(error) + sizeof(message_size)) {
        error_setg(errp,
                   "Protocol error: invalid payload for structured error");
        return -EINVAL;
    }

    error = nbd_errno_to_system_errno(payload_advance32(&payload));
    if (error == 0) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with error = 0");
        return -EINVAL;
    }

    *request_ret = -error;
    message_size = payload_advance16(&payload);

    if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with incorrect message size");
        return -EINVAL;
    }

    /* TODO: Add a trace point to mention the server complaint */

    /* TODO handle ERROR_OFFSET */

    return 0;
}
static int nbd_co_receive_offset_data_payload(BDRVNBDState *s,
                                              uint64_t orig_offset,
                                              QEMUIOVector *qiov, Error **errp)
{
    QEMUIOVector sub_qiov;
    uint64_t offset;
    size_t data_size;
    int ret;
    NBDStructuredReplyChunk *chunk = &s->reply.structured;

    assert(nbd_reply_is_structured(&s->reply));

    /* The NBD spec requires at least one byte of payload */
    if (chunk->length <= sizeof(offset)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_DATA");
        return -EINVAL;
    }

    if (nbd_read64(s->ioc, &offset, "OFFSET_DATA offset", errp) < 0) {
        return -EIO;
    }

    data_size = chunk->length - sizeof(offset);

    if (offset < orig_offset || data_size > qiov->size ||
        offset > orig_offset + qiov->size - data_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }
    if (s->info.min_block && !QEMU_IS_ALIGNED(data_size, s->info.min_block)) {
        trace_nbd_structured_read_compliance("data");
    }

    qemu_iovec_init(&sub_qiov, qiov->niov);
    qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
    ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
    qemu_iovec_destroy(&sub_qiov);

    return ret < 0 ? -EIO : 0;
}
#define NBD_MAX_MALLOC_PAYLOAD 1000
static coroutine_fn int nbd_co_receive_structured_payload(
        BDRVNBDState *s, void **payload, Error **errp)
{
    int ret;
    uint32_t len;

    assert(nbd_reply_is_structured(&s->reply));

    len = s->reply.structured.length;

    if (len == 0) {
        return 0;
    }

    if (payload == NULL) {
        error_setg(errp, "Unexpected structured payload");
        return -EINVAL;
    }

    if (len > NBD_MAX_MALLOC_PAYLOAD) {
        error_setg(errp, "Payload too large");
        return -EINVAL;
    }

    *payload = g_new(char, len);
    ret = nbd_read(s->ioc, *payload, len, "structured payload", errp);
    if (ret < 0) {
        g_free(*payload);
        *payload = NULL;
        return ret;
    }

    return 0;
}
/*
 * nbd_co_do_receive_one_chunk
 * for simple reply:
 *   set request_ret to received reply error
 *   if qiov is not NULL: read payload to @qiov
 * for structured reply chunk:
 *   if error chunk: read payload, set @request_ret, do not set @payload
 *   else if offset_data chunk: read payload data to @qiov, do not set @payload
 *   else: read payload to @payload
 *
 * If function fails, @errp contains corresponding error message, and the
 * connection with the server is suspect. If it returns 0, then the
 * transaction succeeded (although @request_ret may be a negative errno
 * corresponding to the server's error reply), and errp is unchanged.
 */
static coroutine_fn int nbd_co_do_receive_one_chunk(
        BDRVNBDState *s, uint64_t handle, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
{
    int ret;
    int i = HANDLE_TO_INDEX(s, handle);
    void *local_payload = NULL;
    NBDStructuredReplyChunk *chunk;

    if (payload) {
        *payload = NULL;
    }
    *request_ret = 0;

    nbd_receive_replies(s, handle);
    if (!nbd_client_connected(s)) {
        error_setg(errp, "Connection closed");
        return -EIO;
    }

    assert(s->reply.handle == handle);

    if (nbd_reply_is_simple(&s->reply)) {
        if (only_structured) {
            error_setg(errp, "Protocol error: simple reply when structured "
                             "reply chunk was expected");
            return -EINVAL;
        }

        *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
        if (*request_ret < 0 || !qiov) {
            return 0;
        }

        return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
                                     errp) < 0 ? -EIO : 0;
    }

    /* handle structured reply chunk */
    assert(s->info.structured_reply);
    chunk = &s->reply.structured;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without"
                             " NBD_REPLY_FLAG_DONE flag set");
            return -EINVAL;
        }
        if (chunk->length) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk with"
                             " nonzero length");
            return -EINVAL;
        }
        return 0;
    }

    if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
        if (!qiov) {
            error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk");
            return -EINVAL;
        }

        return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
                                                  qiov, errp);
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        payload = &local_payload;
    }

    ret = nbd_co_receive_structured_payload(s, payload, errp);
    if (ret < 0) {
        return ret;
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp);
        g_free(local_payload);
        return ret;
    }

    return 0;
}
/*
 * nbd_co_receive_one_chunk
 * Read one reply chunk, wake waiting coroutines and put the channel into
 * an error state if needed.
 * Return value is a fatal error code or normal nbd reply error code
 */
static coroutine_fn int nbd_co_receive_one_chunk(
        BDRVNBDState *s, uint64_t handle, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, NBDReply *reply, void **payload,
        Error **errp)
{
    int ret = nbd_co_do_receive_one_chunk(s, handle, only_structured,
                                          request_ret, qiov, payload, errp);

    if (ret < 0) {
        memset(reply, 0, sizeof(*reply));
        nbd_channel_error(s, ret);
    } else {
        /* For the assert at the start of the reply-chunk loop body */
        *reply = s->reply;
    }
    s->reply.handle = 0;

    nbd_recv_coroutines_wake(s, false);

    return ret;
}
typedef struct NBDReplyChunkIter {
    int ret;
    int request_ret;
    Error *err;
    bool done, only_structured;
} NBDReplyChunkIter;
static void nbd_iter_channel_error(NBDReplyChunkIter *iter,
                                   int ret, Error **local_err)
{
    assert(local_err && *local_err);
    assert(ret < 0);

    if (!iter->ret) {
        iter->ret = ret;
        error_propagate(&iter->err, *local_err);
    } else {
        error_free(*local_err);
    }

    *local_err = NULL;
}
static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
{
    assert(ret < 0);

    if (!iter->request_ret) {
        iter->request_ret = ret;
    }
}
/*
 * NBD_FOREACH_REPLY_CHUNK
 * The pointer stored in @payload requires g_free() to free it.
 */
#define NBD_FOREACH_REPLY_CHUNK(s, iter, handle, structured, \
                                qiov, reply, payload) \
    for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
         nbd_reply_chunk_iter_receive(s, &iter, handle, qiov, reply, payload);)
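/*
 * Typical use (see nbd_co_receive_return_code() and friends below):
 * declare an NBDReplyChunkIter on the stack and loop until the final
 * chunk for @handle has been consumed, e.g.
 *
 *     NBDReplyChunkIter iter;
 *     NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, &reply, &payload) {
 *         ...per-chunk processing...
 *     }
 *
 * Errors are accumulated in iter.err / iter.request_ret rather than
 * aborting the loop, so that every chunk of the reply is drained from
 * the channel.
 */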
/*
 * nbd_reply_chunk_iter_receive
 * The pointer stored in @payload requires g_free() to free it.
 */
static bool nbd_reply_chunk_iter_receive(BDRVNBDState *s,
                                         NBDReplyChunkIter *iter,
                                         uint64_t handle,
                                         QEMUIOVector *qiov, NBDReply *reply,
                                         void **payload)
{
    int ret, request_ret;
    NBDReply local_reply;
    NBDStructuredReplyChunk *chunk;
    Error *local_err = NULL;
    if (!nbd_client_connected(s)) {
        error_setg(&local_err, "Connection closed");
        nbd_iter_channel_error(iter, -EIO, &local_err);
        goto break_loop;
    }

    if (iter->done) {
        /* Previous iteration was last. */
        goto break_loop;
    }

    if (reply == NULL) {
        reply = &local_reply;
    }

    ret = nbd_co_receive_one_chunk(s, handle, iter->only_structured,
                                   &request_ret, qiov, reply, payload,
                                   &local_err);
    if (ret < 0) {
        nbd_iter_channel_error(iter, ret, &local_err);
    } else if (request_ret < 0) {
        nbd_iter_request_error(iter, request_ret);
    }

    /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
    if (nbd_reply_is_simple(reply) || !nbd_client_connected(s)) {
        goto break_loop;
    }

    chunk = &reply->structured;
    iter->only_structured = true;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        /* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */
        assert(chunk->flags & NBD_REPLY_FLAG_DONE);
        goto break_loop;
    }

    if (chunk->flags & NBD_REPLY_FLAG_DONE) {
        /* This iteration is last. */
        iter->done = true;
    }

    /* Execute the loop body */
    return true;

break_loop:
    s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL;

    qemu_co_mutex_lock(&s->send_mutex);
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_co_mutex_unlock(&s->send_mutex);

    return false;
}
static int nbd_co_receive_return_code(BDRVNBDState *s, uint64_t handle,
                                      int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) {
        /* nbd_reply_chunk_iter_receive does all the work */
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}
static int nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t handle,
                                        uint64_t offset, QEMUIOVector *qiov,
                                        int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, s->info.structured_reply,
                            qiov, &reply, &payload)
    {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_OFFSET_DATA:
            /*
             * special cased in nbd_co_receive_one_chunk, data is already
             * in qiov
             */
            break;
        case NBD_REPLY_TYPE_OFFSET_HOLE:
            ret = nbd_parse_offset_hole_payload(s, &reply.structured, payload,
                                                offset, qiov, &local_err);
            if (ret < 0) {
                nbd_channel_error(s, ret);
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                /* not allowed reply type */
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) for CMD_READ",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}
static int nbd_co_receive_blockstatus_reply(BDRVNBDState *s,
                                            uint64_t handle, uint64_t length,
                                            NBDExtent *extent,
                                            int *request_ret, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;
    bool received = false;

    assert(!extent->length);
    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, &reply, &payload) {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_BLOCK_STATUS:
            if (received) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err, "Several BLOCK_STATUS chunks in reply");
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
            received = true;

            ret = nbd_parse_blockstatus_payload(s, &reply.structured,
                                                payload, length, extent,
                                                &local_err);
            if (ret < 0) {
                nbd_channel_error(s, ret);
                nbd_iter_channel_error(&iter, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                nbd_channel_error(s, -EINVAL);
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) "
                           "for CMD_BLOCK_STATUS",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_channel_error(&iter, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    if (!extent->length && !iter.request_ret) {
        error_setg(&local_err, "Server did not reply with any status extents");
        nbd_iter_channel_error(&iter, -EIO, &local_err);
    }

    error_propagate(errp, iter.err);
    *request_ret = iter.request_ret;
    return iter.ret;
}
static int nbd_co_request(BlockDriverState *bs, NBDRequest *request,
                          QEMUIOVector *write_qiov)
{
    int ret, request_ret;
    Error *local_err = NULL;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(request->type != NBD_CMD_READ);
    if (write_qiov) {
        assert(request->type == NBD_CMD_WRITE);
        assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
    } else {
        assert(request->type != NBD_CMD_WRITE);
    }

    do {
        ret = nbd_co_send_request(bs, request, write_qiov);
        if (ret < 0) {
            continue;
        }

        ret = nbd_co_receive_return_code(s, request->handle,
                                         &request_ret, &local_err);
        if (local_err) {
            trace_nbd_co_request_fail(request->from, request->len,
                                      request->handle, request->flags,
                                      request->type,
                                      nbd_cmd_lookup(request->type),
                                      ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }
    } while (ret < 0 && nbd_client_connecting_wait(s));

    return ret ? ret : request_ret;
}
static int nbd_client_co_preadv(BlockDriverState *bs, int64_t offset,
                                int64_t bytes, QEMUIOVector *qiov,
                                BdrvRequestFlags flags)
{
    int ret, request_ret;
    Error *local_err = NULL;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_READ,
        .from = offset,
        .len = bytes,
    };

    assert(bytes <= NBD_MAX_BUFFER_SIZE);

    if (!bytes) {
        return 0;
    }
    /*
     * Work around the fact that the block layer doesn't do
     * byte-accurate sizing yet - if the read exceeds the server's
     * advertised size because the block layer rounded size up, then
     * truncate the request to the server and tail-pad with zero.
     */
    if (offset >= s->info.size) {
        assert(bytes < BDRV_SECTOR_SIZE);
        qemu_iovec_memset(qiov, 0, 0, bytes);
        return 0;
    }
    if (offset + bytes > s->info.size) {
        uint64_t slop = offset + bytes - s->info.size;

        assert(slop < BDRV_SECTOR_SIZE);
        qemu_iovec_memset(qiov, bytes - slop, 0, slop);
        request.len -= slop;
    }

    do {
        ret = nbd_co_send_request(bs, &request, NULL);
        if (ret < 0) {
            continue;
        }

        ret = nbd_co_receive_cmdread_reply(s, request.handle, offset, qiov,
                                           &request_ret, &local_err);
        if (local_err) {
            trace_nbd_co_request_fail(request.from, request.len, request.handle,
                                      request.flags, request.type,
                                      nbd_cmd_lookup(request.type),
                                      ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }
    } while (ret < 0 && nbd_client_connecting_wait(s));

    return ret ? ret : request_ret;
}
static int nbd_client_co_pwritev(BlockDriverState *bs, int64_t offset,
                                 int64_t bytes, QEMUIOVector *qiov,
                                 BdrvRequestFlags flags)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_WRITE,
        .from = offset,
        .len = bytes,
    };

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (flags & BDRV_REQ_FUA) {
        assert(s->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }

    assert(bytes <= NBD_MAX_BUFFER_SIZE);

    if (!bytes) {
        return 0;
    }

    return nbd_co_request(bs, &request, qiov);
}
static int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
                                       int64_t bytes, BdrvRequestFlags flags)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_WRITE_ZEROES,
        .from = offset,
        .len = bytes,  /* .len is uint32_t actually */
    };

    assert(bytes <= UINT32_MAX); /* rely on max_pwrite_zeroes */

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
        return -ENOTSUP;
    }

    if (flags & BDRV_REQ_FUA) {
        assert(s->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }
    if (!(flags & BDRV_REQ_MAY_UNMAP)) {
        request.flags |= NBD_CMD_FLAG_NO_HOLE;
    }
    if (flags & BDRV_REQ_NO_FALLBACK) {
        assert(s->info.flags & NBD_FLAG_SEND_FAST_ZERO);
        request.flags |= NBD_CMD_FLAG_FAST_ZERO;
    }

    if (!bytes) {
        return 0;
    }

    return nbd_co_request(bs, &request, NULL);
}
static int nbd_client_co_flush(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = { .type = NBD_CMD_FLUSH };

    if (!(s->info.flags & NBD_FLAG_SEND_FLUSH)) {
        return 0;
    }

    request.from = 0;
    request.len = 0;

    return nbd_co_request(bs, &request, NULL);
}
static int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset,
                                  int64_t bytes)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = {
        .type = NBD_CMD_TRIM,
        .from = offset,
        .len = bytes, /* len is uint32_t */
    };

    assert(bytes <= UINT32_MAX); /* rely on max_pdiscard */

    assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
    if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
        return 0;
    }

    return nbd_co_request(bs, &request, NULL);
}
static int coroutine_fn nbd_client_co_block_status(
        BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
        int64_t *pnum, int64_t *map, BlockDriverState **file)
{
    int ret, request_ret;
    NBDExtent extent = { 0 };
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    Error *local_err = NULL;

    NBDRequest request = {
        .type = NBD_CMD_BLOCK_STATUS,
        .from = offset,
        .len = MIN(QEMU_ALIGN_DOWN(INT_MAX, bs->bl.request_alignment),
                   MIN(bytes, s->info.size - offset)),
        .flags = NBD_CMD_FLAG_REQ_ONE,
    };

    if (!s->info.base_allocation) {
        *pnum = bytes;
        *map = offset;
        *file = bs;
        return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
    }

    /*
     * Work around the fact that the block layer doesn't do
     * byte-accurate sizing yet - if the status request exceeds the
     * server's advertised size because the block layer rounded size
     * up, we truncated the request to the server (above), or are
     * called on just the hole.
     */
    if (offset >= s->info.size) {
        *pnum = bytes;
        assert(bytes < BDRV_SECTOR_SIZE);
        /* Intentionally don't report offset_valid for the hole */
        return BDRV_BLOCK_ZERO;
    }

    if (s->info.min_block) {
        assert(QEMU_IS_ALIGNED(request.len, s->info.min_block));
    }
    do {
        ret = nbd_co_send_request(bs, &request, NULL);
        if (ret < 0) {
            continue;
        }

        ret = nbd_co_receive_blockstatus_reply(s, request.handle, bytes,
                                               &extent, &request_ret,
                                               &local_err);
        if (local_err) {
            trace_nbd_co_request_fail(request.from, request.len, request.handle,
                                      request.flags, request.type,
                                      nbd_cmd_lookup(request.type),
                                      ret, error_get_pretty(local_err));
            error_free(local_err);
            local_err = NULL;
        }
    } while (ret < 0 && nbd_client_connecting_wait(s));

    if (ret < 0 || request_ret < 0) {
        return ret ? ret : request_ret;
    }

    assert(extent.length);
    *pnum = extent.length;
    *map = offset;
    *file = bs;
    return (extent.flags & NBD_STATE_HOLE ? 0 : BDRV_BLOCK_DATA) |
           (extent.flags & NBD_STATE_ZERO ? BDRV_BLOCK_ZERO : 0) |
           BDRV_BLOCK_OFFSET_VALID;
}
static int nbd_client_reopen_prepare(BDRVReopenState *state,
                                     BlockReopenQueue *queue, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)state->bs->opaque;

    if ((state->flags & BDRV_O_RDWR) && (s->info.flags & NBD_FLAG_READ_ONLY)) {
        error_setg(errp, "Can't reopen read-only NBD mount as read/write");
        return -EACCES;
    }
    return 0;
}
static void nbd_yank(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    qatomic_store_release(&s->state, NBD_CLIENT_QUIT);
    qio_channel_shutdown(QIO_CHANNEL(s->ioc), QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
}
static void nbd_client_close(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    NBDRequest request = { .type = NBD_CMD_DISC };

    if (s->ioc) {
        nbd_send_request(s->ioc, &request);
    }

    nbd_teardown_connection(bs);
}
/*
 * Parse nbd_open options
 */

static int nbd_parse_uri(const char *filename, QDict *options)
{
    URI *uri;
    const char *p;
    QueryParams *qp = NULL;
    int ret = 0;
    bool is_unix;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport */
    if (!g_strcmp0(uri->scheme, "nbd")) {
        is_unix = false;
    } else if (!g_strcmp0(uri->scheme, "nbd+tcp")) {
        is_unix = false;
    } else if (!g_strcmp0(uri->scheme, "nbd+unix")) {
        is_unix = true;
    } else {
        ret = -EINVAL;
        goto out;
    }

    p = uri->path ? uri->path : "";
    if (p[0] == '/') {
        p++;
    }
    if (p[0]) {
        qdict_put_str(options, "export", p);
    }

    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* nbd+unix:///export?socket=path */
        if (uri->server || uri->port || strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        qdict_put_str(options, "server.type", "unix");
        qdict_put_str(options, "server.path", qp->p[0].value);
    } else {
        QString *host;
        char *port_str;

        /* nbd[+tcp]://host[:port]/export */
        if (!uri->server) {
            ret = -EINVAL;
            goto out;
        }

        /* strip braces from literal IPv6 address */
        if (uri->server[0] == '[') {
            host = qstring_from_substr(uri->server, 1,
                                       strlen(uri->server) - 1);
        } else {
            host = qstring_from_str(uri->server);
        }

        qdict_put_str(options, "server.type", "inet");
        qdict_put(options, "server.host", host);

        port_str = g_strdup_printf("%d", uri->port ?: NBD_DEFAULT_PORT);
        qdict_put_str(options, "server.port", port_str);
        g_free(port_str);
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}
static bool nbd_has_filename_options_conflict(QDict *options, Error **errp)
{
    const QDictEntry *e;

    for (e = qdict_first(options); e; e = qdict_next(options, e)) {
        if (!strcmp(e->key, "host") ||
            !strcmp(e->key, "port") ||
            !strcmp(e->key, "path") ||
            !strcmp(e->key, "export") ||
            strstart(e->key, "server.", NULL))
        {
            error_setg(errp, "Option '%s' cannot be used with a file name",
                       e->key);
            return true;
        }
    }

    return false;
}
static void nbd_parse_filename(const char *filename, QDict *options,
                               Error **errp)
{
    g_autofree char *file = NULL;
    char *export_name;
    const char *host_spec;
    const char *unixpath;

    if (nbd_has_filename_options_conflict(options, errp)) {
        return;
    }

    if (strstr(filename, "://")) {
        int ret = nbd_parse_uri(filename, options);
        if (ret < 0) {
            error_setg(errp, "No valid URL specified");
        }
        return;
    }

    file = g_strdup(filename);

    export_name = strstr(file, EN_OPTSTR);
    if (export_name) {
        if (export_name[strlen(EN_OPTSTR)] == 0) {
            return;
        }
        export_name[0] = 0; /* truncate 'file' */
        export_name += strlen(EN_OPTSTR);

        qdict_put_str(options, "export", export_name);
    }

    /* extract the host_spec - fail if it's not nbd:... */
    if (!strstart(file, "nbd:", &host_spec)) {
        error_setg(errp, "File name string for NBD must start with 'nbd:'");
        return;
    }

    if (!*host_spec) {
        return;
    }

    /* are we a UNIX or TCP socket? */
    if (strstart(host_spec, "unix:", &unixpath)) {
        qdict_put_str(options, "server.type", "unix");
        qdict_put_str(options, "server.path", unixpath);
    } else {
        InetSocketAddress *addr = g_new(InetSocketAddress, 1);

        if (inet_parse(addr, host_spec, errp)) {
            goto out_inet;
        }

        qdict_put_str(options, "server.type", "inet");
        qdict_put_str(options, "server.host", addr->host);
        qdict_put_str(options, "server.port", addr->port);
    out_inet:
        qapi_free_InetSocketAddress(addr);
    }
}
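/*
 * Accepted pseudo-filename forms, per the parsing above (examples are
 * illustrative):
 *   nbd:host:port[:exportname=NAME]
 *   nbd:unix:/path/to/socket[:exportname=NAME]
 * plus the URI forms handled by nbd_parse_uri():
 *   nbd://host[:port]/export  and  nbd+unix:///export?socket=path
 */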
static bool nbd_process_legacy_socket_options(QDict *output_options,
                                              QemuOpts *legacy_opts,
                                              Error **errp)
{
    const char *path = qemu_opt_get(legacy_opts, "path");
    const char *host = qemu_opt_get(legacy_opts, "host");
    const char *port = qemu_opt_get(legacy_opts, "port");
    const QDictEntry *e;

    if (!path && !host && !port) {
        return true;
    }

    for (e = qdict_first(output_options); e; e = qdict_next(output_options, e))
    {
        if (strstart(e->key, "server.", NULL)) {
            error_setg(errp, "Cannot use 'server' and path/host/port at the "
                       "same time");
            return false;
        }
    }

    if (path && host) {
        error_setg(errp, "path and host may not be used at the same time");
        return false;
    } else if (path) {
        if (port) {
            error_setg(errp, "port may not be used without host");
            return false;
        }

        qdict_put_str(output_options, "server.type", "unix");
        qdict_put_str(output_options, "server.path", path);
    } else if (host) {
        qdict_put_str(output_options, "server.type", "inet");
        qdict_put_str(output_options, "server.host", host);
        qdict_put_str(output_options, "server.port",
                      port ?: stringify(NBD_DEFAULT_PORT));
    }

    return true;
}
static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options,
                                 Error **errp)
{
    SocketAddress *saddr = NULL;
    QDict *addr = NULL;
    Visitor *iv = NULL;

    qdict_extract_subqdict(options, &addr, "server.");
    if (!qdict_size(addr)) {
        error_setg(errp, "NBD server address missing");
        goto done;
    }

    iv = qobject_input_visitor_new_flat_confused(addr, errp);
    if (!iv) {
        goto done;
    }

    if (!visit_type_SocketAddress(iv, NULL, &saddr, errp)) {
        goto done;
    }

    if (socket_address_parse_named_fd(saddr, errp) < 0) {
        qapi_free_SocketAddress(saddr);
        saddr = NULL;
        goto done;
    }

done:
    qobject_unref(addr);
    visit_free(iv);
    return saddr;
}
static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp)
{
    Object *obj;
    QCryptoTLSCreds *creds;

    obj = object_resolve_path_component(
        object_get_objects_root(), id);
    if (!obj) {
        error_setg(errp, "No TLS credentials with id '%s'",
                   id);
        return NULL;
    }
    creds = (QCryptoTLSCreds *)
        object_dynamic_cast(obj, TYPE_QCRYPTO_TLS_CREDS);
    if (!creds) {
        error_setg(errp, "Object with id '%s' is not TLS credentials",
                   id);
        return NULL;
    }

    if (!qcrypto_tls_creds_check_endpoint(creds,
                                          QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT,
                                          errp)) {
        return NULL;
    }
    object_ref(obj);
    return creds;
}
static QemuOptsList nbd_runtime_opts = {
    .name = "nbd",
    .head = QTAILQ_HEAD_INITIALIZER(nbd_runtime_opts.head),
    .desc = {
        {
            .name = "host",
            .type = QEMU_OPT_STRING,
            .help = "TCP host to connect to",
        },
        {
            .name = "port",
            .type = QEMU_OPT_STRING,
            .help = "TCP port to connect to",
        },
        {
            .name = "path",
            .type = QEMU_OPT_STRING,
            .help = "Unix socket path to connect to",
        },
        {
            .name = "export",
            .type = QEMU_OPT_STRING,
            .help = "Name of the NBD export to open",
        },
        {
            .name = "tls-creds",
            .type = QEMU_OPT_STRING,
            .help = "ID of the TLS credentials to use",
        },
        {
            .name = "x-dirty-bitmap",
            .type = QEMU_OPT_STRING,
            .help = "experimental: expose named dirty bitmap in place of "
                    "block status",
        },
        {
            .name = "reconnect-delay",
            .type = QEMU_OPT_NUMBER,
            .help = "On an unexpected disconnect, the nbd client tries to "
                    "connect again until succeeding or encountering a serious "
                    "error. During the first @reconnect-delay seconds, all "
                    "requests are paused and will be rerun on a successful "
                    "reconnect. After that time, any delayed requests and all "
                    "future requests before a successful reconnect will "
                    "immediately fail. Default 0",
        },
        {
            .name = "open-timeout",
            .type = QEMU_OPT_NUMBER,
            .help = "In seconds. If zero, the nbd driver tries the connection "
                    "only once, and fails to open if the connection fails. "
                    "If non-zero, the nbd driver will repeat connection "
                    "attempts until successful or until @open-timeout seconds "
                    "have elapsed. Default 0",
        },
        { /* end of list */ }
    },
};
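/*
 * Illustrative blockdev command line exercising the options above
 * (host name, export name and delay values are made up):
 *
 *   qemu-system-x86_64 -blockdev \
 *     driver=nbd,node-name=nbd0,server.type=inet,server.host=example.org,\
 *     server.port=10809,export=backup,reconnect-delay=30,open-timeout=15
 */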
static int nbd_process_options(BlockDriverState *bs, QDict *options,
                               Error **errp)
{
    BDRVNBDState *s = bs->opaque;
    QemuOpts *opts;
    int ret = -EINVAL;

    opts = qemu_opts_create(&nbd_runtime_opts, NULL, 0, &error_abort);
    if (!qemu_opts_absorb_qdict(opts, options, errp)) {
        goto error;
    }

    /* Translate @host, @port, and @path to a SocketAddress */
    if (!nbd_process_legacy_socket_options(options, opts, errp)) {
        goto error;
    }

    /* Pop the config into our state object. Exit if invalid. */
    s->saddr = nbd_config(s, options, errp);
    if (!s->saddr) {
        goto error;
    }

    s->export = g_strdup(qemu_opt_get(opts, "export"));
    if (s->export && strlen(s->export) > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "export name too long to send to server");
        goto error;
    }

    s->tlscredsid = g_strdup(qemu_opt_get(opts, "tls-creds"));
    if (s->tlscredsid) {
        s->tlscreds = nbd_get_tls_creds(s->tlscredsid, errp);
        if (!s->tlscreds) {
            goto error;
        }

        /* TODO SOCKET_ADDRESS_KIND_FD where fd has AF_INET or AF_INET6 */
        if (s->saddr->type != SOCKET_ADDRESS_TYPE_INET) {
            error_setg(errp, "TLS only supported over IP sockets");
            goto error;
        }
        s->hostname = s->saddr->u.inet.host;
    }

    s->x_dirty_bitmap = g_strdup(qemu_opt_get(opts, "x-dirty-bitmap"));
    if (s->x_dirty_bitmap && strlen(s->x_dirty_bitmap) > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "x-dirty-bitmap query too long to send to server");
        goto error;
    }

    s->reconnect_delay = qemu_opt_get_number(opts, "reconnect-delay", 0);
    s->open_timeout = qemu_opt_get_number(opts, "open-timeout", 0);

    ret = 0;

error:
    qemu_opts_del(opts);
    return ret;
}
static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    int ret;
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    s->bs = bs;
    qemu_co_mutex_init(&s->send_mutex);
    qemu_co_queue_init(&s->free_sema);
    qemu_co_mutex_init(&s->receive_mutex);

    if (!yank_register_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name), errp)) {
        return -EEXIST;
    }

    ret = nbd_process_options(bs, options, errp);
    if (ret < 0) {
        goto fail;
    }

    s->conn = nbd_client_connection_new(s->saddr, true, s->export,
                                        s->x_dirty_bitmap, s->tlscreds);

    if (s->open_timeout) {
        nbd_client_connection_enable_retry(s->conn);
        open_timer_init(s, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
                        s->open_timeout * NANOSECONDS_PER_SECOND);
    }

    s->state = NBD_CLIENT_CONNECTING_WAIT;
    ret = nbd_do_establish_connection(bs, errp);
    if (ret < 0) {
        goto fail;
    }

    /*
     * The connect attempt is done, so we no longer need this timer.
     * Delete it, because we do not want it to be around when this node
     * is drained or closed.
     */
    open_timer_del(s);

    nbd_client_connection_enable_retry(s->conn);

    return 0;

fail:
    open_timer_del(s);
    nbd_clear_bdrvstate(bs);
    return ret;
}
static int nbd_co_flush(BlockDriverState *bs)
{
    return nbd_client_co_flush(bs);
}
static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    uint32_t min = s->info.min_block;
    uint32_t max = MIN_NON_ZERO(NBD_MAX_BUFFER_SIZE, s->info.max_block);

    /*
     * If the server did not advertise an alignment:
     * - a size that is not sector-aligned implies that an alignment
     *   of 1 can be used to access those tail bytes
     * - advertisement of block status requires an alignment of 1, so
     *   that we don't violate block layer constraints that block
     *   status is always aligned (as we can't control whether the
     *   server will report sub-sector extents, such as a hole at EOF
     *   on an unaligned POSIX file)
     * - otherwise, assume the server is so old that we are safer avoiding
     *   sub-sector requests
     */
    if (!min) {
        min = (!QEMU_IS_ALIGNED(s->info.size, BDRV_SECTOR_SIZE) ||
               s->info.base_allocation) ? 1 : BDRV_SECTOR_SIZE;
    }

    bs->bl.request_alignment = min;
    bs->bl.max_pdiscard = QEMU_ALIGN_DOWN(INT_MAX, min);
    bs->bl.max_pwrite_zeroes = max;
    bs->bl.max_transfer = max;

    if (s->info.opt_block &&
        s->info.opt_block > bs->bl.opt_transfer) {
        bs->bl.opt_transfer = s->info.opt_block;
    }
}
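/*
 * Worked example for the alignment logic above (illustrative numbers): a
 * 1000-byte export whose server advertises no block sizes is not
 * sector-aligned, so request_alignment drops to 1 and the 488 bytes past
 * the last 512-byte boundary stay reachable; a 1 MiB export with neither
 * advertised alignment nor block status support instead keeps the
 * conservative BDRV_SECTOR_SIZE (512) alignment.
 */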
static void nbd_close(BlockDriverState *bs)
{
    nbd_client_close(bs);
    nbd_clear_bdrvstate(bs);
}
/*
 * NBD cannot truncate, but if the caller asks to truncate to the same size, or
 * to a smaller size with exact=false, there is no reason to fail the
 * operation.
 *
 * Preallocation mode is ignored since it does not seem useful to fail when
 * we never change anything.
 */
static int coroutine_fn nbd_co_truncate(BlockDriverState *bs, int64_t offset,
                                        bool exact, PreallocMode prealloc,
                                        BdrvRequestFlags flags, Error **errp)
{
    BDRVNBDState *s = bs->opaque;

    if (offset != s->info.size && exact) {
        error_setg(errp, "Cannot resize NBD nodes");
        return -ENOTSUP;
    }

    if (offset > s->info.size) {
        error_setg(errp, "Cannot grow NBD nodes");
        return -EINVAL;
    }

    return 0;
}
static int64_t nbd_getlength(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    return s->info.size;
}
static void nbd_refresh_filename(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;
    const char *host = NULL, *port = NULL, *path = NULL;
    size_t len = 0;

    if (s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
        const InetSocketAddress *inet = &s->saddr->u.inet;
        if (!inet->has_ipv4 && !inet->has_ipv6 && !inet->has_to) {
            host = inet->host;
            port = inet->port;
        }
    } else if (s->saddr->type == SOCKET_ADDRESS_TYPE_UNIX) {
        path = s->saddr->u.q_unix.path;
    } /* else can't represent as pseudo-filename */

    if (path && s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd+unix:///%s?socket=%s", s->export, path);
    } else if (path && !s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd+unix://?socket=%s", path);
    } else if (host && s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd://%s:%s/%s", host, port, s->export);
    } else if (host && !s->export) {
        len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
                       "nbd://%s:%s", host, port);
    }
    if (len >= sizeof(bs->exact_filename)) {
        /* Name is too long to represent exactly, so leave it empty. */
        bs->exact_filename[0] = '\0';
    }
}
static char *nbd_dirname(BlockDriverState *bs, Error **errp)
{
    /* The generic bdrv_dirname() implementation is able to work out some
     * directory name for NBD nodes, but that would be wrong. So far there is
     * no specification for how "export paths" would work, so NBD does not
     * have directory names. */
    error_setg(errp, "Cannot generate a base directory for NBD nodes");
    return NULL;
}
static const char *const nbd_strong_runtime_opts[] = {
    "path",
    "host",
    "port",
    "export",
    "tls-creds",
    "server.",

    NULL
};
static void nbd_cancel_in_flight(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    reconnect_delay_timer_del(s);

    if (s->state == NBD_CLIENT_CONNECTING_WAIT) {
        s->state = NBD_CLIENT_CONNECTING_NOWAIT;
        qemu_co_queue_restart_all(&s->free_sema);
    }

    nbd_co_establish_connection_cancel(s->conn);
}
static void nbd_attach_aio_context(BlockDriverState *bs,
                                   AioContext *new_context)
{
    BDRVNBDState *s = bs->opaque;

    /* The open_timer is used only during nbd_open() */
    assert(!s->open_timer);

    /*
     * The reconnect_delay_timer is scheduled in I/O paths when the
     * connection is lost, to cancel the reconnection attempt after a
     * given time. Once this attempt is done (successfully or not),
     * nbd_reconnect_attempt() ensures the timer is deleted before the
     * respective I/O request is resumed.
     * Since the AioContext can only be changed when a node is drained,
     * the reconnect_delay_timer cannot be active here.
     */
    assert(!s->reconnect_delay_timer);

    if (s->ioc) {
        qio_channel_attach_aio_context(s->ioc, new_context);
    }
}
static void nbd_detach_aio_context(BlockDriverState *bs)
{
    BDRVNBDState *s = bs->opaque;

    assert(!s->open_timer);
    assert(!s->reconnect_delay_timer);

    if (s->ioc) {
        qio_channel_detach_aio_context(s->ioc);
    }
}
static BlockDriver bdrv_nbd = {
    .format_name                = "nbd",
    .protocol_name              = "nbd",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_co_create_opts        = bdrv_co_create_opts_simple,
    .create_opts                = &bdrv_create_opts_simple,
    .bdrv_file_open             = nbd_open,
    .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_co_truncate           = nbd_co_truncate,
    .bdrv_getlength             = nbd_getlength,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
    .bdrv_cancel_in_flight      = nbd_cancel_in_flight,

    .bdrv_attach_aio_context    = nbd_attach_aio_context,
    .bdrv_detach_aio_context    = nbd_detach_aio_context,
};
static BlockDriver bdrv_nbd_tcp = {
    .format_name                = "nbd",
    .protocol_name              = "nbd+tcp",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_co_create_opts        = bdrv_co_create_opts_simple,
    .create_opts                = &bdrv_create_opts_simple,
    .bdrv_file_open             = nbd_open,
    .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_co_truncate           = nbd_co_truncate,
    .bdrv_getlength             = nbd_getlength,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
    .bdrv_cancel_in_flight      = nbd_cancel_in_flight,

    .bdrv_attach_aio_context    = nbd_attach_aio_context,
    .bdrv_detach_aio_context    = nbd_detach_aio_context,
};
static BlockDriver bdrv_nbd_unix = {
    .format_name                = "nbd",
    .protocol_name              = "nbd+unix",
    .instance_size              = sizeof(BDRVNBDState),
    .bdrv_parse_filename        = nbd_parse_filename,
    .bdrv_co_create_opts        = bdrv_co_create_opts_simple,
    .create_opts                = &bdrv_create_opts_simple,
    .bdrv_file_open             = nbd_open,
    .bdrv_reopen_prepare        = nbd_client_reopen_prepare,
    .bdrv_co_preadv             = nbd_client_co_preadv,
    .bdrv_co_pwritev            = nbd_client_co_pwritev,
    .bdrv_co_pwrite_zeroes      = nbd_client_co_pwrite_zeroes,
    .bdrv_close                 = nbd_close,
    .bdrv_co_flush_to_os        = nbd_co_flush,
    .bdrv_co_pdiscard           = nbd_client_co_pdiscard,
    .bdrv_refresh_limits        = nbd_refresh_limits,
    .bdrv_co_truncate           = nbd_co_truncate,
    .bdrv_getlength             = nbd_getlength,
    .bdrv_refresh_filename      = nbd_refresh_filename,
    .bdrv_co_block_status       = nbd_client_co_block_status,
    .bdrv_dirname               = nbd_dirname,
    .strong_runtime_opts        = nbd_strong_runtime_opts,
    .bdrv_cancel_in_flight      = nbd_cancel_in_flight,

    .bdrv_attach_aio_context    = nbd_attach_aio_context,
    .bdrv_detach_aio_context    = nbd_detach_aio_context,
};
static void bdrv_nbd_init(void)
{
    bdrv_register(&bdrv_nbd);
    bdrv_register(&bdrv_nbd_tcp);
    bdrv_register(&bdrv_nbd_unix);
}

block_init(bdrv_nbd_init);