block/nbd: simplify waking of nbd_co_establish_connection()
[qemu/ericb.git] / block/nbd.c
1 /*
2 * QEMU Block driver for NBD
4 * Copyright (c) 2019 Virtuozzo International GmbH.
5 * Copyright (C) 2016 Red Hat, Inc.
6 * Copyright (C) 2008 Bull S.A.S.
7 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
9 * Some parts:
10 * Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
12 * Permission is hereby granted, free of charge, to any person obtaining a copy
13 * of this software and associated documentation files (the "Software"), to deal
14 * in the Software without restriction, including without limitation the rights
15 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 * copies of the Software, and to permit persons to whom the Software is
17 * furnished to do so, subject to the following conditions:
19 * The above copyright notice and this permission notice shall be included in
20 * all copies or substantial portions of the Software.
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
28 * THE SOFTWARE.
31 #include "qemu/osdep.h"
33 #include "trace.h"
34 #include "qemu/uri.h"
35 #include "qemu/option.h"
36 #include "qemu/cutils.h"
37 #include "qemu/main-loop.h"
38 #include "qemu/atomic.h"
40 #include "qapi/qapi-visit-sockets.h"
41 #include "qapi/qmp/qstring.h"
42 #include "qapi/clone-visitor.h"
44 #include "block/qdict.h"
45 #include "block/nbd.h"
46 #include "block/block_int.h"
48 #include "qemu/yank.h"
50 #define EN_OPTSTR ":exportname="
51 #define MAX_NBD_REQUESTS 16
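/*
 * Requests in flight are tracked in a fixed array of MAX_NBD_REQUESTS
 * slots; the wire handle sent to the server is simply the slot index
 * XOR-ed with the BlockDriverState pointer, so an incoming reply's
 * handle can be mapped straight back to its request slot.
 */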
53 #define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
54 #define INDEX_TO_HANDLE(bs, index) ((index) ^ (uint64_t)(intptr_t)(bs))
56 typedef struct {
57 Coroutine *coroutine;
58 uint64_t offset; /* original offset of the request */
59 bool receiving; /* waiting for connection_co? */
60 } NBDClientRequest;
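/*
 * Client state machine: CONNECTING_WAIT pauses new requests until a
 * reconnect succeeds or the reconnect-delay expires, CONNECTING_NOWAIT
 * fails new requests immediately while reconnection keeps being retried
 * in the background, CONNECTED is normal operation, and QUIT is final.
 */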
62 typedef enum NBDClientState {
63 NBD_CLIENT_CONNECTING_WAIT,
64 NBD_CLIENT_CONNECTING_NOWAIT,
65 NBD_CLIENT_CONNECTED,
66 NBD_CLIENT_QUIT
67 } NBDClientState;
69 typedef enum NBDConnectThreadState {
70 /* No thread, no pending results */
71 CONNECT_THREAD_NONE,
73 /* Thread is running, no results for now */
74 CONNECT_THREAD_RUNNING,
77 * Thread is running, but requestor exited. Thread should close
78 * the new socket and free the connect state on exit.
80 CONNECT_THREAD_RUNNING_DETACHED,
82 /* Thread finished, results are stored in a state */
83 CONNECT_THREAD_FAIL,
84 CONNECT_THREAD_SUCCESS
85 } NBDConnectThreadState;
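/*
 * Typical state transitions: NONE/FAIL -> RUNNING when a connect thread
 * is spawned; RUNNING -> SUCCESS/FAIL when the thread stores its result
 * and wakes wait_co; RUNNING -> RUNNING_DETACHED when the requester goes
 * away first (the thread then frees everything itself); SUCCESS/FAIL ->
 * NONE once nbd_co_establish_connection() consumes the result.
 */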
87 typedef struct NBDConnectThread {
88 /* Initialization constants */
89 SocketAddress *saddr; /* address to connect to */
92 * Result of last attempt. Valid in FAIL and SUCCESS states.
93 * If you want to steal the error, don't forget to set the pointer to NULL.
95 QIOChannelSocket *sioc;
96 Error *err;
98 QemuMutex mutex;
99 /* All further fields are protected by mutex */
100 NBDConnectThreadState state; /* current state of the thread */
103 * wait_co: if non-NULL, which coroutine to wake in
104 * nbd_co_establish_connection() after yield()
106 Coroutine *wait_co;
107 } NBDConnectThread;
109 typedef struct BDRVNBDState {
110 QIOChannelSocket *sioc; /* The master data channel */
111 QIOChannel *ioc; /* The current I/O channel which may differ (eg TLS) */
112 NBDExportInfo info;
114 CoMutex send_mutex;
115 CoQueue free_sema;
116 Coroutine *connection_co;
117 Coroutine *teardown_co;
118 QemuCoSleep reconnect_sleep;
119 bool drained;
120 bool wait_drained_end;
121 int in_flight;
122 NBDClientState state;
123 bool wait_in_flight;
125 QEMUTimer *reconnect_delay_timer;
127 NBDClientRequest requests[MAX_NBD_REQUESTS];
128 NBDReply reply;
129 BlockDriverState *bs;
131 /* Connection parameters */
132 uint32_t reconnect_delay;
133 SocketAddress *saddr;
134 char *export, *tlscredsid;
135 QCryptoTLSCreds *tlscreds;
136 const char *hostname;
137 char *x_dirty_bitmap;
138 bool alloc_depth;
140 NBDConnectThread *connect_thread;
141 } BDRVNBDState;
143 static void nbd_free_connect_thread(NBDConnectThread *thr);
144 static int nbd_establish_connection(BlockDriverState *bs, SocketAddress *saddr,
145 Error **errp);
146 static int nbd_co_establish_connection(BlockDriverState *bs, Error **errp);
147 static void nbd_co_establish_connection_cancel(BlockDriverState *bs);
148 static int nbd_client_handshake(BlockDriverState *bs, Error **errp);
149 static void nbd_yank(void *opaque);
151 static void nbd_clear_bdrvstate(BlockDriverState *bs)
153 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
154 NBDConnectThread *thr = s->connect_thread;
155 bool thr_running;
157 qemu_mutex_lock(&thr->mutex);
158 thr_running = thr->state == CONNECT_THREAD_RUNNING;
159 if (thr_running) {
160 thr->state = CONNECT_THREAD_RUNNING_DETACHED;
162 qemu_mutex_unlock(&thr->mutex);
164 /* the runaway thread will clean up after itself */
165 if (!thr_running) {
166 nbd_free_connect_thread(thr);
169 yank_unregister_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name));
171 object_unref(OBJECT(s->tlscreds));
172 qapi_free_SocketAddress(s->saddr);
173 s->saddr = NULL;
174 g_free(s->export);
175 s->export = NULL;
176 g_free(s->tlscredsid);
177 s->tlscredsid = NULL;
178 g_free(s->x_dirty_bitmap);
179 s->x_dirty_bitmap = NULL;
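/*
 * nbd_channel_error: -EIO is treated as a recoverable channel failure
 * and switches to a CONNECTING state (WAIT if a reconnect-delay was
 * configured, NOWAIT otherwise); any other error shuts the channel down
 * and moves the client to NBD_CLIENT_QUIT for good.
 */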
182 static void nbd_channel_error(BDRVNBDState *s, int ret)
184 if (ret == -EIO) {
185 if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTED) {
186 s->state = s->reconnect_delay ? NBD_CLIENT_CONNECTING_WAIT :
187 NBD_CLIENT_CONNECTING_NOWAIT;
189 } else {
190 if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTED) {
191 qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
193 s->state = NBD_CLIENT_QUIT;
197 static void nbd_recv_coroutines_wake_all(BDRVNBDState *s)
199 int i;
201 for (i = 0; i < MAX_NBD_REQUESTS; i++) {
202 NBDClientRequest *req = &s->requests[i];
204 if (req->coroutine && req->receiving) {
205 aio_co_wake(req->coroutine);
210 static void reconnect_delay_timer_del(BDRVNBDState *s)
212 if (s->reconnect_delay_timer) {
213 timer_free(s->reconnect_delay_timer);
214 s->reconnect_delay_timer = NULL;
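/*
 * Called once reconnect-delay seconds have passed without a successful
 * reconnect: downgrade CONNECTING_WAIT to CONNECTING_NOWAIT and restart
 * everything queued on free_sema so pending requests fail instead of
 * waiting forever.
 */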
218 static void reconnect_delay_timer_cb(void *opaque)
220 BDRVNBDState *s = opaque;
222 if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) {
223 s->state = NBD_CLIENT_CONNECTING_NOWAIT;
224 while (qemu_co_enter_next(&s->free_sema, NULL)) {
225 /* Resume all queued requests */
229 reconnect_delay_timer_del(s);
232 static void reconnect_delay_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
234 if (qatomic_load_acquire(&s->state) != NBD_CLIENT_CONNECTING_WAIT) {
235 return;
238 assert(!s->reconnect_delay_timer);
239 s->reconnect_delay_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
240 QEMU_CLOCK_REALTIME,
241 SCALE_NS,
242 reconnect_delay_timer_cb, s);
243 timer_mod(s->reconnect_delay_timer, expire_time_ns);
246 static void nbd_client_detach_aio_context(BlockDriverState *bs)
248 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
250 /* Timer is deleted in nbd_client_co_drain_begin() */
251 assert(!s->reconnect_delay_timer);
253 * If reconnect is in progress we may have no ->ioc. It will be
254 * re-instantiated in the proper aio context once the connection is
255 * reestablished.
257 if (s->ioc) {
258 qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc));
262 static void nbd_client_attach_aio_context_bh(void *opaque)
264 BlockDriverState *bs = opaque;
265 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
267 if (s->connection_co) {
269 * The node is still drained, so we know the coroutine has yielded in
270 * nbd_read_eof(), the only place where bs->in_flight can reach 0, or
271 * it is entered for the first time. Both places are safe for entering
272 * the coroutine.
274 qemu_aio_coroutine_enter(bs->aio_context, s->connection_co);
276 bdrv_dec_in_flight(bs);
279 static void nbd_client_attach_aio_context(BlockDriverState *bs,
280 AioContext *new_context)
282 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
285 * s->connection_co is yielded from either nbd_receive_reply() or
286 * nbd_co_reconnect_loop()
288 if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTED) {
289 qio_channel_attach_aio_context(QIO_CHANNEL(s->ioc), new_context);
292 bdrv_inc_in_flight(bs);
295 * Need to wait here for the BH to run because the BH must run while the
296 * node is still drained.
298 aio_wait_bh_oneshot(new_context, nbd_client_attach_aio_context_bh, bs);
301 static void coroutine_fn nbd_client_co_drain_begin(BlockDriverState *bs)
303 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
305 s->drained = true;
306 qemu_co_sleep_wake(&s->reconnect_sleep);
308 nbd_co_establish_connection_cancel(bs);
310 reconnect_delay_timer_del(s);
312 if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) {
313 s->state = NBD_CLIENT_CONNECTING_NOWAIT;
314 qemu_co_queue_restart_all(&s->free_sema);
318 static void coroutine_fn nbd_client_co_drain_end(BlockDriverState *bs)
320 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
322 s->drained = false;
323 if (s->wait_drained_end) {
324 s->wait_drained_end = false;
325 aio_co_wake(s->connection_co);
330 static void nbd_teardown_connection(BlockDriverState *bs)
332 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
334 if (s->ioc) {
335 /* finish any pending coroutines */
336 qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
337 } else if (s->sioc) {
338 /* abort negotiation */
339 qio_channel_shutdown(QIO_CHANNEL(s->sioc), QIO_CHANNEL_SHUTDOWN_BOTH,
340 NULL);
343 s->state = NBD_CLIENT_QUIT;
344 if (s->connection_co) {
345 qemu_co_sleep_wake(&s->reconnect_sleep);
346 nbd_co_establish_connection_cancel(bs);
348 if (qemu_in_coroutine()) {
349 s->teardown_co = qemu_coroutine_self();
350 /* connection_co resumes us when it terminates */
351 qemu_coroutine_yield();
352 s->teardown_co = NULL;
353 } else {
354 BDRV_POLL_WHILE(bs, s->connection_co);
356 assert(!s->connection_co);
359 static bool nbd_client_connecting(BDRVNBDState *s)
361 NBDClientState state = qatomic_load_acquire(&s->state);
362 return state == NBD_CLIENT_CONNECTING_WAIT ||
363 state == NBD_CLIENT_CONNECTING_NOWAIT;
366 static bool nbd_client_connecting_wait(BDRVNBDState *s)
368 return qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT;
371 static void nbd_init_connect_thread(BDRVNBDState *s)
373 s->connect_thread = g_new(NBDConnectThread, 1);
375 *s->connect_thread = (NBDConnectThread) {
376 .saddr = QAPI_CLONE(SocketAddress, s->saddr),
377 .state = CONNECT_THREAD_NONE,
380 qemu_mutex_init(&s->connect_thread->mutex);
383 static void nbd_free_connect_thread(NBDConnectThread *thr)
385 if (thr->sioc) {
386 qio_channel_close(QIO_CHANNEL(thr->sioc), NULL);
387 object_unref(OBJECT(thr->sioc));
389 error_free(thr->err);
390 qapi_free_SocketAddress(thr->saddr);
391 g_free(thr);
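/*
 * Body of the detached "nbd-connect" thread: do the blocking socket
 * connect outside the main loop, then, under the mutex, either record
 * FAIL/SUCCESS and wake the coroutine parked in wait_co, or free the
 * state itself if the requester has already detached.
 */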
394 static void *connect_thread_func(void *opaque)
396 NBDConnectThread *thr = opaque;
397 int ret;
398 bool do_free = false;
400 thr->sioc = qio_channel_socket_new();
402 error_free(thr->err);
403 thr->err = NULL;
404 ret = qio_channel_socket_connect_sync(thr->sioc, thr->saddr, &thr->err);
405 if (ret < 0) {
406 object_unref(OBJECT(thr->sioc));
407 thr->sioc = NULL;
410 qio_channel_set_delay(QIO_CHANNEL(thr->sioc), false);
412 qemu_mutex_lock(&thr->mutex);
414 switch (thr->state) {
415 case CONNECT_THREAD_RUNNING:
416 thr->state = ret < 0 ? CONNECT_THREAD_FAIL : CONNECT_THREAD_SUCCESS;
417 if (thr->wait_co) {
418 aio_co_wake(thr->wait_co);
419 thr->wait_co = NULL;
421 break;
422 case CONNECT_THREAD_RUNNING_DETACHED:
423 do_free = true;
424 break;
425 default:
426 abort();
429 qemu_mutex_unlock(&thr->mutex);
431 if (do_free) {
432 nbd_free_connect_thread(thr);
435 return NULL;
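/*
 * Coroutine-friendly wrapper around the connect thread: reuse a result a
 * previous attempt left behind, spawn a new thread if none is running,
 * and yield until either the thread or
 * nbd_co_establish_connection_cancel() wakes us. On success the fresh
 * socket is moved into s->sioc and registered with the yank framework.
 */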
438 static int coroutine_fn
439 nbd_co_establish_connection(BlockDriverState *bs, Error **errp)
441 int ret;
442 QemuThread thread;
443 BDRVNBDState *s = bs->opaque;
444 NBDConnectThread *thr = s->connect_thread;
446 qemu_mutex_lock(&thr->mutex);
448 switch (thr->state) {
449 case CONNECT_THREAD_FAIL:
450 case CONNECT_THREAD_NONE:
451 error_free(thr->err);
452 thr->err = NULL;
453 thr->state = CONNECT_THREAD_RUNNING;
454 qemu_thread_create(&thread, "nbd-connect",
455 connect_thread_func, thr, QEMU_THREAD_DETACHED);
456 break;
457 case CONNECT_THREAD_SUCCESS:
458 /* Previous attempt finally succeeded in background */
459 thr->state = CONNECT_THREAD_NONE;
460 s->sioc = thr->sioc;
461 thr->sioc = NULL;
462 yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
463 nbd_yank, bs);
464 qemu_mutex_unlock(&thr->mutex);
465 return 0;
466 case CONNECT_THREAD_RUNNING:
467 /* Already running, will wait */
468 break;
469 default:
470 abort();
473 thr->wait_co = qemu_coroutine_self();
475 qemu_mutex_unlock(&thr->mutex);
478 * We are going to wait for the connect thread to finish, but
479 * nbd_client_co_drain_begin() can interrupt us.
481 qemu_coroutine_yield();
483 qemu_mutex_lock(&thr->mutex);
485 switch (thr->state) {
486 case CONNECT_THREAD_SUCCESS:
487 case CONNECT_THREAD_FAIL:
488 thr->state = CONNECT_THREAD_NONE;
489 error_propagate(errp, thr->err);
490 thr->err = NULL;
491 s->sioc = thr->sioc;
492 thr->sioc = NULL;
493 if (s->sioc) {
494 yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
495 nbd_yank, bs);
497 ret = (s->sioc ? 0 : -1);
498 break;
499 case CONNECT_THREAD_RUNNING:
500 case CONNECT_THREAD_RUNNING_DETACHED:
502 * A drained section is about to start. Report this attempt as
503 * failed. The connect thread is still executing in the background,
504 * and its result may be used for the next connection attempt.
506 ret = -1;
507 error_setg(errp, "Connection attempt cancelled by other operation");
508 break;
510 case CONNECT_THREAD_NONE:
512 * Impossible: we have seen this thread running, so it must still be
513 * running or have produced a result by now.
515 abort();
517 default:
518 abort();
521 qemu_mutex_unlock(&thr->mutex);
523 return ret;
527 * nbd_co_establish_connection_cancel
528 * Cancel nbd_co_establish_connection asynchronously: it will finish soon,
529 * to allow a drained section to begin.
531 static void nbd_co_establish_connection_cancel(BlockDriverState *bs)
533 BDRVNBDState *s = bs->opaque;
534 NBDConnectThread *thr = s->connect_thread;
535 Coroutine *wait_co = NULL;
537 qemu_mutex_lock(&thr->mutex);
539 if (thr->state == CONNECT_THREAD_RUNNING) {
540 /* We can cancel only in running state, when bh is not yet scheduled */
541 wait_co = g_steal_pointer(&thr->wait_co);
544 qemu_mutex_unlock(&thr->mutex);
546 if (wait_co) {
547 aio_co_wake(wait_co);
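/*
 * One reconnection attempt: wait for all in-flight requests to drain,
 * tear down the previous channel if any, establish a new connection and
 * redo the NBD handshake; on success flip the state to CONNECTED and
 * restart requests queued on free_sema.
 */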
551 static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s)
553 int ret;
555 if (!nbd_client_connecting(s)) {
556 return;
559 /* Wait for completion of all in-flight requests */
561 qemu_co_mutex_lock(&s->send_mutex);
563 while (s->in_flight > 0) {
564 qemu_co_mutex_unlock(&s->send_mutex);
565 nbd_recv_coroutines_wake_all(s);
566 s->wait_in_flight = true;
567 qemu_coroutine_yield();
568 s->wait_in_flight = false;
569 qemu_co_mutex_lock(&s->send_mutex);
572 qemu_co_mutex_unlock(&s->send_mutex);
574 if (!nbd_client_connecting(s)) {
575 return;
579 * Now we are sure that nobody is accessing the channel, and no one will
580 * try until we set the state to CONNECTED.
583 /* Finalize previous connection if any */
584 if (s->ioc) {
585 qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc));
586 yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
587 nbd_yank, s->bs);
588 object_unref(OBJECT(s->sioc));
589 s->sioc = NULL;
590 object_unref(OBJECT(s->ioc));
591 s->ioc = NULL;
594 if (nbd_co_establish_connection(s->bs, NULL) < 0) {
595 ret = -ECONNREFUSED;
596 goto out;
599 bdrv_dec_in_flight(s->bs);
601 ret = nbd_client_handshake(s->bs, NULL);
603 if (s->drained) {
604 s->wait_drained_end = true;
605 while (s->drained) {
607 * We may be entered once from nbd_client_attach_aio_context_bh
608 * and then again from nbd_client_co_drain_end, hence the loop.
610 qemu_coroutine_yield();
613 bdrv_inc_in_flight(s->bs);
615 out:
616 if (ret >= 0) {
617 /* successfully connected */
618 s->state = NBD_CLIENT_CONNECTED;
619 qemu_co_queue_restart_all(&s->free_sema);
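/*
 * Retry loop run by connection_co while in a CONNECTING state: arm the
 * reconnect-delay timer, attempt to reconnect, and sleep with
 * exponential backoff (1s doubling up to 16s) between attempts while
 * cooperating with drained sections.
 */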
623 static coroutine_fn void nbd_co_reconnect_loop(BDRVNBDState *s)
625 uint64_t timeout = 1 * NANOSECONDS_PER_SECOND;
626 uint64_t max_timeout = 16 * NANOSECONDS_PER_SECOND;
628 if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTING_WAIT) {
629 reconnect_delay_timer_init(s, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
630 s->reconnect_delay * NANOSECONDS_PER_SECOND);
633 nbd_reconnect_attempt(s);
635 while (nbd_client_connecting(s)) {
636 if (s->drained) {
637 bdrv_dec_in_flight(s->bs);
638 s->wait_drained_end = true;
639 while (s->drained) {
641 * We may be entered once from nbd_client_attach_aio_context_bh
642 * and then again from nbd_client_co_drain_end, hence the loop.
644 qemu_coroutine_yield();
646 bdrv_inc_in_flight(s->bs);
647 } else {
648 qemu_co_sleep_ns_wakeable(&s->reconnect_sleep,
649 QEMU_CLOCK_REALTIME, timeout);
650 if (s->drained) {
651 continue;
653 if (timeout < max_timeout) {
654 timeout *= 2;
658 nbd_reconnect_attempt(s);
661 reconnect_delay_timer_del(s);
664 static coroutine_fn void nbd_connection_entry(void *opaque)
666 BDRVNBDState *s = opaque;
667 uint64_t i;
668 int ret = 0;
669 Error *local_err = NULL;
671 while (qatomic_load_acquire(&s->state) != NBD_CLIENT_QUIT) {
673 * The NBD client can only really be considered idle when it has
674 * yielded from qio_channel_readv_all_eof(), waiting for data. This is
675 * the point where the additional scheduled coroutine entry happens
676 * after nbd_client_attach_aio_context().
678 * Therefore we keep an additional in_flight reference all the time and
679 * only drop it temporarily here.
682 if (nbd_client_connecting(s)) {
683 nbd_co_reconnect_loop(s);
686 if (qatomic_load_acquire(&s->state) != NBD_CLIENT_CONNECTED) {
687 continue;
690 assert(s->reply.handle == 0);
691 ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, &local_err);
693 if (local_err) {
694 trace_nbd_read_reply_entry_fail(ret, error_get_pretty(local_err));
695 error_free(local_err);
696 local_err = NULL;
698 if (ret <= 0) {
699 nbd_channel_error(s, ret ? ret : -EIO);
700 continue;
704 * There's no need for a mutex on the receive side, because the
705 * handler acts as a synchronization point and ensures that only
706 * one coroutine is called until the reply finishes.
708 i = HANDLE_TO_INDEX(s, s->reply.handle);
709 if (i >= MAX_NBD_REQUESTS ||
710 !s->requests[i].coroutine ||
711 !s->requests[i].receiving ||
712 (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply))
714 nbd_channel_error(s, -EINVAL);
715 continue;
719 * We're woken up again by the request itself. Note that there
720 * is no race between yielding and reentering connection_co. This
721 * is because:
723 * - if the request runs on the same AioContext, it is only
724 * entered after we yield
726 * - if the request runs on a different AioContext, reentering
727 * connection_co happens through a bottom half, which can only
728 * run after we yield.
730 aio_co_wake(s->requests[i].coroutine);
731 qemu_coroutine_yield();
734 qemu_co_queue_restart_all(&s->free_sema);
735 nbd_recv_coroutines_wake_all(s);
736 bdrv_dec_in_flight(s->bs);
738 s->connection_co = NULL;
739 if (s->ioc) {
740 qio_channel_detach_aio_context(QIO_CHANNEL(s->ioc));
741 yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
742 nbd_yank, s->bs);
743 object_unref(OBJECT(s->sioc));
744 s->sioc = NULL;
745 object_unref(OBJECT(s->ioc));
746 s->ioc = NULL;
749 if (s->teardown_co) {
750 aio_co_wake(s->teardown_co);
752 aio_wait_kick();
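/*
 * Allocate a request slot and transmit the request header (plus the
 * optional write payload) to the server. Blocks on free_sema while all
 * MAX_NBD_REQUESTS slots are busy or while a delayed reconnect is in
 * progress; the chosen slot index is encoded into the wire handle via
 * INDEX_TO_HANDLE().
 */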
755 static int nbd_co_send_request(BlockDriverState *bs,
756 NBDRequest *request,
757 QEMUIOVector *qiov)
759 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
760 int rc, i = -1;
762 qemu_co_mutex_lock(&s->send_mutex);
763 while (s->in_flight == MAX_NBD_REQUESTS || nbd_client_connecting_wait(s)) {
764 qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
767 if (qatomic_load_acquire(&s->state) != NBD_CLIENT_CONNECTED) {
768 rc = -EIO;
769 goto err;
772 s->in_flight++;
774 for (i = 0; i < MAX_NBD_REQUESTS; i++) {
775 if (s->requests[i].coroutine == NULL) {
776 break;
780 g_assert(qemu_in_coroutine());
781 assert(i < MAX_NBD_REQUESTS);
783 s->requests[i].coroutine = qemu_coroutine_self();
784 s->requests[i].offset = request->from;
785 s->requests[i].receiving = false;
787 request->handle = INDEX_TO_HANDLE(s, i);
789 assert(s->ioc);
791 if (qiov) {
792 qio_channel_set_cork(s->ioc, true);
793 rc = nbd_send_request(s->ioc, request);
794 if (qatomic_load_acquire(&s->state) == NBD_CLIENT_CONNECTED &&
795 rc >= 0) {
796 if (qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
797 NULL) < 0) {
798 rc = -EIO;
800 } else if (rc >= 0) {
801 rc = -EIO;
803 qio_channel_set_cork(s->ioc, false);
804 } else {
805 rc = nbd_send_request(s->ioc, request);
808 err:
809 if (rc < 0) {
810 nbd_channel_error(s, rc);
811 if (i != -1) {
812 s->requests[i].coroutine = NULL;
813 s->in_flight--;
815 if (s->in_flight == 0 && s->wait_in_flight) {
816 aio_co_wake(s->connection_co);
817 } else {
818 qemu_co_queue_next(&s->free_sema);
821 qemu_co_mutex_unlock(&s->send_mutex);
822 return rc;
825 static inline uint16_t payload_advance16(uint8_t **payload)
827 *payload += 2;
828 return lduw_be_p(*payload - 2);
831 static inline uint32_t payload_advance32(uint8_t **payload)
833 *payload += 4;
834 return ldl_be_p(*payload - 4);
837 static inline uint64_t payload_advance64(uint8_t **payload)
839 *payload += 8;
840 return ldq_be_p(*payload - 8);
843 static int nbd_parse_offset_hole_payload(BDRVNBDState *s,
844 NBDStructuredReplyChunk *chunk,
845 uint8_t *payload, uint64_t orig_offset,
846 QEMUIOVector *qiov, Error **errp)
848 uint64_t offset;
849 uint32_t hole_size;
851 if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
852 error_setg(errp, "Protocol error: invalid payload for "
853 "NBD_REPLY_TYPE_OFFSET_HOLE");
854 return -EINVAL;
857 offset = payload_advance64(&payload);
858 hole_size = payload_advance32(&payload);
860 if (!hole_size || offset < orig_offset || hole_size > qiov->size ||
861 offset > orig_offset + qiov->size - hole_size) {
862 error_setg(errp, "Protocol error: server sent chunk exceeding requested"
863 " region");
864 return -EINVAL;
866 if (s->info.min_block &&
867 !QEMU_IS_ALIGNED(hole_size, s->info.min_block)) {
868 trace_nbd_structured_read_compliance("hole");
871 qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);
873 return 0;
877 * nbd_parse_blockstatus_payload
878 * Based on our request, we expect only one extent in reply, for the
879 * base:allocation context.
881 static int nbd_parse_blockstatus_payload(BDRVNBDState *s,
882 NBDStructuredReplyChunk *chunk,
883 uint8_t *payload, uint64_t orig_length,
884 NBDExtent *extent, Error **errp)
886 uint32_t context_id;
888 /* The server succeeded, so it must have sent [at least] one extent */
889 if (chunk->length < sizeof(context_id) + sizeof(*extent)) {
890 error_setg(errp, "Protocol error: invalid payload for "
891 "NBD_REPLY_TYPE_BLOCK_STATUS");
892 return -EINVAL;
895 context_id = payload_advance32(&payload);
896 if (s->info.context_id != context_id) {
897 error_setg(errp, "Protocol error: unexpected context id %d for "
898 "NBD_REPLY_TYPE_BLOCK_STATUS, when negotiated context "
899 "id is %d", context_id,
900 s->info.context_id);
901 return -EINVAL;
904 extent->length = payload_advance32(&payload);
905 extent->flags = payload_advance32(&payload);
907 if (extent->length == 0) {
908 error_setg(errp, "Protocol error: server sent status chunk with "
909 "zero length");
910 return -EINVAL;
914 * A server sending unaligned block status is in violation of the
915 * protocol, but as qemu-nbd 3.1 is such a server (at least for
916 * POSIX files that are not a multiple of 512 bytes, since qemu
917 * rounds files up to 512-byte multiples but lseek(SEEK_HOLE)
918 * still sees an implicit hole beyond the real EOF), it's nicer to
919 * work around the misbehaving server. If the request included
920 * more than the final unaligned block, truncate it back to an
921 * aligned result; if the request was only the final block, round
922 * up to the full block and change the status to fully-allocated
923 * (always a safe status, even if it loses information).
925 if (s->info.min_block && !QEMU_IS_ALIGNED(extent->length,
926 s->info.min_block)) {
927 trace_nbd_parse_blockstatus_compliance("extent length is unaligned");
928 if (extent->length > s->info.min_block) {
929 extent->length = QEMU_ALIGN_DOWN(extent->length,
930 s->info.min_block);
931 } else {
932 extent->length = s->info.min_block;
933 extent->flags = 0;
938 * We used NBD_CMD_FLAG_REQ_ONE, so the server should not have
939 * sent us any more than one extent, nor should it have included
940 * status beyond our request in that extent. However, it's easy
941 * enough to ignore the server's noncompliance without killing the
942 * connection; just ignore trailing extents, and clamp things to
943 * the length of our request.
945 if (chunk->length > sizeof(context_id) + sizeof(*extent)) {
946 trace_nbd_parse_blockstatus_compliance("more than one extent");
948 if (extent->length > orig_length) {
949 extent->length = orig_length;
950 trace_nbd_parse_blockstatus_compliance("extent length too large");
954 * HACK: if we are using x-dirty-bitmaps to access
955 * qemu:allocation-depth, treat all depths > 2 the same as 2,
956 * since nbd_client_co_block_status is only expecting the low two
957 * bits to be set.
959 if (s->alloc_depth && extent->flags > 2) {
960 extent->flags = 2;
963 return 0;
967 * nbd_parse_error_payload
968 * On success, @errp contains a message describing the NBD error reply.
970 static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
971 uint8_t *payload, int *request_ret,
972 Error **errp)
974 uint32_t error;
975 uint16_t message_size;
977 assert(chunk->type & (1 << 15));
979 if (chunk->length < sizeof(error) + sizeof(message_size)) {
980 error_setg(errp,
981 "Protocol error: invalid payload for structured error");
982 return -EINVAL;
985 error = nbd_errno_to_system_errno(payload_advance32(&payload));
986 if (error == 0) {
987 error_setg(errp, "Protocol error: server sent structured error chunk "
988 "with error = 0");
989 return -EINVAL;
992 *request_ret = -error;
993 message_size = payload_advance16(&payload);
995 if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
996 error_setg(errp, "Protocol error: server sent structured error chunk "
997 "with incorrect message size");
998 return -EINVAL;
1001 /* TODO: Add a trace point to mention the server complaint */
1003 /* TODO handle ERROR_OFFSET */
1005 return 0;
1008 static int nbd_co_receive_offset_data_payload(BDRVNBDState *s,
1009 uint64_t orig_offset,
1010 QEMUIOVector *qiov, Error **errp)
1012 QEMUIOVector sub_qiov;
1013 uint64_t offset;
1014 size_t data_size;
1015 int ret;
1016 NBDStructuredReplyChunk *chunk = &s->reply.structured;
1018 assert(nbd_reply_is_structured(&s->reply));
1020 /* The NBD spec requires at least one byte of payload */
1021 if (chunk->length <= sizeof(offset)) {
1022 error_setg(errp, "Protocol error: invalid payload for "
1023 "NBD_REPLY_TYPE_OFFSET_DATA");
1024 return -EINVAL;
1027 if (nbd_read64(s->ioc, &offset, "OFFSET_DATA offset", errp) < 0) {
1028 return -EIO;
1031 data_size = chunk->length - sizeof(offset);
1032 assert(data_size);
1033 if (offset < orig_offset || data_size > qiov->size ||
1034 offset > orig_offset + qiov->size - data_size) {
1035 error_setg(errp, "Protocol error: server sent chunk exceeding requested"
1036 " region");
1037 return -EINVAL;
1039 if (s->info.min_block && !QEMU_IS_ALIGNED(data_size, s->info.min_block)) {
1040 trace_nbd_structured_read_compliance("data");
1043 qemu_iovec_init(&sub_qiov, qiov->niov);
1044 qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
1045 ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
1046 qemu_iovec_destroy(&sub_qiov);
1048 return ret < 0 ? -EIO : 0;
1051 #define NBD_MAX_MALLOC_PAYLOAD 1000
1052 static coroutine_fn int nbd_co_receive_structured_payload(
1053 BDRVNBDState *s, void **payload, Error **errp)
1055 int ret;
1056 uint32_t len;
1058 assert(nbd_reply_is_structured(&s->reply));
1060 len = s->reply.structured.length;
1062 if (len == 0) {
1063 return 0;
1066 if (payload == NULL) {
1067 error_setg(errp, "Unexpected structured payload");
1068 return -EINVAL;
1071 if (len > NBD_MAX_MALLOC_PAYLOAD) {
1072 error_setg(errp, "Payload too large");
1073 return -EINVAL;
1076 *payload = g_new(char, len);
1077 ret = nbd_read(s->ioc, *payload, len, "structured payload", errp);
1078 if (ret < 0) {
1079 g_free(*payload);
1080 *payload = NULL;
1081 return ret;
1084 return 0;
1088 * nbd_co_do_receive_one_chunk
1089 * for simple reply:
1090 * set request_ret to received reply error
1091 * if qiov is not NULL: read payload to @qiov
1092 * for structured reply chunk:
1093 * if error chunk: read payload, set @request_ret, do not set @payload
1094 * else if offset_data chunk: read payload data to @qiov, do not set @payload
1095 * else: read payload to @payload
1097 * If function fails, @errp contains corresponding error message, and the
1098 * connection with the server is suspect. If it returns 0, then the
1099 * transaction succeeded (although @request_ret may be a negative errno
1100 * corresponding to the server's error reply), and errp is unchanged.
1102 static coroutine_fn int nbd_co_do_receive_one_chunk(
1103 BDRVNBDState *s, uint64_t handle, bool only_structured,
1104 int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
1106 int ret;
1107 int i = HANDLE_TO_INDEX(s, handle);
1108 void *local_payload = NULL;
1109 NBDStructuredReplyChunk *chunk;
1111 if (payload) {
1112 *payload = NULL;
1114 *request_ret = 0;
1116 /* Wait until we're woken up by nbd_connection_entry. */
1117 s->requests[i].receiving = true;
1118 qemu_coroutine_yield();
1119 s->requests[i].receiving = false;
1120 if (qatomic_load_acquire(&s->state) != NBD_CLIENT_CONNECTED) {
1121 error_setg(errp, "Connection closed");
1122 return -EIO;
1124 assert(s->ioc);
1126 assert(s->reply.handle == handle);
1128 if (nbd_reply_is_simple(&s->reply)) {
1129 if (only_structured) {
1130 error_setg(errp, "Protocol error: simple reply when structured "
1131 "reply chunk was expected");
1132 return -EINVAL;
1135 *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
1136 if (*request_ret < 0 || !qiov) {
1137 return 0;
1140 return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
1141 errp) < 0 ? -EIO : 0;
1144 /* handle structured reply chunk */
1145 assert(s->info.structured_reply);
1146 chunk = &s->reply.structured;
1148 if (chunk->type == NBD_REPLY_TYPE_NONE) {
1149 if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
1150 error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without"
1151 " NBD_REPLY_FLAG_DONE flag set");
1152 return -EINVAL;
1154 if (chunk->length) {
1155 error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk with"
1156 " nonzero length");
1157 return -EINVAL;
1159 return 0;
1162 if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
1163 if (!qiov) {
1164 error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk");
1165 return -EINVAL;
1168 return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
1169 qiov, errp);
1172 if (nbd_reply_type_is_error(chunk->type)) {
1173 payload = &local_payload;
1176 ret = nbd_co_receive_structured_payload(s, payload, errp);
1177 if (ret < 0) {
1178 return ret;
1181 if (nbd_reply_type_is_error(chunk->type)) {
1182 ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp);
1183 g_free(local_payload);
1184 return ret;
1187 return 0;
1191 * nbd_co_receive_one_chunk
1192 * Read reply, wake up connection_co and set s->quit if needed.
1193 * Return value is a fatal error code or normal nbd reply error code
1195 static coroutine_fn int nbd_co_receive_one_chunk(
1196 BDRVNBDState *s, uint64_t handle, bool only_structured,
1197 int *request_ret, QEMUIOVector *qiov, NBDReply *reply, void **payload,
1198 Error **errp)
1200 int ret = nbd_co_do_receive_one_chunk(s, handle, only_structured,
1201 request_ret, qiov, payload, errp);
1203 if (ret < 0) {
1204 memset(reply, 0, sizeof(*reply));
1205 nbd_channel_error(s, ret);
1206 } else {
1207 /* For assert at loop start in nbd_connection_entry */
1208 *reply = s->reply;
1210 s->reply.handle = 0;
1212 if (s->connection_co && !s->wait_in_flight) {
1214 * We must check s->wait_in_flight, because we may have been entered
1215 * by nbd_recv_coroutines_wake_all(); in that case we should not wake
1216 * connection_co here, as it will be woken by the last request.
1218 aio_co_wake(s->connection_co);
1221 return ret;
1224 typedef struct NBDReplyChunkIter {
1225 int ret;
1226 int request_ret;
1227 Error *err;
1228 bool done, only_structured;
1229 } NBDReplyChunkIter;
1231 static void nbd_iter_channel_error(NBDReplyChunkIter *iter,
1232 int ret, Error **local_err)
1234 assert(local_err && *local_err);
1235 assert(ret < 0);
1237 if (!iter->ret) {
1238 iter->ret = ret;
1239 error_propagate(&iter->err, *local_err);
1240 } else {
1241 error_free(*local_err);
1244 *local_err = NULL;
1247 static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
1249 assert(ret < 0);
1251 if (!iter->request_ret) {
1252 iter->request_ret = ret;
1257 * NBD_FOREACH_REPLY_CHUNK
1258 * The pointer stored in @payload requires g_free() to free it.
1260 #define NBD_FOREACH_REPLY_CHUNK(s, iter, handle, structured, \
1261 qiov, reply, payload) \
1262 for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
1263 nbd_reply_chunk_iter_receive(s, &iter, handle, qiov, reply, payload);)
1266 * nbd_reply_chunk_iter_receive
1267 * The pointer stored in @payload requires g_free() to free it.
1269 static bool nbd_reply_chunk_iter_receive(BDRVNBDState *s,
1270 NBDReplyChunkIter *iter,
1271 uint64_t handle,
1272 QEMUIOVector *qiov, NBDReply *reply,
1273 void **payload)
1275 int ret, request_ret;
1276 NBDReply local_reply;
1277 NBDStructuredReplyChunk *chunk;
1278 Error *local_err = NULL;
1279 if (qatomic_load_acquire(&s->state) != NBD_CLIENT_CONNECTED) {
1280 error_setg(&local_err, "Connection closed");
1281 nbd_iter_channel_error(iter, -EIO, &local_err);
1282 goto break_loop;
1285 if (iter->done) {
1286 /* Previous iteration was last. */
1287 goto break_loop;
1290 if (reply == NULL) {
1291 reply = &local_reply;
1294 ret = nbd_co_receive_one_chunk(s, handle, iter->only_structured,
1295 &request_ret, qiov, reply, payload,
1296 &local_err);
1297 if (ret < 0) {
1298 nbd_iter_channel_error(iter, ret, &local_err);
1299 } else if (request_ret < 0) {
1300 nbd_iter_request_error(iter, request_ret);
1303 /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
1304 if (nbd_reply_is_simple(reply) ||
1305 qatomic_load_acquire(&s->state) != NBD_CLIENT_CONNECTED) {
1306 goto break_loop;
1309 chunk = &reply->structured;
1310 iter->only_structured = true;
1312 if (chunk->type == NBD_REPLY_TYPE_NONE) {
1313 /* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */
1314 assert(chunk->flags & NBD_REPLY_FLAG_DONE);
1315 goto break_loop;
1318 if (chunk->flags & NBD_REPLY_FLAG_DONE) {
1319 /* This iteration is last. */
1320 iter->done = true;
1323 /* Execute the loop body */
1324 return true;
1326 break_loop:
1327 s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL;
1329 qemu_co_mutex_lock(&s->send_mutex);
1330 s->in_flight--;
1331 if (s->in_flight == 0 && s->wait_in_flight) {
1332 aio_co_wake(s->connection_co);
1333 } else {
1334 qemu_co_queue_next(&s->free_sema);
1336 qemu_co_mutex_unlock(&s->send_mutex);
1338 return false;
1341 static int nbd_co_receive_return_code(BDRVNBDState *s, uint64_t handle,
1342 int *request_ret, Error **errp)
1344 NBDReplyChunkIter iter;
1346 NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) {
1347 /* nbd_reply_chunk_iter_receive does all the work */
1350 error_propagate(errp, iter.err);
1351 *request_ret = iter.request_ret;
1352 return iter.ret;
1355 static int nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t handle,
1356 uint64_t offset, QEMUIOVector *qiov,
1357 int *request_ret, Error **errp)
1359 NBDReplyChunkIter iter;
1360 NBDReply reply;
1361 void *payload = NULL;
1362 Error *local_err = NULL;
1364 NBD_FOREACH_REPLY_CHUNK(s, iter, handle, s->info.structured_reply,
1365 qiov, &reply, &payload)
1367 int ret;
1368 NBDStructuredReplyChunk *chunk = &reply.structured;
1370 assert(nbd_reply_is_structured(&reply));
1372 switch (chunk->type) {
1373 case NBD_REPLY_TYPE_OFFSET_DATA:
1375 * special cased in nbd_co_receive_one_chunk, data is already
1376 * in qiov
1378 break;
1379 case NBD_REPLY_TYPE_OFFSET_HOLE:
1380 ret = nbd_parse_offset_hole_payload(s, &reply.structured, payload,
1381 offset, qiov, &local_err);
1382 if (ret < 0) {
1383 nbd_channel_error(s, ret);
1384 nbd_iter_channel_error(&iter, ret, &local_err);
1386 break;
1387 default:
1388 if (!nbd_reply_type_is_error(chunk->type)) {
1389 /* not allowed reply type */
1390 nbd_channel_error(s, -EINVAL);
1391 error_setg(&local_err,
1392 "Unexpected reply type: %d (%s) for CMD_READ",
1393 chunk->type, nbd_reply_type_lookup(chunk->type));
1394 nbd_iter_channel_error(&iter, -EINVAL, &local_err);
1398 g_free(payload);
1399 payload = NULL;
1402 error_propagate(errp, iter.err);
1403 *request_ret = iter.request_ret;
1404 return iter.ret;
1407 static int nbd_co_receive_blockstatus_reply(BDRVNBDState *s,
1408 uint64_t handle, uint64_t length,
1409 NBDExtent *extent,
1410 int *request_ret, Error **errp)
1412 NBDReplyChunkIter iter;
1413 NBDReply reply;
1414 void *payload = NULL;
1415 Error *local_err = NULL;
1416 bool received = false;
1418 assert(!extent->length);
1419 NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, &reply, &payload) {
1420 int ret;
1421 NBDStructuredReplyChunk *chunk = &reply.structured;
1423 assert(nbd_reply_is_structured(&reply));
1425 switch (chunk->type) {
1426 case NBD_REPLY_TYPE_BLOCK_STATUS:
1427 if (received) {
1428 nbd_channel_error(s, -EINVAL);
1429 error_setg(&local_err, "Several BLOCK_STATUS chunks in reply");
1430 nbd_iter_channel_error(&iter, -EINVAL, &local_err);
1432 received = true;
1434 ret = nbd_parse_blockstatus_payload(s, &reply.structured,
1435 payload, length, extent,
1436 &local_err);
1437 if (ret < 0) {
1438 nbd_channel_error(s, ret);
1439 nbd_iter_channel_error(&iter, ret, &local_err);
1441 break;
1442 default:
1443 if (!nbd_reply_type_is_error(chunk->type)) {
1444 nbd_channel_error(s, -EINVAL);
1445 error_setg(&local_err,
1446 "Unexpected reply type: %d (%s) "
1447 "for CMD_BLOCK_STATUS",
1448 chunk->type, nbd_reply_type_lookup(chunk->type));
1449 nbd_iter_channel_error(&iter, -EINVAL, &local_err);
1453 g_free(payload);
1454 payload = NULL;
1457 if (!extent->length && !iter.request_ret) {
1458 error_setg(&local_err, "Server did not reply with any status extents");
1459 nbd_iter_channel_error(&iter, -EIO, &local_err);
1462 error_propagate(errp, iter.err);
1463 *request_ret = iter.request_ret;
1464 return iter.ret;
1467 static int nbd_co_request(BlockDriverState *bs, NBDRequest *request,
1468 QEMUIOVector *write_qiov)
1470 int ret, request_ret;
1471 Error *local_err = NULL;
1472 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1474 assert(request->type != NBD_CMD_READ);
1475 if (write_qiov) {
1476 assert(request->type == NBD_CMD_WRITE);
1477 assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
1478 } else {
1479 assert(request->type != NBD_CMD_WRITE);
1482 do {
1483 ret = nbd_co_send_request(bs, request, write_qiov);
1484 if (ret < 0) {
1485 continue;
1488 ret = nbd_co_receive_return_code(s, request->handle,
1489 &request_ret, &local_err);
1490 if (local_err) {
1491 trace_nbd_co_request_fail(request->from, request->len,
1492 request->handle, request->flags,
1493 request->type,
1494 nbd_cmd_lookup(request->type),
1495 ret, error_get_pretty(local_err));
1496 error_free(local_err);
1497 local_err = NULL;
1499 } while (ret < 0 && nbd_client_connecting_wait(s));
1501 return ret ? ret : request_ret;
1504 static int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
1505 uint64_t bytes, QEMUIOVector *qiov, int flags)
1507 int ret, request_ret;
1508 Error *local_err = NULL;
1509 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1510 NBDRequest request = {
1511 .type = NBD_CMD_READ,
1512 .from = offset,
1513 .len = bytes,
1516 assert(bytes <= NBD_MAX_BUFFER_SIZE);
1517 assert(!flags);
1519 if (!bytes) {
1520 return 0;
1523 * Work around the fact that the block layer doesn't do
1524 * byte-accurate sizing yet - if the read exceeds the server's
1525 * advertised size because the block layer rounded size up, then
1526 * truncate the request to the server and tail-pad with zero.
1528 if (offset >= s->info.size) {
1529 assert(bytes < BDRV_SECTOR_SIZE);
1530 qemu_iovec_memset(qiov, 0, 0, bytes);
1531 return 0;
1533 if (offset + bytes > s->info.size) {
1534 uint64_t slop = offset + bytes - s->info.size;
1536 assert(slop < BDRV_SECTOR_SIZE);
1537 qemu_iovec_memset(qiov, bytes - slop, 0, slop);
1538 request.len -= slop;
1541 do {
1542 ret = nbd_co_send_request(bs, &request, NULL);
1543 if (ret < 0) {
1544 continue;
1547 ret = nbd_co_receive_cmdread_reply(s, request.handle, offset, qiov,
1548 &request_ret, &local_err);
1549 if (local_err) {
1550 trace_nbd_co_request_fail(request.from, request.len, request.handle,
1551 request.flags, request.type,
1552 nbd_cmd_lookup(request.type),
1553 ret, error_get_pretty(local_err));
1554 error_free(local_err);
1555 local_err = NULL;
1557 } while (ret < 0 && nbd_client_connecting_wait(s));
1559 return ret ? ret : request_ret;
1562 static int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
1563 uint64_t bytes, QEMUIOVector *qiov, int flags)
1565 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1566 NBDRequest request = {
1567 .type = NBD_CMD_WRITE,
1568 .from = offset,
1569 .len = bytes,
1572 assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
1573 if (flags & BDRV_REQ_FUA) {
1574 assert(s->info.flags & NBD_FLAG_SEND_FUA);
1575 request.flags |= NBD_CMD_FLAG_FUA;
1578 assert(bytes <= NBD_MAX_BUFFER_SIZE);
1580 if (!bytes) {
1581 return 0;
1583 return nbd_co_request(bs, &request, qiov);
1586 static int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
1587 int bytes, BdrvRequestFlags flags)
1589 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1590 NBDRequest request = {
1591 .type = NBD_CMD_WRITE_ZEROES,
1592 .from = offset,
1593 .len = bytes,
1596 assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
1597 if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
1598 return -ENOTSUP;
1601 if (flags & BDRV_REQ_FUA) {
1602 assert(s->info.flags & NBD_FLAG_SEND_FUA);
1603 request.flags |= NBD_CMD_FLAG_FUA;
1605 if (!(flags & BDRV_REQ_MAY_UNMAP)) {
1606 request.flags |= NBD_CMD_FLAG_NO_HOLE;
1608 if (flags & BDRV_REQ_NO_FALLBACK) {
1609 assert(s->info.flags & NBD_FLAG_SEND_FAST_ZERO);
1610 request.flags |= NBD_CMD_FLAG_FAST_ZERO;
1613 if (!bytes) {
1614 return 0;
1616 return nbd_co_request(bs, &request, NULL);
1619 static int nbd_client_co_flush(BlockDriverState *bs)
1621 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1622 NBDRequest request = { .type = NBD_CMD_FLUSH };
1624 if (!(s->info.flags & NBD_FLAG_SEND_FLUSH)) {
1625 return 0;
1628 request.from = 0;
1629 request.len = 0;
1631 return nbd_co_request(bs, &request, NULL);
1634 static int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset,
1635 int bytes)
1637 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1638 NBDRequest request = {
1639 .type = NBD_CMD_TRIM,
1640 .from = offset,
1641 .len = bytes,
1644 assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
1645 if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
1646 return 0;
1649 return nbd_co_request(bs, &request, NULL);
1652 static int coroutine_fn nbd_client_co_block_status(
1653 BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
1654 int64_t *pnum, int64_t *map, BlockDriverState **file)
1656 int ret, request_ret;
1657 NBDExtent extent = { 0 };
1658 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1659 Error *local_err = NULL;
1661 NBDRequest request = {
1662 .type = NBD_CMD_BLOCK_STATUS,
1663 .from = offset,
1664 .len = MIN(QEMU_ALIGN_DOWN(INT_MAX, bs->bl.request_alignment),
1665 MIN(bytes, s->info.size - offset)),
1666 .flags = NBD_CMD_FLAG_REQ_ONE,
1669 if (!s->info.base_allocation) {
1670 *pnum = bytes;
1671 *map = offset;
1672 *file = bs;
1673 return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
1677 * Work around the fact that the block layer doesn't do
1678 * byte-accurate sizing yet - if the status request exceeds the
1679 * server's advertised size because the block layer rounded size
1680 * up, we truncated the request to the server (above), or are
1681 * called on just the hole.
1683 if (offset >= s->info.size) {
1684 *pnum = bytes;
1685 assert(bytes < BDRV_SECTOR_SIZE);
1686 /* Intentionally don't report offset_valid for the hole */
1687 return BDRV_BLOCK_ZERO;
1690 if (s->info.min_block) {
1691 assert(QEMU_IS_ALIGNED(request.len, s->info.min_block));
1693 do {
1694 ret = nbd_co_send_request(bs, &request, NULL);
1695 if (ret < 0) {
1696 continue;
1699 ret = nbd_co_receive_blockstatus_reply(s, request.handle, bytes,
1700 &extent, &request_ret,
1701 &local_err);
1702 if (local_err) {
1703 trace_nbd_co_request_fail(request.from, request.len, request.handle,
1704 request.flags, request.type,
1705 nbd_cmd_lookup(request.type),
1706 ret, error_get_pretty(local_err));
1707 error_free(local_err);
1708 local_err = NULL;
1710 } while (ret < 0 && nbd_client_connecting_wait(s));
1712 if (ret < 0 || request_ret < 0) {
1713 return ret ? ret : request_ret;
1716 assert(extent.length);
1717 *pnum = extent.length;
1718 *map = offset;
1719 *file = bs;
1720 return (extent.flags & NBD_STATE_HOLE ? 0 : BDRV_BLOCK_DATA) |
1721 (extent.flags & NBD_STATE_ZERO ? BDRV_BLOCK_ZERO : 0) |
1722 BDRV_BLOCK_OFFSET_VALID;
1725 static int nbd_client_reopen_prepare(BDRVReopenState *state,
1726 BlockReopenQueue *queue, Error **errp)
1728 BDRVNBDState *s = (BDRVNBDState *)state->bs->opaque;
1730 if ((state->flags & BDRV_O_RDWR) && (s->info.flags & NBD_FLAG_READ_ONLY)) {
1731 error_setg(errp, "Can't reopen read-only NBD mount as read/write");
1732 return -EACCES;
1734 return 0;
1737 static void nbd_yank(void *opaque)
1739 BlockDriverState *bs = opaque;
1740 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1742 qatomic_store_release(&s->state, NBD_CLIENT_QUIT);
1743 qio_channel_shutdown(QIO_CHANNEL(s->sioc), QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
1746 static void nbd_client_close(BlockDriverState *bs)
1748 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1749 NBDRequest request = { .type = NBD_CMD_DISC };
1751 if (s->ioc) {
1752 nbd_send_request(s->ioc, &request);
1755 nbd_teardown_connection(bs);
1758 static int nbd_establish_connection(BlockDriverState *bs,
1759 SocketAddress *saddr,
1760 Error **errp)
1762 ERRP_GUARD();
1763 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1765 s->sioc = qio_channel_socket_new();
1766 qio_channel_set_name(QIO_CHANNEL(s->sioc), "nbd-client");
1768 qio_channel_socket_connect_sync(s->sioc, saddr, errp);
1769 if (*errp) {
1770 object_unref(OBJECT(s->sioc));
1771 s->sioc = NULL;
1772 return -1;
1775 yank_register_function(BLOCKDEV_YANK_INSTANCE(bs->node_name), nbd_yank, bs);
1776 qio_channel_set_delay(QIO_CHANNEL(s->sioc), false);
1778 return 0;
1781 /* nbd_client_handshake takes ownership of s->sioc. On failure it is unref'ed. */
1782 static int nbd_client_handshake(BlockDriverState *bs, Error **errp)
1784 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1785 AioContext *aio_context = bdrv_get_aio_context(bs);
1786 int ret;
1788 trace_nbd_client_handshake(s->export);
1789 qio_channel_set_blocking(QIO_CHANNEL(s->sioc), false, NULL);
1790 qio_channel_attach_aio_context(QIO_CHANNEL(s->sioc), aio_context);
1792 s->info.request_sizes = true;
1793 s->info.structured_reply = true;
1794 s->info.base_allocation = true;
1795 s->info.x_dirty_bitmap = g_strdup(s->x_dirty_bitmap);
1796 s->info.name = g_strdup(s->export ?: "");
1797 ret = nbd_receive_negotiate(aio_context, QIO_CHANNEL(s->sioc), s->tlscreds,
1798 s->hostname, &s->ioc, &s->info, errp);
1799 g_free(s->info.x_dirty_bitmap);
1800 g_free(s->info.name);
1801 if (ret < 0) {
1802 yank_unregister_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
1803 nbd_yank, bs);
1804 object_unref(OBJECT(s->sioc));
1805 s->sioc = NULL;
1806 return ret;
1808 if (s->x_dirty_bitmap) {
1809 if (!s->info.base_allocation) {
1810 error_setg(errp, "requested x-dirty-bitmap %s not found",
1811 s->x_dirty_bitmap);
1812 ret = -EINVAL;
1813 goto fail;
1815 if (strcmp(s->x_dirty_bitmap, "qemu:allocation-depth") == 0) {
1816 s->alloc_depth = true;
1819 if (s->info.flags & NBD_FLAG_READ_ONLY) {
1820 ret = bdrv_apply_auto_read_only(bs, "NBD export is read-only", errp);
1821 if (ret < 0) {
1822 goto fail;
1825 if (s->info.flags & NBD_FLAG_SEND_FUA) {
1826 bs->supported_write_flags = BDRV_REQ_FUA;
1827 bs->supported_zero_flags |= BDRV_REQ_FUA;
1829 if (s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
1830 bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
1831 if (s->info.flags & NBD_FLAG_SEND_FAST_ZERO) {
1832 bs->supported_zero_flags |= BDRV_REQ_NO_FALLBACK;
1836 if (!s->ioc) {
1837 s->ioc = QIO_CHANNEL(s->sioc);
1838 object_ref(OBJECT(s->ioc));
1841 trace_nbd_client_handshake_success(s->export);
1843 return 0;
1845 fail:
1847 * We have connected, but must fail for other reasons.
1848 * Send NBD_CMD_DISC as a courtesy to the server.
1851 NBDRequest request = { .type = NBD_CMD_DISC };
1853 nbd_send_request(s->ioc ?: QIO_CHANNEL(s->sioc), &request);
1855 yank_unregister_function(BLOCKDEV_YANK_INSTANCE(bs->node_name),
1856 nbd_yank, bs);
1857 object_unref(OBJECT(s->sioc));
1858 s->sioc = NULL;
1859 object_unref(OBJECT(s->ioc));
1860 s->ioc = NULL;
1862 return ret;
1867 * Parse nbd_open options
1870 static int nbd_parse_uri(const char *filename, QDict *options)
1872 URI *uri;
1873 const char *p;
1874 QueryParams *qp = NULL;
1875 int ret = 0;
1876 bool is_unix;
1878 uri = uri_parse(filename);
1879 if (!uri) {
1880 return -EINVAL;
1883 /* transport */
1884 if (!g_strcmp0(uri->scheme, "nbd")) {
1885 is_unix = false;
1886 } else if (!g_strcmp0(uri->scheme, "nbd+tcp")) {
1887 is_unix = false;
1888 } else if (!g_strcmp0(uri->scheme, "nbd+unix")) {
1889 is_unix = true;
1890 } else {
1891 ret = -EINVAL;
1892 goto out;
1895 p = uri->path ? uri->path : "";
1896 if (p[0] == '/') {
1897 p++;
1899 if (p[0]) {
1900 qdict_put_str(options, "export", p);
1903 qp = query_params_parse(uri->query);
1904 if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
1905 ret = -EINVAL;
1906 goto out;
1909 if (is_unix) {
1910 /* nbd+unix:///export?socket=path */
1911 if (uri->server || uri->port || strcmp(qp->p[0].name, "socket")) {
1912 ret = -EINVAL;
1913 goto out;
1915 qdict_put_str(options, "server.type", "unix");
1916 qdict_put_str(options, "server.path", qp->p[0].value);
1917 } else {
1918 QString *host;
1919 char *port_str;
1921 /* nbd[+tcp]://host[:port]/export */
1922 if (!uri->server) {
1923 ret = -EINVAL;
1924 goto out;
1927 /* strip braces from literal IPv6 address */
1928 if (uri->server[0] == '[') {
1929 host = qstring_from_substr(uri->server, 1,
1930 strlen(uri->server) - 1);
1931 } else {
1932 host = qstring_from_str(uri->server);
1935 qdict_put_str(options, "server.type", "inet");
1936 qdict_put(options, "server.host", host);
1938 port_str = g_strdup_printf("%d", uri->port ?: NBD_DEFAULT_PORT);
1939 qdict_put_str(options, "server.port", port_str);
1940 g_free(port_str);
1943 out:
1944 if (qp) {
1945 query_params_free(qp);
1947 uri_free(uri);
1948 return ret;
1951 static bool nbd_has_filename_options_conflict(QDict *options, Error **errp)
1953 const QDictEntry *e;
1955 for (e = qdict_first(options); e; e = qdict_next(options, e)) {
1956 if (!strcmp(e->key, "host") ||
1957 !strcmp(e->key, "port") ||
1958 !strcmp(e->key, "path") ||
1959 !strcmp(e->key, "export") ||
1960 strstart(e->key, "server.", NULL))
1962 error_setg(errp, "Option '%s' cannot be used with a file name",
1963 e->key);
1964 return true;
1968 return false;
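/*
 * Filename forms handled below include, e.g.:
 *   nbd://host[:port]/export
 *   nbd+unix:///export?socket=/path/to/socket
 *   nbd:host:port:exportname=export              (legacy)
 *   nbd:unix:/path/to/socket:exportname=export   (legacy)
 * (examples inferred from the URI/legacy parsing code; not exhaustive)
 */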
1971 static void nbd_parse_filename(const char *filename, QDict *options,
1972 Error **errp)
1974 g_autofree char *file = NULL;
1975 char *export_name;
1976 const char *host_spec;
1977 const char *unixpath;
1979 if (nbd_has_filename_options_conflict(options, errp)) {
1980 return;
1983 if (strstr(filename, "://")) {
1984 int ret = nbd_parse_uri(filename, options);
1985 if (ret < 0) {
1986 error_setg(errp, "No valid URL specified");
1988 return;
1991 file = g_strdup(filename);
1993 export_name = strstr(file, EN_OPTSTR);
1994 if (export_name) {
1995 if (export_name[strlen(EN_OPTSTR)] == 0) {
1996 return;
1998 export_name[0] = 0; /* truncate 'file' */
1999 export_name += strlen(EN_OPTSTR);
2001 qdict_put_str(options, "export", export_name);
2004 /* extract the host_spec - fail if it's not nbd:... */
2005 if (!strstart(file, "nbd:", &host_spec)) {
2006 error_setg(errp, "File name string for NBD must start with 'nbd:'");
2007 return;
2010 if (!*host_spec) {
2011 return;
2014 /* are we a UNIX or TCP socket? */
2015 if (strstart(host_spec, "unix:", &unixpath)) {
2016 qdict_put_str(options, "server.type", "unix");
2017 qdict_put_str(options, "server.path", unixpath);
2018 } else {
2019 InetSocketAddress *addr = g_new(InetSocketAddress, 1);
2021 if (inet_parse(addr, host_spec, errp)) {
2022 goto out_inet;
2025 qdict_put_str(options, "server.type", "inet");
2026 qdict_put_str(options, "server.host", addr->host);
2027 qdict_put_str(options, "server.port", addr->port);
2028 out_inet:
2029 qapi_free_InetSocketAddress(addr);
2033 static bool nbd_process_legacy_socket_options(QDict *output_options,
2034 QemuOpts *legacy_opts,
2035 Error **errp)
2037 const char *path = qemu_opt_get(legacy_opts, "path");
2038 const char *host = qemu_opt_get(legacy_opts, "host");
2039 const char *port = qemu_opt_get(legacy_opts, "port");
2040 const QDictEntry *e;
2042 if (!path && !host && !port) {
2043 return true;
2046 for (e = qdict_first(output_options); e; e = qdict_next(output_options, e))
2048 if (strstart(e->key, "server.", NULL)) {
2049 error_setg(errp, "Cannot use 'server' and path/host/port at the "
2050 "same time");
2051 return false;
2055 if (path && host) {
2056 error_setg(errp, "path and host may not be used at the same time");
2057 return false;
2058 } else if (path) {
2059 if (port) {
2060 error_setg(errp, "port may not be used without host");
2061 return false;
2064 qdict_put_str(output_options, "server.type", "unix");
2065 qdict_put_str(output_options, "server.path", path);
2066 } else if (host) {
2067 qdict_put_str(output_options, "server.type", "inet");
2068 qdict_put_str(output_options, "server.host", host);
2069 qdict_put_str(output_options, "server.port",
2070 port ?: stringify(NBD_DEFAULT_PORT));
2073 return true;
2076 static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options,
2077 Error **errp)
2079 SocketAddress *saddr = NULL;
2080 QDict *addr = NULL;
2081 Visitor *iv = NULL;
2083 qdict_extract_subqdict(options, &addr, "server.");
2084 if (!qdict_size(addr)) {
2085 error_setg(errp, "NBD server address missing");
2086 goto done;
2089 iv = qobject_input_visitor_new_flat_confused(addr, errp);
2090 if (!iv) {
2091 goto done;
2094 if (!visit_type_SocketAddress(iv, NULL, &saddr, errp)) {
2095 goto done;
2098 if (socket_address_parse_named_fd(saddr, errp) < 0) {
2099 qapi_free_SocketAddress(saddr);
2100 saddr = NULL;
2101 goto done;
2104 done:
2105 qobject_unref(addr);
2106 visit_free(iv);
2107 return saddr;
2110 static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp)
2112 Object *obj;
2113 QCryptoTLSCreds *creds;
2115 obj = object_resolve_path_component(
2116 object_get_objects_root(), id);
2117 if (!obj) {
2118 error_setg(errp, "No TLS credentials with id '%s'",
2119 id);
2120 return NULL;
2122 creds = (QCryptoTLSCreds *)
2123 object_dynamic_cast(obj, TYPE_QCRYPTO_TLS_CREDS);
2124 if (!creds) {
2125 error_setg(errp, "Object with id '%s' is not TLS credentials",
2126 id);
2127 return NULL;
2130 if (creds->endpoint != QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT) {
2131 error_setg(errp,
2132 "Expecting TLS credentials with a client endpoint");
2133 return NULL;
2135 object_ref(obj);
2136 return creds;
2140 static QemuOptsList nbd_runtime_opts = {
2141 .name = "nbd",
2142 .head = QTAILQ_HEAD_INITIALIZER(nbd_runtime_opts.head),
2143 .desc = {
2144 {
2145 .name = "host",
2146 .type = QEMU_OPT_STRING,
2147 .help = "TCP host to connect to",
2148 },
2149 {
2150 .name = "port",
2151 .type = QEMU_OPT_STRING,
2152 .help = "TCP port to connect to",
2153 },
2154 {
2155 .name = "path",
2156 .type = QEMU_OPT_STRING,
2157 .help = "Unix socket path to connect to",
2158 },
2159 {
2160 .name = "export",
2161 .type = QEMU_OPT_STRING,
2162 .help = "Name of the NBD export to open",
2163 },
2164 {
2165 .name = "tls-creds",
2166 .type = QEMU_OPT_STRING,
2167 .help = "ID of the TLS credentials to use",
2168 },
2169 {
2170 .name = "x-dirty-bitmap",
2171 .type = QEMU_OPT_STRING,
2172 .help = "experimental: expose named dirty bitmap in place of "
2173 "block status",
2174 },
2175 {
2176 .name = "reconnect-delay",
2177 .type = QEMU_OPT_NUMBER,
2178 .help = "On an unexpected disconnect, the nbd client tries to "
2179 "connect again until succeeding or encountering a serious "
2180 "error. During the first @reconnect-delay seconds, all "
2181 "requests are paused and will be rerun on a successful "
2182 "reconnect. After that time, any delayed requests and all "
2183 "future requests before a successful reconnect will "
2184 "immediately fail. Default 0",
2185 },
2186 { /* end of list */ }
2187 },
2188 };
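/*
 * For illustration (hypothetical values): a blockdev definition exercising
 * these runtime options might look like
 *
 *   -blockdev driver=nbd,node-name=nbd0,server.type=inet,
 *             server.host=localhost,server.port=10809,
 *             export=disk0,reconnect-delay=30
 *
 * host, port and path above are the legacy spellings that
 * nbd_process_legacy_socket_options() rewrites into server.* keys.
 */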
2190 static int nbd_process_options(BlockDriverState *bs, QDict *options,
2191 Error **errp)
2192 {
2193 BDRVNBDState *s = bs->opaque;
2194 QemuOpts *opts;
2195 int ret = -EINVAL;
2197 opts = qemu_opts_create(&nbd_runtime_opts, NULL, 0, &error_abort);
2198 if (!qemu_opts_absorb_qdict(opts, options, errp)) {
2199 goto error;
2200 }
2202 /* Translate @host, @port, and @path to a SocketAddress */
2203 if (!nbd_process_legacy_socket_options(options, opts, errp)) {
2204 goto error;
2205 }
2207 /* Pop the config into our state object. Exit if invalid. */
2208 s->saddr = nbd_config(s, options, errp);
2209 if (!s->saddr) {
2210 goto error;
2211 }
2213 s->export = g_strdup(qemu_opt_get(opts, "export"));
2214 if (s->export && strlen(s->export) > NBD_MAX_STRING_SIZE) {
2215 error_setg(errp, "export name too long to send to server");
2216 goto error;
2217 }
2219 s->tlscredsid = g_strdup(qemu_opt_get(opts, "tls-creds"));
2220 if (s->tlscredsid) {
2221 s->tlscreds = nbd_get_tls_creds(s->tlscredsid, errp);
2222 if (!s->tlscreds) {
2223 goto error;
2224 }
2226 /* TODO SOCKET_ADDRESS_KIND_FD where fd has AF_INET or AF_INET6 */
2227 if (s->saddr->type != SOCKET_ADDRESS_TYPE_INET) {
2228 error_setg(errp, "TLS only supported over IP sockets");
2229 goto error;
2230 }
2231 s->hostname = s->saddr->u.inet.host;
2232 }
2234 s->x_dirty_bitmap = g_strdup(qemu_opt_get(opts, "x-dirty-bitmap"));
2235 if (s->x_dirty_bitmap && strlen(s->x_dirty_bitmap) > NBD_MAX_STRING_SIZE) {
2236 error_setg(errp, "x-dirty-bitmap query too long to send to server");
2237 goto error;
2238 }
2240 s->reconnect_delay = qemu_opt_get_number(opts, "reconnect-delay", 0);
2242 ret = 0;
2244 error:
2245 qemu_opts_del(opts);
2246 return ret;
2247 }
2249 static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
2250 Error **errp)
2251 {
2252 int ret;
2253 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
2255 s->bs = bs;
2256 qemu_co_mutex_init(&s->send_mutex);
2257 qemu_co_queue_init(&s->free_sema);
2259 if (!yank_register_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name), errp)) {
2260 return -EEXIST;
2261 }
2263 ret = nbd_process_options(bs, options, errp);
2264 if (ret < 0) {
2265 goto fail;
2266 }
2268 nbd_init_connect_thread(s);
2270 /*
2271 * establish TCP connection, return error if it fails
2272 * TODO: Configurable retry-until-timeout behaviour.
2273 */
2274 if (nbd_establish_connection(bs, s->saddr, errp) < 0) {
2275 ret = -ECONNREFUSED;
2276 goto fail;
2277 }
2279 ret = nbd_client_handshake(bs, errp);
2280 if (ret < 0) {
2281 goto fail;
2282 }
2283 /* successfully connected */
2284 s->state = NBD_CLIENT_CONNECTED;
2286 s->connection_co = qemu_coroutine_create(nbd_connection_entry, s);
2287 bdrv_inc_in_flight(bs);
2288 aio_co_schedule(bdrv_get_aio_context(bs), s->connection_co);
2290 return 0;
2292 fail:
2293 nbd_clear_bdrvstate(bs);
2294 return ret;
2295 }
2297 static int nbd_co_flush(BlockDriverState *bs)
2298 {
2299 return nbd_client_co_flush(bs);
2300 }
2302 static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
2303 {
2304 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
2305 uint32_t min = s->info.min_block;
2306 uint32_t max = MIN_NON_ZERO(NBD_MAX_BUFFER_SIZE, s->info.max_block);
2308 /*
2309 * If the server did not advertise an alignment:
2310 * - a size that is not sector-aligned implies that an alignment
2311 * of 1 can be used to access those tail bytes
2312 * - advertisement of block status requires an alignment of 1, so
2313 * that we don't violate block layer constraints that block
2314 * status is always aligned (as we can't control whether the
2315 * server will report sub-sector extents, such as a hole at EOF
2316 * on an unaligned POSIX file)
2317 * - otherwise, assume the server is so old that we are safer avoiding
2318 * sub-sector requests
2319 */
2320 if (!min) {
2321 min = (!QEMU_IS_ALIGNED(s->info.size, BDRV_SECTOR_SIZE) ||
2322 s->info.base_allocation) ? 1 : BDRV_SECTOR_SIZE;
2323 }
2325 bs->bl.request_alignment = min;
2326 bs->bl.max_pdiscard = QEMU_ALIGN_DOWN(INT_MAX, min);
2327 bs->bl.max_pwrite_zeroes = max;
2328 bs->bl.max_transfer = max;
2330 if (s->info.opt_block &&
2331 s->info.opt_block > bs->bl.opt_transfer) {
2332 bs->bl.opt_transfer = s->info.opt_block;
2333 }
2334 }
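/*
 * Worked example (hypothetical servers): with no advertised min_block and a
 * 1000-byte (unaligned) export, request_alignment becomes 1; with an
 * advertised min_block of 4096 and max_block of 64 MiB, request_alignment is
 * 4096 and max_transfer is clamped to NBD_MAX_BUFFER_SIZE.
 */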
2336 static void nbd_close(BlockDriverState *bs)
2337 {
2338 nbd_client_close(bs);
2339 nbd_clear_bdrvstate(bs);
2340 }
2342 /*
2343 * NBD cannot truncate, but if the caller asks to truncate to the same size, or
2344 * to a smaller size with exact=false, there is no reason to fail the
2345 * operation.
2346 *
2347 * Preallocation mode is ignored since it does not seem useful to fail when
2348 * we never change anything.
2349 */
2350 static int coroutine_fn nbd_co_truncate(BlockDriverState *bs, int64_t offset,
2351 bool exact, PreallocMode prealloc,
2352 BdrvRequestFlags flags, Error **errp)
2353 {
2354 BDRVNBDState *s = bs->opaque;
2356 if (offset != s->info.size && exact) {
2357 error_setg(errp, "Cannot resize NBD nodes");
2358 return -ENOTSUP;
2359 }
2361 if (offset > s->info.size) {
2362 error_setg(errp, "Cannot grow NBD nodes");
2363 return -EINVAL;
2364 }
2366 return 0;
2367 }
2369 static int64_t nbd_getlength(BlockDriverState *bs)
2370 {
2371 BDRVNBDState *s = bs->opaque;
2373 return s->info.size;
2374 }
2376 static void nbd_refresh_filename(BlockDriverState *bs)
2377 {
2378 BDRVNBDState *s = bs->opaque;
2379 const char *host = NULL, *port = NULL, *path = NULL;
2380 size_t len = 0;
2382 if (s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
2383 const InetSocketAddress *inet = &s->saddr->u.inet;
2384 if (!inet->has_ipv4 && !inet->has_ipv6 && !inet->has_to) {
2385 host = inet->host;
2386 port = inet->port;
2387 }
2388 } else if (s->saddr->type == SOCKET_ADDRESS_TYPE_UNIX) {
2389 path = s->saddr->u.q_unix.path;
2390 } /* else can't represent as pseudo-filename */
2392 if (path && s->export) {
2393 len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
2394 "nbd+unix:///%s?socket=%s", s->export, path);
2395 } else if (path && !s->export) {
2396 len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
2397 "nbd+unix://?socket=%s", path);
2398 } else if (host && s->export) {
2399 len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
2400 "nbd://%s:%s/%s", host, port, s->export);
2401 } else if (host && !s->export) {
2402 len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
2403 "nbd://%s:%s", host, port);
2404 }
2405 if (len >= sizeof(bs->exact_filename)) {
2406 /* Name is too long to represent exactly, so leave it empty. */
2407 bs->exact_filename[0] = '\0';
2408 }
2409 }
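/*
 * For illustration (hypothetical addresses): the regenerated pseudo-filenames
 * follow the formats above, e.g.
 *
 *   nbd+unix:///disk0?socket=/tmp/nbd.sock   (UNIX socket, with export)
 *   nbd://localhost:10809/disk0              (TCP, with export)
 *   nbd://localhost:10809                    (TCP, no export)
 */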
2411 static char *nbd_dirname(BlockDriverState *bs, Error **errp)
2412 {
2413 /* The generic bdrv_dirname() implementation is able to work out some
2414 * directory name for NBD nodes, but that would be wrong. So far there is no
2415 * specification for how "export paths" would work, so NBD does not have
2416 * directory names. */
2417 error_setg(errp, "Cannot generate a base directory for NBD nodes");
2418 return NULL;
2419 }
2421 static const char *const nbd_strong_runtime_opts[] = {
2422 "path",
2423 "host",
2424 "port",
2425 "export",
2426 "tls-creds",
2427 "server.",
2429 NULL
2430 };
2432 static void nbd_cancel_in_flight(BlockDriverState *bs)
2433 {
2434 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
2436 reconnect_delay_timer_del(s);
2438 if (s->state == NBD_CLIENT_CONNECTING_WAIT) {
2439 s->state = NBD_CLIENT_CONNECTING_NOWAIT;
2440 qemu_co_queue_restart_all(&s->free_sema);
2441 }
2442 }
2444 static BlockDriver bdrv_nbd = {
2445 .format_name = "nbd",
2446 .protocol_name = "nbd",
2447 .instance_size = sizeof(BDRVNBDState),
2448 .bdrv_parse_filename = nbd_parse_filename,
2449 .bdrv_co_create_opts = bdrv_co_create_opts_simple,
2450 .create_opts = &bdrv_create_opts_simple,
2451 .bdrv_file_open = nbd_open,
2452 .bdrv_reopen_prepare = nbd_client_reopen_prepare,
2453 .bdrv_co_preadv = nbd_client_co_preadv,
2454 .bdrv_co_pwritev = nbd_client_co_pwritev,
2455 .bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes,
2456 .bdrv_close = nbd_close,
2457 .bdrv_co_flush_to_os = nbd_co_flush,
2458 .bdrv_co_pdiscard = nbd_client_co_pdiscard,
2459 .bdrv_refresh_limits = nbd_refresh_limits,
2460 .bdrv_co_truncate = nbd_co_truncate,
2461 .bdrv_getlength = nbd_getlength,
2462 .bdrv_detach_aio_context = nbd_client_detach_aio_context,
2463 .bdrv_attach_aio_context = nbd_client_attach_aio_context,
2464 .bdrv_co_drain_begin = nbd_client_co_drain_begin,
2465 .bdrv_co_drain_end = nbd_client_co_drain_end,
2466 .bdrv_refresh_filename = nbd_refresh_filename,
2467 .bdrv_co_block_status = nbd_client_co_block_status,
2468 .bdrv_dirname = nbd_dirname,
2469 .strong_runtime_opts = nbd_strong_runtime_opts,
2470 .bdrv_cancel_in_flight = nbd_cancel_in_flight,
2471 };
2473 static BlockDriver bdrv_nbd_tcp = {
2474 .format_name = "nbd",
2475 .protocol_name = "nbd+tcp",
2476 .instance_size = sizeof(BDRVNBDState),
2477 .bdrv_parse_filename = nbd_parse_filename,
2478 .bdrv_co_create_opts = bdrv_co_create_opts_simple,
2479 .create_opts = &bdrv_create_opts_simple,
2480 .bdrv_file_open = nbd_open,
2481 .bdrv_reopen_prepare = nbd_client_reopen_prepare,
2482 .bdrv_co_preadv = nbd_client_co_preadv,
2483 .bdrv_co_pwritev = nbd_client_co_pwritev,
2484 .bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes,
2485 .bdrv_close = nbd_close,
2486 .bdrv_co_flush_to_os = nbd_co_flush,
2487 .bdrv_co_pdiscard = nbd_client_co_pdiscard,
2488 .bdrv_refresh_limits = nbd_refresh_limits,
2489 .bdrv_co_truncate = nbd_co_truncate,
2490 .bdrv_getlength = nbd_getlength,
2491 .bdrv_detach_aio_context = nbd_client_detach_aio_context,
2492 .bdrv_attach_aio_context = nbd_client_attach_aio_context,
2493 .bdrv_co_drain_begin = nbd_client_co_drain_begin,
2494 .bdrv_co_drain_end = nbd_client_co_drain_end,
2495 .bdrv_refresh_filename = nbd_refresh_filename,
2496 .bdrv_co_block_status = nbd_client_co_block_status,
2497 .bdrv_dirname = nbd_dirname,
2498 .strong_runtime_opts = nbd_strong_runtime_opts,
2499 .bdrv_cancel_in_flight = nbd_cancel_in_flight,
2500 };
2502 static BlockDriver bdrv_nbd_unix = {
2503 .format_name = "nbd",
2504 .protocol_name = "nbd+unix",
2505 .instance_size = sizeof(BDRVNBDState),
2506 .bdrv_parse_filename = nbd_parse_filename,
2507 .bdrv_co_create_opts = bdrv_co_create_opts_simple,
2508 .create_opts = &bdrv_create_opts_simple,
2509 .bdrv_file_open = nbd_open,
2510 .bdrv_reopen_prepare = nbd_client_reopen_prepare,
2511 .bdrv_co_preadv = nbd_client_co_preadv,
2512 .bdrv_co_pwritev = nbd_client_co_pwritev,
2513 .bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes,
2514 .bdrv_close = nbd_close,
2515 .bdrv_co_flush_to_os = nbd_co_flush,
2516 .bdrv_co_pdiscard = nbd_client_co_pdiscard,
2517 .bdrv_refresh_limits = nbd_refresh_limits,
2518 .bdrv_co_truncate = nbd_co_truncate,
2519 .bdrv_getlength = nbd_getlength,
2520 .bdrv_detach_aio_context = nbd_client_detach_aio_context,
2521 .bdrv_attach_aio_context = nbd_client_attach_aio_context,
2522 .bdrv_co_drain_begin = nbd_client_co_drain_begin,
2523 .bdrv_co_drain_end = nbd_client_co_drain_end,
2524 .bdrv_refresh_filename = nbd_refresh_filename,
2525 .bdrv_co_block_status = nbd_client_co_block_status,
2526 .bdrv_dirname = nbd_dirname,
2527 .strong_runtime_opts = nbd_strong_runtime_opts,
2528 .bdrv_cancel_in_flight = nbd_cancel_in_flight,
2529 };
2531 static void bdrv_nbd_init(void)
2532 {
2533 bdrv_register(&bdrv_nbd);
2534 bdrv_register(&bdrv_nbd_tcp);
2535 bdrv_register(&bdrv_nbd_unix);
2536 }
2538 block_init(bdrv_nbd_init);