/*
 * QEMU Block driver for NBD
 *
 * Copyright (c) 2019 Virtuozzo International GmbH.
 * Copyright Red Hat
 * Copyright (C) 2008 Bull S.A.S.
 *     Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * Some parts:
 *    Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"

#include "trace.h"
#include "qemu/uri.h"
#include "qemu/option.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"

#include "qapi/qapi-visit-sockets.h"
#include "qapi/qmp/qstring.h"
#include "qapi/clone-visitor.h"

#include "block/qdict.h"
#include "block/nbd.h"
#include "block/block_int.h"
#include "block/coroutines.h"

#include "qemu/yank.h"

#define EN_OPTSTR ":exportname="
#define MAX_NBD_REQUESTS 16

#define COOKIE_TO_INDEX(cookie) ((cookie) - 1)
#define INDEX_TO_COOKIE(index)  ((index) + 1)
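
/*
 * Editorial note (not in the original source): request slots are indexed
 * 0..MAX_NBD_REQUESTS-1, while cookie value 0 is reserved by this driver to
 * mean "no reply header pending" (see the s->reply.cookie handling in
 * nbd_receive_replies() below).  The two macros above therefore shift by one
 * in each direction, e.g. slot 0 travels on the wire as cookie 1:
 *
 *     request->cookie = INDEX_TO_COOKIE(i);
 *     ind = COOKIE_TO_INDEX(s->reply.cookie);
 */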

typedef struct {
    Coroutine *coroutine;
    uint64_t offset;        /* original offset of the request */
    bool receiving;         /* sleeping in the yield in nbd_receive_replies */
} NBDClientRequest;

typedef enum NBDClientState {
    NBD_CLIENT_CONNECTING_WAIT,
    NBD_CLIENT_CONNECTING_NOWAIT,
    NBD_CLIENT_CONNECTED,
    NBD_CLIENT_QUIT
} NBDClientState;

typedef struct BDRVNBDState {
    QIOChannel *ioc; /* The current I/O channel */
    NBDExportInfo info;

    /*
     * Protects state, free_sema, in_flight, requests[].coroutine,
     * reconnect_delay_timer.
     */
    QemuMutex requests_lock;
    NBDClientState state;
    CoQueue free_sema;
    unsigned in_flight;
    NBDClientRequest requests[MAX_NBD_REQUESTS];
    QEMUTimer *reconnect_delay_timer;

    /* Protects sending data on the socket. */
    CoMutex send_mutex;

    /*
     * Protects receiving reply headers from the socket, as well as the
     * fields reply and requests[].receiving
     */
    CoMutex receive_mutex;
    NBDReply reply;

    QEMUTimer *open_timer;

    BlockDriverState *bs;

    /* Connection parameters */
    uint32_t reconnect_delay;
    uint32_t open_timeout;
    SocketAddress *saddr;
    char *export;
    char *tlscredsid;
    QCryptoTLSCreds *tlscreds;
    char *tlshostname;
    char *x_dirty_bitmap;
    bool alloc_depth;

    NBDClientConnection *conn;
} BDRVNBDState;
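
/*
 * Editorial sketch (not in the original source): requests_lock is a plain
 * QemuMutex so it can also be taken outside coroutine context (e.g. from
 * nbd_yank()), whereas send_mutex and receive_mutex are CoMutexes taken only
 * from coroutines.  A typical guarded state change looks like:
 *
 *     WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
 *         s->state = NBD_CLIENT_QUIT;
 *     }
 */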

static void nbd_yank(void *opaque);

static void nbd_clear_bdrvstate(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    nbd_client_connection_release(s->conn);
    s->conn = NULL;

    yank_unregister_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name));

    /* Must not leave timers behind that would access freed data */
    assert(!s->reconnect_delay_timer);
    assert(!s->open_timer);

    object_unref(OBJECT(s->tlscreds));
    qapi_free_SocketAddress(s->saddr);
    s->saddr = NULL;
    g_free(s->export);
    s->export = NULL;
    g_free(s->tlscredsid);
    s->tlscredsid = NULL;
    g_free(s->tlshostname);
    s->tlshostname = NULL;
    g_free(s->x_dirty_bitmap);
    s->x_dirty_bitmap = NULL;
}

/* Called with s->receive_mutex taken.  */
static bool coroutine_fn nbd_recv_coroutine_wake_one(NBDClientRequest *req)
{
    if (req->receiving) {
        req->receiving = false;
        aio_co_wake(req->coroutine);
        return true;
    }

    return false;
}

static void coroutine_fn nbd_recv_coroutines_wake(BDRVNBDState *s)
{
    int i;

    QEMU_LOCK_GUARD(&s->receive_mutex);
    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (nbd_recv_coroutine_wake_one(&s->requests[i])) {
            return;
        }
    }
}

/* Called with s->requests_lock held.  */
static void coroutine_fn nbd_channel_error_locked(BDRVNBDState *s, int ret)
{
    if (s->state == NBD_CLIENT_CONNECTED) {
        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
    }

    if (ret == -EIO) {
        if (s->state == NBD_CLIENT_CONNECTED) {
            s->state = s->reconnect_delay ? NBD_CLIENT_CONNECTING_WAIT :
                                            NBD_CLIENT_CONNECTING_NOWAIT;
        }
    } else {
        s->state = NBD_CLIENT_QUIT;
    }
}

static void coroutine_fn nbd_channel_error(BDRVNBDState *s, int ret)
{
    QEMU_LOCK_GUARD(&s->requests_lock);
    nbd_channel_error_locked(s, ret);
}

static void reconnect_delay_timer_del(BDRVNBDState *s)
{
    if (s->reconnect_delay_timer) {
        timer_free(s->reconnect_delay_timer);
        s->reconnect_delay_timer = NULL;
    }
}

static void reconnect_delay_timer_cb(void *opaque)
{
    BDRVNBDState *s = opaque;

    reconnect_delay_timer_del(s);
    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        if (s->state != NBD_CLIENT_CONNECTING_WAIT) {
            return;
        }
        s->state = NBD_CLIENT_CONNECTING_NOWAIT;
    }
    nbd_co_establish_connection_cancel(s->conn);
}

static void reconnect_delay_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
{
    assert(!s->reconnect_delay_timer);
    s->reconnect_delay_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
                                             QEMU_CLOCK_REALTIME,
                                             SCALE_NS,
                                             reconnect_delay_timer_cb, s);
    timer_mod(s->reconnect_delay_timer, expire_time_ns);
}

static void nbd_teardown_connection(BlockDriverState *bs)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;

    assert(!s->in_flight);

    if (s->ioc) {
        qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, s->bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;
    }

    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        s->state = NBD_CLIENT_QUIT;
    }
}

static void open_timer_del(BDRVNBDState *s)
{
    if (s->open_timer) {
        timer_free(s->open_timer);
        s->open_timer = NULL;
    }
}

static void open_timer_cb(void *opaque)
{
    BDRVNBDState *s = opaque;

    nbd_co_establish_connection_cancel(s->conn);
    open_timer_del(s);
}

static void open_timer_init(BDRVNBDState *s, uint64_t expire_time_ns)
{
    assert(!s->open_timer);
    s->open_timer = aio_timer_new(bdrv_get_aio_context(s->bs),
                                  QEMU_CLOCK_REALTIME,
                                  SCALE_NS,
                                  open_timer_cb, s);
    timer_mod(s->open_timer, expire_time_ns);
}

static bool nbd_client_will_reconnect(BDRVNBDState *s)
{
    /*
     * Called only after a socket error, so this is not performance sensitive.
     */
    QEMU_LOCK_GUARD(&s->requests_lock);
    return s->state == NBD_CLIENT_CONNECTING_WAIT;
}

/*
 * Update @bs with information learned during a completed negotiation process.
 * Return failure if the server's advertised options are incompatible with the
 * client's needs.
 */
static int nbd_handle_updated_info(BlockDriverState *bs, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int ret;

    if (s->x_dirty_bitmap) {
        if (!s->info.base_allocation) {
            error_setg(errp, "requested x-dirty-bitmap %s not found",
                       s->x_dirty_bitmap);
            return -EINVAL;
        }
        if (strcmp(s->x_dirty_bitmap, "qemu:allocation-depth") == 0) {
            s->alloc_depth = true;
        }
    }

    if (s->info.flags & NBD_FLAG_READ_ONLY) {
        ret = bdrv_apply_auto_read_only(bs, "NBD export is read-only", errp);
        if (ret < 0) {
            return ret;
        }
    }

    if (s->info.flags & NBD_FLAG_SEND_FUA) {
        bs->supported_write_flags = BDRV_REQ_FUA;
        bs->supported_zero_flags |= BDRV_REQ_FUA;
    }

    if (s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
        bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
        if (s->info.flags & NBD_FLAG_SEND_FAST_ZERO) {
            bs->supported_zero_flags |= BDRV_REQ_NO_FALLBACK;
        }
    }

    trace_nbd_client_handshake_success(s->export);

    return 0;
}

int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs,
                                                bool blocking, Error **errp)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int ret;
    IO_CODE();

    assert_bdrv_graph_readable();
    assert(!s->ioc);

    s->ioc = nbd_co_establish_connection(s->conn, &s->info, blocking, errp);
    if (!s->ioc) {
        return -ECONNREFUSED;
    }

    yank_register_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name), nbd_yank,
                           bs);

    ret = nbd_handle_updated_info(s->bs, NULL);
    if (ret < 0) {
        /*
         * We have connected, but must fail for other reasons.
         * Send NBD_CMD_DISC as a courtesy to the server.
         */
        NBDRequest request = { .type = NBD_CMD_DISC };

        nbd_send_request(s->ioc, &request);

        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;

        return ret;
    }

    qio_channel_set_blocking(s->ioc, false, NULL);
    qio_channel_set_follow_coroutine_ctx(s->ioc, true);

    /* successfully connected */
    WITH_QEMU_LOCK_GUARD(&s->requests_lock) {
        s->state = NBD_CLIENT_CONNECTED;
    }

    return 0;
}

/* Called with s->requests_lock held.  */
static bool nbd_client_connecting(BDRVNBDState *s)
{
    return s->state == NBD_CLIENT_CONNECTING_WAIT ||
           s->state == NBD_CLIENT_CONNECTING_NOWAIT;
}

/* Called with s->requests_lock taken.  */
static void coroutine_fn GRAPH_RDLOCK nbd_reconnect_attempt(BDRVNBDState *s)
{
    int ret;
    bool blocking = s->state == NBD_CLIENT_CONNECTING_WAIT;

    /*
     * Now we are sure that nobody is accessing the channel, and no one will
     * try until we set the state to CONNECTED.
     */
    assert(nbd_client_connecting(s));
    assert(s->in_flight == 1);

    trace_nbd_reconnect_attempt(s->bs->in_flight);

    if (blocking && !s->reconnect_delay_timer) {
        /*
         * It's the first reconnect attempt after switching to
         * NBD_CLIENT_CONNECTING_WAIT
         */
        g_assert(s->reconnect_delay);
        reconnect_delay_timer_init(s,
                                   qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
                                   s->reconnect_delay * NANOSECONDS_PER_SECOND);
    }

    /* Finalize previous connection if any */
    if (s->ioc) {
        yank_unregister_function(BLOCKDEV_YANK_INSTANCE(s->bs->node_name),
                                 nbd_yank, s->bs);
        object_unref(OBJECT(s->ioc));
        s->ioc = NULL;
    }

    qemu_mutex_unlock(&s->requests_lock);
    ret = nbd_co_do_establish_connection(s->bs, blocking, NULL);
    trace_nbd_reconnect_attempt_result(ret, s->bs->in_flight);
    qemu_mutex_lock(&s->requests_lock);

    /*
     * The reconnect attempt is done (maybe successfully, maybe not), so
     * we no longer need this timer. Delete it so it will not outlive
     * this I/O request (so draining removes all timers).
     */
    reconnect_delay_timer_del(s);
}

static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t cookie)
{
    int ret;
    uint64_t ind = COOKIE_TO_INDEX(cookie), ind2;
    QEMU_LOCK_GUARD(&s->receive_mutex);

    while (true) {
        if (s->reply.cookie == cookie) {
            /* We are done */
            return 0;
        }

        if (s->reply.cookie != 0) {
            /*
             * Some other request is being handled now. It should already be
             * woken by whoever set s->reply.cookie (or never wait in this
             * yield). So, we should not wake it here.
             */
            ind2 = COOKIE_TO_INDEX(s->reply.cookie);
            assert(!s->requests[ind2].receiving);

            s->requests[ind].receiving = true;
            qemu_co_mutex_unlock(&s->receive_mutex);

            qemu_coroutine_yield();
            /*
             * We may be woken for 2 reasons:
             * 1. From this function, executing in parallel coroutine, when our
             *    cookie is received.
             * 2. From nbd_co_receive_one_chunk(), when previous request is
             *    finished and s->reply.cookie set to 0.
             * Anyway, it's OK to lock the mutex and go to the next iteration.
             */

            qemu_co_mutex_lock(&s->receive_mutex);
            assert(!s->requests[ind].receiving);
            continue;
        }

        /* We are under mutex and cookie is 0. We have to do the dirty work. */
        assert(s->reply.cookie == 0);
        ret = nbd_receive_reply(s->bs, s->ioc, &s->reply, NULL);
        if (ret <= 0) {
            ret = ret ? ret : -EIO;
            nbd_channel_error(s, ret);
            return ret;
        }
        if (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply) {
            nbd_channel_error(s, -EINVAL);
            return -EINVAL;
        }
        ind2 = COOKIE_TO_INDEX(s->reply.cookie);
        if (ind2 >= MAX_NBD_REQUESTS || !s->requests[ind2].coroutine) {
            nbd_channel_error(s, -EINVAL);
            return -EINVAL;
        }
        if (s->reply.cookie == cookie) {
            /* We are done */
            return 0;
        }
        nbd_recv_coroutine_wake_one(&s->requests[ind2]);
    }
}

static int coroutine_fn GRAPH_RDLOCK
nbd_co_send_request(BlockDriverState *bs, NBDRequest *request,
                    QEMUIOVector *qiov)
{
    BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
    int rc, i = -1;

    qemu_mutex_lock(&s->requests_lock);
    while (s->in_flight == MAX_NBD_REQUESTS ||
           (s->state != NBD_CLIENT_CONNECTED && s->in_flight > 0)) {
        qemu_co_queue_wait(&s->free_sema, &s->requests_lock);
    }

    s->in_flight++;
    if (s->state != NBD_CLIENT_CONNECTED) {
        if (nbd_client_connecting(s)) {
            nbd_reconnect_attempt(s);
            qemu_co_queue_restart_all(&s->free_sema);
        }
        if (s->state != NBD_CLIENT_CONNECTED) {
            rc = -EIO;
            goto err;
        }
    }

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->requests[i].coroutine == NULL) {
            break;
        }
    }

    assert(i < MAX_NBD_REQUESTS);
    s->requests[i].coroutine = qemu_coroutine_self();
    s->requests[i].offset = request->from;
    s->requests[i].receiving = false;
    qemu_mutex_unlock(&s->requests_lock);

    qemu_co_mutex_lock(&s->send_mutex);
    request->cookie = INDEX_TO_COOKIE(i);

    assert(s->ioc);

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0 && qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
                                              NULL) < 0) {
            rc = -EIO;
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }
    qemu_co_mutex_unlock(&s->send_mutex);

    if (rc < 0) {
        qemu_mutex_lock(&s->requests_lock);
err:
        nbd_channel_error_locked(s, rc);
        if (i != -1) {
            s->requests[i].coroutine = NULL;
        }
        s->in_flight--;
        qemu_co_queue_next(&s->free_sema);
        qemu_mutex_unlock(&s->requests_lock);
    }
    return rc;
}
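
/*
 * Editorial sketch (not in the original source): a request's life cycle in
 * the functions above and in the receive path below is roughly
 *
 *     nbd_co_send_request()            claim free slot i, cookie = i + 1,
 *                                      write header (+ payload) under
 *                                      send_mutex
 *     nbd_receive_replies(s, cookie)   wait under receive_mutex until the
 *                                      reply header for our cookie arrives
 *     nbd_co_receive_one_chunk()       parse the chunk, then clear
 *                                      s->reply.cookie and wake the next
 *                                      waiter
 *     nbd_reply_chunk_iter_receive()   release slot i and in_flight on the
 *                                      final chunk
 */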

static inline uint16_t payload_advance16(uint8_t **payload)
{
    *payload += 2;
    return lduw_be_p(*payload - 2);
}

static inline uint32_t payload_advance32(uint8_t **payload)
{
    *payload += 4;
    return ldl_be_p(*payload - 4);
}

static inline uint64_t payload_advance64(uint8_t **payload)
{
    *payload += 8;
    return ldq_be_p(*payload - 8);
}
570 static int nbd_parse_offset_hole_payload(BDRVNBDState *s,
571 NBDStructuredReplyChunk *chunk,
572 uint8_t *payload, uint64_t orig_offset,
573 QEMUIOVector *qiov, Error **errp)
575 uint64_t offset;
576 uint32_t hole_size;
578 if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
579 error_setg(errp, "Protocol error: invalid payload for "
580 "NBD_REPLY_TYPE_OFFSET_HOLE");
581 return -EINVAL;
584 offset = payload_advance64(&payload);
585 hole_size = payload_advance32(&payload);
587 if (!hole_size || offset < orig_offset || hole_size > qiov->size ||
588 offset > orig_offset + qiov->size - hole_size) {
589 error_setg(errp, "Protocol error: server sent chunk exceeding requested"
590 " region");
591 return -EINVAL;
593 if (s->info.min_block &&
594 !QEMU_IS_ALIGNED(hole_size, s->info.min_block)) {
595 trace_nbd_structured_read_compliance("hole");
598 qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);
600 return 0;
604 * nbd_parse_blockstatus_payload
605 * Based on our request, we expect only one extent in reply, for the
606 * base:allocation context.
608 static int nbd_parse_blockstatus_payload(BDRVNBDState *s,
609 NBDStructuredReplyChunk *chunk,
610 uint8_t *payload, uint64_t orig_length,
611 NBDExtent *extent, Error **errp)
613 uint32_t context_id;
615 /* The server succeeded, so it must have sent [at least] one extent */
616 if (chunk->length < sizeof(context_id) + sizeof(*extent)) {
617 error_setg(errp, "Protocol error: invalid payload for "
618 "NBD_REPLY_TYPE_BLOCK_STATUS");
619 return -EINVAL;
622 context_id = payload_advance32(&payload);
623 if (s->info.context_id != context_id) {
624 error_setg(errp, "Protocol error: unexpected context id %d for "
625 "NBD_REPLY_TYPE_BLOCK_STATUS, when negotiated context "
626 "id is %d", context_id,
627 s->info.context_id);
628 return -EINVAL;
631 extent->length = payload_advance32(&payload);
632 extent->flags = payload_advance32(&payload);
634 if (extent->length == 0) {
635 error_setg(errp, "Protocol error: server sent status chunk with "
636 "zero length");
637 return -EINVAL;
641 * A server sending unaligned block status is in violation of the
642 * protocol, but as qemu-nbd 3.1 is such a server (at least for
643 * POSIX files that are not a multiple of 512 bytes, since qemu
644 * rounds files up to 512-byte multiples but lseek(SEEK_HOLE)
645 * still sees an implicit hole beyond the real EOF), it's nicer to
646 * work around the misbehaving server. If the request included
647 * more than the final unaligned block, truncate it back to an
648 * aligned result; if the request was only the final block, round
649 * up to the full block and change the status to fully-allocated
650 * (always a safe status, even if it loses information).
652 if (s->info.min_block && !QEMU_IS_ALIGNED(extent->length,
653 s->info.min_block)) {
654 trace_nbd_parse_blockstatus_compliance("extent length is unaligned");
655 if (extent->length > s->info.min_block) {
656 extent->length = QEMU_ALIGN_DOWN(extent->length,
657 s->info.min_block);
658 } else {
659 extent->length = s->info.min_block;
660 extent->flags = 0;
665 * We used NBD_CMD_FLAG_REQ_ONE, so the server should not have
666 * sent us any more than one extent, nor should it have included
667 * status beyond our request in that extent. However, it's easy
668 * enough to ignore the server's noncompliance without killing the
669 * connection; just ignore trailing extents, and clamp things to
670 * the length of our request.
672 if (chunk->length > sizeof(context_id) + sizeof(*extent)) {
673 trace_nbd_parse_blockstatus_compliance("more than one extent");
675 if (extent->length > orig_length) {
676 extent->length = orig_length;
677 trace_nbd_parse_blockstatus_compliance("extent length too large");
681 * HACK: if we are using x-dirty-bitmaps to access
682 * qemu:allocation-depth, treat all depths > 2 the same as 2,
683 * since nbd_client_co_block_status is only expecting the low two
684 * bits to be set.
686 if (s->alloc_depth && extent->flags > 2) {
687 extent->flags = 2;
690 return 0;
694 * nbd_parse_error_payload
695 * on success @errp contains message describing nbd error reply
697 static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
698 uint8_t *payload, int *request_ret,
699 Error **errp)
701 uint32_t error;
702 uint16_t message_size;
704 assert(chunk->type & (1 << 15));
706 if (chunk->length < sizeof(error) + sizeof(message_size)) {
707 error_setg(errp,
708 "Protocol error: invalid payload for structured error");
709 return -EINVAL;
712 error = nbd_errno_to_system_errno(payload_advance32(&payload));
713 if (error == 0) {
714 error_setg(errp, "Protocol error: server sent structured error chunk "
715 "with error = 0");
716 return -EINVAL;
719 *request_ret = -error;
720 message_size = payload_advance16(&payload);
722 if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
723 error_setg(errp, "Protocol error: server sent structured error chunk "
724 "with incorrect message size");
725 return -EINVAL;
728 /* TODO: Add a trace point to mention the server complaint */
730 /* TODO handle ERROR_OFFSET */
732 return 0;
735 static int coroutine_fn
736 nbd_co_receive_offset_data_payload(BDRVNBDState *s, uint64_t orig_offset,
737 QEMUIOVector *qiov, Error **errp)
739 QEMUIOVector sub_qiov;
740 uint64_t offset;
741 size_t data_size;
742 int ret;
743 NBDStructuredReplyChunk *chunk = &s->reply.structured;
745 assert(nbd_reply_is_structured(&s->reply));
747 /* The NBD spec requires at least one byte of payload */
748 if (chunk->length <= sizeof(offset)) {
749 error_setg(errp, "Protocol error: invalid payload for "
750 "NBD_REPLY_TYPE_OFFSET_DATA");
751 return -EINVAL;
754 if (nbd_read64(s->ioc, &offset, "OFFSET_DATA offset", errp) < 0) {
755 return -EIO;
758 data_size = chunk->length - sizeof(offset);
759 assert(data_size);
760 if (offset < orig_offset || data_size > qiov->size ||
761 offset > orig_offset + qiov->size - data_size) {
762 error_setg(errp, "Protocol error: server sent chunk exceeding requested"
763 " region");
764 return -EINVAL;
766 if (s->info.min_block && !QEMU_IS_ALIGNED(data_size, s->info.min_block)) {
767 trace_nbd_structured_read_compliance("data");
770 qemu_iovec_init(&sub_qiov, qiov->niov);
771 qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
772 ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
773 qemu_iovec_destroy(&sub_qiov);
775 return ret < 0 ? -EIO : 0;
778 #define NBD_MAX_MALLOC_PAYLOAD 1000
779 static coroutine_fn int nbd_co_receive_structured_payload(
780 BDRVNBDState *s, void **payload, Error **errp)
782 int ret;
783 uint32_t len;
785 assert(nbd_reply_is_structured(&s->reply));
787 len = s->reply.structured.length;
789 if (len == 0) {
790 return 0;
793 if (payload == NULL) {
794 error_setg(errp, "Unexpected structured payload");
795 return -EINVAL;
798 if (len > NBD_MAX_MALLOC_PAYLOAD) {
799 error_setg(errp, "Payload too large");
800 return -EINVAL;
803 *payload = g_new(char, len);
804 ret = nbd_read(s->ioc, *payload, len, "structured payload", errp);
805 if (ret < 0) {
806 g_free(*payload);
807 *payload = NULL;
808 return ret;
811 return 0;
815 * nbd_co_do_receive_one_chunk
816 * for simple reply:
817 * set request_ret to received reply error
818 * if qiov is not NULL: read payload to @qiov
819 * for structured reply chunk:
820 * if error chunk: read payload, set @request_ret, do not set @payload
821 * else if offset_data chunk: read payload data to @qiov, do not set @payload
822 * else: read payload to @payload
824 * If function fails, @errp contains corresponding error message, and the
825 * connection with the server is suspect. If it returns 0, then the
826 * transaction succeeded (although @request_ret may be a negative errno
827 * corresponding to the server's error reply), and errp is unchanged.
829 static coroutine_fn int nbd_co_do_receive_one_chunk(
830 BDRVNBDState *s, uint64_t cookie, bool only_structured,
831 int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
833 int ret;
834 int i = COOKIE_TO_INDEX(cookie);
835 void *local_payload = NULL;
836 NBDStructuredReplyChunk *chunk;
838 if (payload) {
839 *payload = NULL;
841 *request_ret = 0;
843 ret = nbd_receive_replies(s, cookie);
844 if (ret < 0) {
845 error_setg(errp, "Connection closed");
846 return -EIO;
848 assert(s->ioc);
850 assert(s->reply.cookie == cookie);
852 if (nbd_reply_is_simple(&s->reply)) {
853 if (only_structured) {
854 error_setg(errp, "Protocol error: simple reply when structured "
855 "reply chunk was expected");
856 return -EINVAL;
859 *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
860 if (*request_ret < 0 || !qiov) {
861 return 0;
864 return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
865 errp) < 0 ? -EIO : 0;
868 /* handle structured reply chunk */
869 assert(s->info.structured_reply);
870 chunk = &s->reply.structured;
872 if (chunk->type == NBD_REPLY_TYPE_NONE) {
873 if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
874 error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without"
875 " NBD_REPLY_FLAG_DONE flag set");
876 return -EINVAL;
878 if (chunk->length) {
879 error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk with"
880 " nonzero length");
881 return -EINVAL;
883 return 0;
886 if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
887 if (!qiov) {
888 error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk");
889 return -EINVAL;
892 return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
893 qiov, errp);
896 if (nbd_reply_type_is_error(chunk->type)) {
897 payload = &local_payload;
900 ret = nbd_co_receive_structured_payload(s, payload, errp);
901 if (ret < 0) {
902 return ret;
905 if (nbd_reply_type_is_error(chunk->type)) {
906 ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp);
907 g_free(local_payload);
908 return ret;
911 return 0;
915 * nbd_co_receive_one_chunk
916 * Read reply, wake up connection_co and set s->quit if needed.
917 * Return value is a fatal error code or normal nbd reply error code
919 static coroutine_fn int nbd_co_receive_one_chunk(
920 BDRVNBDState *s, uint64_t cookie, bool only_structured,
921 int *request_ret, QEMUIOVector *qiov, NBDReply *reply, void **payload,
922 Error **errp)
924 int ret = nbd_co_do_receive_one_chunk(s, cookie, only_structured,
925 request_ret, qiov, payload, errp);
927 if (ret < 0) {
928 memset(reply, 0, sizeof(*reply));
929 nbd_channel_error(s, ret);
930 } else {
931 /* For assert at loop start in nbd_connection_entry */
932 *reply = s->reply;
934 s->reply.cookie = 0;
936 nbd_recv_coroutines_wake(s);
938 return ret;
941 typedef struct NBDReplyChunkIter {
942 int ret;
943 int request_ret;
944 Error *err;
945 bool done, only_structured;
946 } NBDReplyChunkIter;
948 static void nbd_iter_channel_error(NBDReplyChunkIter *iter,
949 int ret, Error **local_err)
951 assert(local_err && *local_err);
952 assert(ret < 0);
954 if (!iter->ret) {
955 iter->ret = ret;
956 error_propagate(&iter->err, *local_err);
957 } else {
958 error_free(*local_err);
961 *local_err = NULL;
964 static void nbd_iter_request_error(NBDReplyChunkIter *iter, int ret)
966 assert(ret < 0);
968 if (!iter->request_ret) {
969 iter->request_ret = ret;
974 * NBD_FOREACH_REPLY_CHUNK
975 * The pointer stored in @payload requires g_free() to free it.
977 #define NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, structured, \
978 qiov, reply, payload) \
979 for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
980 nbd_reply_chunk_iter_receive(s, &iter, cookie, qiov, reply, payload);)
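/*
 * Editorial sketch (not in the original source): the macro above expands to a
 * for-loop header, so a caller uses it like this (compare
 * nbd_co_receive_return_code() below):
 *
 *     NBDReplyChunkIter iter;
 *
 *     NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, false, NULL, NULL, NULL) {
 *         body runs once per received reply chunk
 *     }
 *     afterwards iter.ret, iter.request_ret and iter.err hold the outcome
 */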
983 * nbd_reply_chunk_iter_receive
984 * The pointer stored in @payload requires g_free() to free it.
986 static bool coroutine_fn nbd_reply_chunk_iter_receive(BDRVNBDState *s,
987 NBDReplyChunkIter *iter,
988 uint64_t cookie,
989 QEMUIOVector *qiov,
990 NBDReply *reply,
991 void **payload)
993 int ret, request_ret;
994 NBDReply local_reply;
995 NBDStructuredReplyChunk *chunk;
996 Error *local_err = NULL;
998 if (iter->done) {
999 /* Previous iteration was last. */
1000 goto break_loop;
1003 if (reply == NULL) {
1004 reply = &local_reply;
1007 ret = nbd_co_receive_one_chunk(s, cookie, iter->only_structured,
1008 &request_ret, qiov, reply, payload,
1009 &local_err);
1010 if (ret < 0) {
1011 nbd_iter_channel_error(iter, ret, &local_err);
1012 } else if (request_ret < 0) {
1013 nbd_iter_request_error(iter, request_ret);
1016 /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
1017 if (nbd_reply_is_simple(reply) || iter->ret < 0) {
1018 goto break_loop;
1021 chunk = &reply->structured;
1022 iter->only_structured = true;
1024 if (chunk->type == NBD_REPLY_TYPE_NONE) {
1025 /* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */
1026 assert(chunk->flags & NBD_REPLY_FLAG_DONE);
1027 goto break_loop;
1030 if (chunk->flags & NBD_REPLY_FLAG_DONE) {
1031 /* This iteration is last. */
1032 iter->done = true;
1035 /* Execute the loop body */
1036 return true;
1038 break_loop:
1039 qemu_mutex_lock(&s->requests_lock);
1040 s->requests[COOKIE_TO_INDEX(cookie)].coroutine = NULL;
1041 s->in_flight--;
1042 qemu_co_queue_next(&s->free_sema);
1043 qemu_mutex_unlock(&s->requests_lock);
1045 return false;
1048 static int coroutine_fn
1049 nbd_co_receive_return_code(BDRVNBDState *s, uint64_t cookie,
1050 int *request_ret, Error **errp)
1052 NBDReplyChunkIter iter;
1054 NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, false, NULL, NULL, NULL) {
1055 /* nbd_reply_chunk_iter_receive does all the work */
1058 error_propagate(errp, iter.err);
1059 *request_ret = iter.request_ret;
1060 return iter.ret;
1063 static int coroutine_fn
1064 nbd_co_receive_cmdread_reply(BDRVNBDState *s, uint64_t cookie,
1065 uint64_t offset, QEMUIOVector *qiov,
1066 int *request_ret, Error **errp)
1068 NBDReplyChunkIter iter;
1069 NBDReply reply;
1070 void *payload = NULL;
1071 Error *local_err = NULL;
1073 NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, s->info.structured_reply,
1074 qiov, &reply, &payload)
1076 int ret;
1077 NBDStructuredReplyChunk *chunk = &reply.structured;
1079 assert(nbd_reply_is_structured(&reply));
1081 switch (chunk->type) {
1082 case NBD_REPLY_TYPE_OFFSET_DATA:
1084 * special cased in nbd_co_receive_one_chunk, data is already
1085 * in qiov
1087 break;
1088 case NBD_REPLY_TYPE_OFFSET_HOLE:
1089 ret = nbd_parse_offset_hole_payload(s, &reply.structured, payload,
1090 offset, qiov, &local_err);
1091 if (ret < 0) {
1092 nbd_channel_error(s, ret);
1093 nbd_iter_channel_error(&iter, ret, &local_err);
1095 break;
1096 default:
1097 if (!nbd_reply_type_is_error(chunk->type)) {
1098 /* not allowed reply type */
1099 nbd_channel_error(s, -EINVAL);
1100 error_setg(&local_err,
1101 "Unexpected reply type: %d (%s) for CMD_READ",
1102 chunk->type, nbd_reply_type_lookup(chunk->type));
1103 nbd_iter_channel_error(&iter, -EINVAL, &local_err);
1107 g_free(payload);
1108 payload = NULL;
1111 error_propagate(errp, iter.err);
1112 *request_ret = iter.request_ret;
1113 return iter.ret;
1116 static int coroutine_fn
1117 nbd_co_receive_blockstatus_reply(BDRVNBDState *s, uint64_t cookie,
1118 uint64_t length, NBDExtent *extent,
1119 int *request_ret, Error **errp)
1121 NBDReplyChunkIter iter;
1122 NBDReply reply;
1123 void *payload = NULL;
1124 Error *local_err = NULL;
1125 bool received = false;
1127 assert(!extent->length);
1128 NBD_FOREACH_REPLY_CHUNK(s, iter, cookie, false, NULL, &reply, &payload) {
1129 int ret;
1130 NBDStructuredReplyChunk *chunk = &reply.structured;
1132 assert(nbd_reply_is_structured(&reply));
1134 switch (chunk->type) {
1135 case NBD_REPLY_TYPE_BLOCK_STATUS:
1136 if (received) {
1137 nbd_channel_error(s, -EINVAL);
1138 error_setg(&local_err, "Several BLOCK_STATUS chunks in reply");
1139 nbd_iter_channel_error(&iter, -EINVAL, &local_err);
1141 received = true;
1143 ret = nbd_parse_blockstatus_payload(s, &reply.structured,
1144 payload, length, extent,
1145 &local_err);
1146 if (ret < 0) {
1147 nbd_channel_error(s, ret);
1148 nbd_iter_channel_error(&iter, ret, &local_err);
1150 break;
1151 default:
1152 if (!nbd_reply_type_is_error(chunk->type)) {
1153 nbd_channel_error(s, -EINVAL);
1154 error_setg(&local_err,
1155 "Unexpected reply type: %d (%s) "
1156 "for CMD_BLOCK_STATUS",
1157 chunk->type, nbd_reply_type_lookup(chunk->type));
1158 nbd_iter_channel_error(&iter, -EINVAL, &local_err);
1162 g_free(payload);
1163 payload = NULL;
1166 if (!extent->length && !iter.request_ret) {
1167 error_setg(&local_err, "Server did not reply with any status extents");
1168 nbd_iter_channel_error(&iter, -EIO, &local_err);
1171 error_propagate(errp, iter.err);
1172 *request_ret = iter.request_ret;
1173 return iter.ret;
1176 static int coroutine_fn GRAPH_RDLOCK
1177 nbd_co_request(BlockDriverState *bs, NBDRequest *request,
1178 QEMUIOVector *write_qiov)
1180 int ret, request_ret;
1181 Error *local_err = NULL;
1182 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1184 assert(request->type != NBD_CMD_READ);
1185 if (write_qiov) {
1186 assert(request->type == NBD_CMD_WRITE);
1187 assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
1188 } else {
1189 assert(request->type != NBD_CMD_WRITE);
1192 do {
1193 ret = nbd_co_send_request(bs, request, write_qiov);
1194 if (ret < 0) {
1195 continue;
1198 ret = nbd_co_receive_return_code(s, request->cookie,
1199 &request_ret, &local_err);
1200 if (local_err) {
1201 trace_nbd_co_request_fail(request->from, request->len,
1202 request->cookie, request->flags,
1203 request->type,
1204 nbd_cmd_lookup(request->type),
1205 ret, error_get_pretty(local_err));
1206 error_free(local_err);
1207 local_err = NULL;
1209 } while (ret < 0 && nbd_client_will_reconnect(s));
1211 return ret ? ret : request_ret;
1214 static int coroutine_fn GRAPH_RDLOCK
1215 nbd_client_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
1216 QEMUIOVector *qiov, BdrvRequestFlags flags)
1218 int ret, request_ret;
1219 Error *local_err = NULL;
1220 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1221 NBDRequest request = {
1222 .type = NBD_CMD_READ,
1223 .from = offset,
1224 .len = bytes,
1227 assert(bytes <= NBD_MAX_BUFFER_SIZE);
1229 if (!bytes) {
1230 return 0;
1233 * Work around the fact that the block layer doesn't do
1234 * byte-accurate sizing yet - if the read exceeds the server's
1235 * advertised size because the block layer rounded size up, then
1236 * truncate the request to the server and tail-pad with zero.
1238 if (offset >= s->info.size) {
1239 assert(bytes < BDRV_SECTOR_SIZE);
1240 qemu_iovec_memset(qiov, 0, 0, bytes);
1241 return 0;
1243 if (offset + bytes > s->info.size) {
1244 uint64_t slop = offset + bytes - s->info.size;
1246 assert(slop < BDRV_SECTOR_SIZE);
1247 qemu_iovec_memset(qiov, bytes - slop, 0, slop);
1248 request.len -= slop;
1251 do {
1252 ret = nbd_co_send_request(bs, &request, NULL);
1253 if (ret < 0) {
1254 continue;
1257 ret = nbd_co_receive_cmdread_reply(s, request.cookie, offset, qiov,
1258 &request_ret, &local_err);
1259 if (local_err) {
1260 trace_nbd_co_request_fail(request.from, request.len, request.cookie,
1261 request.flags, request.type,
1262 nbd_cmd_lookup(request.type),
1263 ret, error_get_pretty(local_err));
1264 error_free(local_err);
1265 local_err = NULL;
1267 } while (ret < 0 && nbd_client_will_reconnect(s));
1269 return ret ? ret : request_ret;
1272 static int coroutine_fn GRAPH_RDLOCK
1273 nbd_client_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
1274 QEMUIOVector *qiov, BdrvRequestFlags flags)
1276 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1277 NBDRequest request = {
1278 .type = NBD_CMD_WRITE,
1279 .from = offset,
1280 .len = bytes,
1283 assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
1284 if (flags & BDRV_REQ_FUA) {
1285 assert(s->info.flags & NBD_FLAG_SEND_FUA);
1286 request.flags |= NBD_CMD_FLAG_FUA;
1289 assert(bytes <= NBD_MAX_BUFFER_SIZE);
1291 if (!bytes) {
1292 return 0;
1294 return nbd_co_request(bs, &request, qiov);
1297 static int coroutine_fn GRAPH_RDLOCK
1298 nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
1299 BdrvRequestFlags flags)
1301 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1302 NBDRequest request = {
1303 .type = NBD_CMD_WRITE_ZEROES,
1304 .from = offset,
1305 .len = bytes, /* .len is uint32_t actually */
1308 assert(bytes <= UINT32_MAX); /* rely on max_pwrite_zeroes */
1310 assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
1311 if (!(s->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
1312 return -ENOTSUP;
1315 if (flags & BDRV_REQ_FUA) {
1316 assert(s->info.flags & NBD_FLAG_SEND_FUA);
1317 request.flags |= NBD_CMD_FLAG_FUA;
1319 if (!(flags & BDRV_REQ_MAY_UNMAP)) {
1320 request.flags |= NBD_CMD_FLAG_NO_HOLE;
1322 if (flags & BDRV_REQ_NO_FALLBACK) {
1323 assert(s->info.flags & NBD_FLAG_SEND_FAST_ZERO);
1324 request.flags |= NBD_CMD_FLAG_FAST_ZERO;
1327 if (!bytes) {
1328 return 0;
1330 return nbd_co_request(bs, &request, NULL);
1333 static int coroutine_fn GRAPH_RDLOCK nbd_client_co_flush(BlockDriverState *bs)
1335 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1336 NBDRequest request = { .type = NBD_CMD_FLUSH };
1338 if (!(s->info.flags & NBD_FLAG_SEND_FLUSH)) {
1339 return 0;
1342 request.from = 0;
1343 request.len = 0;
1345 return nbd_co_request(bs, &request, NULL);
1348 static int coroutine_fn GRAPH_RDLOCK
1349 nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
1351 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1352 NBDRequest request = {
1353 .type = NBD_CMD_TRIM,
1354 .from = offset,
1355 .len = bytes, /* len is uint32_t */
1358 assert(bytes <= UINT32_MAX); /* rely on max_pdiscard */
1360 assert(!(s->info.flags & NBD_FLAG_READ_ONLY));
1361 if (!(s->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
1362 return 0;
1365 return nbd_co_request(bs, &request, NULL);
1368 static int coroutine_fn GRAPH_RDLOCK nbd_client_co_block_status(
1369 BlockDriverState *bs, bool want_zero, int64_t offset, int64_t bytes,
1370 int64_t *pnum, int64_t *map, BlockDriverState **file)
1372 int ret, request_ret;
1373 NBDExtent extent = { 0 };
1374 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1375 Error *local_err = NULL;
1377 NBDRequest request = {
1378 .type = NBD_CMD_BLOCK_STATUS,
1379 .from = offset,
1380 .len = MIN(QEMU_ALIGN_DOWN(INT_MAX, bs->bl.request_alignment),
1381 MIN(bytes, s->info.size - offset)),
1382 .flags = NBD_CMD_FLAG_REQ_ONE,
1385 if (!s->info.base_allocation) {
1386 *pnum = bytes;
1387 *map = offset;
1388 *file = bs;
1389 return BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
1393 * Work around the fact that the block layer doesn't do
1394 * byte-accurate sizing yet - if the status request exceeds the
1395 * server's advertised size because the block layer rounded size
1396 * up, we truncated the request to the server (above), or are
1397 * called on just the hole.
1399 if (offset >= s->info.size) {
1400 *pnum = bytes;
1401 assert(bytes < BDRV_SECTOR_SIZE);
1402 /* Intentionally don't report offset_valid for the hole */
1403 return BDRV_BLOCK_ZERO;
1406 if (s->info.min_block) {
1407 assert(QEMU_IS_ALIGNED(request.len, s->info.min_block));
1409 do {
1410 ret = nbd_co_send_request(bs, &request, NULL);
1411 if (ret < 0) {
1412 continue;
1415 ret = nbd_co_receive_blockstatus_reply(s, request.cookie, bytes,
1416 &extent, &request_ret,
1417 &local_err);
1418 if (local_err) {
1419 trace_nbd_co_request_fail(request.from, request.len, request.cookie,
1420 request.flags, request.type,
1421 nbd_cmd_lookup(request.type),
1422 ret, error_get_pretty(local_err));
1423 error_free(local_err);
1424 local_err = NULL;
1426 } while (ret < 0 && nbd_client_will_reconnect(s));
1428 if (ret < 0 || request_ret < 0) {
1429 return ret ? ret : request_ret;
1432 assert(extent.length);
1433 *pnum = extent.length;
1434 *map = offset;
1435 *file = bs;
1436 return (extent.flags & NBD_STATE_HOLE ? 0 : BDRV_BLOCK_DATA) |
1437 (extent.flags & NBD_STATE_ZERO ? BDRV_BLOCK_ZERO : 0) |
1438 BDRV_BLOCK_OFFSET_VALID;
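/*
 * Editorial note (not in the original source): the return expression above
 * maps the NBD extent flags onto block-layer status bits as follows:
 *
 *     (no flags)                      -> BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID
 *     NBD_STATE_ZERO                  -> BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO | BDRV_BLOCK_OFFSET_VALID
 *     NBD_STATE_HOLE                  -> BDRV_BLOCK_OFFSET_VALID
 *     NBD_STATE_HOLE | NBD_STATE_ZERO -> BDRV_BLOCK_ZERO | BDRV_BLOCK_OFFSET_VALID
 */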
1441 static int nbd_client_reopen_prepare(BDRVReopenState *state,
1442 BlockReopenQueue *queue, Error **errp)
1444 BDRVNBDState *s = (BDRVNBDState *)state->bs->opaque;
1446 if ((state->flags & BDRV_O_RDWR) && (s->info.flags & NBD_FLAG_READ_ONLY)) {
1447 error_setg(errp, "Can't reopen read-only NBD mount as read/write");
1448 return -EACCES;
1450 return 0;
1453 static void nbd_yank(void *opaque)
1455 BlockDriverState *bs = opaque;
1456 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1458 QEMU_LOCK_GUARD(&s->requests_lock);
1459 qio_channel_shutdown(s->ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
1460 s->state = NBD_CLIENT_QUIT;
1463 static void nbd_client_close(BlockDriverState *bs)
1465 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1466 NBDRequest request = { .type = NBD_CMD_DISC };
1468 if (s->ioc) {
1469 nbd_send_request(s->ioc, &request);
1472 nbd_teardown_connection(bs);
1477 * Parse nbd_open options
1480 static int nbd_parse_uri(const char *filename, QDict *options)
1482 URI *uri;
1483 const char *p;
1484 QueryParams *qp = NULL;
1485 int ret = 0;
1486 bool is_unix;
1488 uri = uri_parse(filename);
1489 if (!uri) {
1490 return -EINVAL;
1493 /* transport */
1494 if (!g_strcmp0(uri->scheme, "nbd")) {
1495 is_unix = false;
1496 } else if (!g_strcmp0(uri->scheme, "nbd+tcp")) {
1497 is_unix = false;
1498 } else if (!g_strcmp0(uri->scheme, "nbd+unix")) {
1499 is_unix = true;
1500 } else {
1501 ret = -EINVAL;
1502 goto out;
1505 p = uri->path ? uri->path : "";
1506 if (p[0] == '/') {
1507 p++;
1509 if (p[0]) {
1510 qdict_put_str(options, "export", p);
1513 qp = query_params_parse(uri->query);
1514 if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
1515 ret = -EINVAL;
1516 goto out;
1519 if (is_unix) {
1520 /* nbd+unix:///export?socket=path */
1521 if (uri->server || uri->port || strcmp(qp->p[0].name, "socket")) {
1522 ret = -EINVAL;
1523 goto out;
1525 qdict_put_str(options, "server.type", "unix");
1526 qdict_put_str(options, "server.path", qp->p[0].value);
1527 } else {
1528 QString *host;
1529 char *port_str;
1531 /* nbd[+tcp]://host[:port]/export */
1532 if (!uri->server) {
1533 ret = -EINVAL;
1534 goto out;
1537 /* strip braces from literal IPv6 address */
1538 if (uri->server[0] == '[') {
1539 host = qstring_from_substr(uri->server, 1,
1540 strlen(uri->server) - 1);
1541 } else {
1542 host = qstring_from_str(uri->server);
1545 qdict_put_str(options, "server.type", "inet");
1546 qdict_put(options, "server.host", host);
1548 port_str = g_strdup_printf("%d", uri->port ?: NBD_DEFAULT_PORT);
1549 qdict_put_str(options, "server.port", port_str);
1550 g_free(port_str);
1553 out:
1554 if (qp) {
1555 query_params_free(qp);
1557 uri_free(uri);
1558 return ret;
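/*
 * Editorial examples (not in the original source) of URIs accepted by the
 * parser above:
 *
 *     nbd://localhost:10809/my-export
 *     nbd+tcp://localhost/my-export        (port defaults to NBD_DEFAULT_PORT)
 *     nbd+unix:///my-export?socket=/tmp/nbd.sock
 */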
1561 static bool nbd_has_filename_options_conflict(QDict *options, Error **errp)
1563 const QDictEntry *e;
1565 for (e = qdict_first(options); e; e = qdict_next(options, e)) {
1566 if (!strcmp(e->key, "host") ||
1567 !strcmp(e->key, "port") ||
1568 !strcmp(e->key, "path") ||
1569 !strcmp(e->key, "export") ||
1570 strstart(e->key, "server.", NULL))
1572 error_setg(errp, "Option '%s' cannot be used with a file name",
1573 e->key);
1574 return true;
1578 return false;
1581 static void nbd_parse_filename(const char *filename, QDict *options,
1582 Error **errp)
1584 g_autofree char *file = NULL;
1585 char *export_name;
1586 const char *host_spec;
1587 const char *unixpath;
1589 if (nbd_has_filename_options_conflict(options, errp)) {
1590 return;
1593 if (strstr(filename, "://")) {
1594 int ret = nbd_parse_uri(filename, options);
1595 if (ret < 0) {
1596 error_setg(errp, "No valid URL specified");
1598 return;
1601 file = g_strdup(filename);
1603 export_name = strstr(file, EN_OPTSTR);
1604 if (export_name) {
1605 if (export_name[strlen(EN_OPTSTR)] == 0) {
1606 return;
1608 export_name[0] = 0; /* truncate 'file' */
1609 export_name += strlen(EN_OPTSTR);
1611 qdict_put_str(options, "export", export_name);
1614 /* extract the host_spec - fail if it's not nbd:... */
1615 if (!strstart(file, "nbd:", &host_spec)) {
1616 error_setg(errp, "File name string for NBD must start with 'nbd:'");
1617 return;
1620 if (!*host_spec) {
1621 return;
1624 /* are we a UNIX or TCP socket? */
1625 if (strstart(host_spec, "unix:", &unixpath)) {
1626 qdict_put_str(options, "server.type", "unix");
1627 qdict_put_str(options, "server.path", unixpath);
1628 } else {
1629 InetSocketAddress *addr = g_new(InetSocketAddress, 1);
1631 if (inet_parse(addr, host_spec, errp)) {
1632 goto out_inet;
1635 qdict_put_str(options, "server.type", "inet");
1636 qdict_put_str(options, "server.host", addr->host);
1637 qdict_put_str(options, "server.port", addr->port);
1638 out_inet:
1639 qapi_free_InetSocketAddress(addr);
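/*
 * Editorial examples (not in the original source) of the legacy pseudo-file
 * names handled above, in addition to the URI forms:
 *
 *     nbd:localhost:10809:exportname=my-export
 *     nbd:unix:/tmp/nbd.sock:exportname=my-export
 */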
1643 static bool nbd_process_legacy_socket_options(QDict *output_options,
1644 QemuOpts *legacy_opts,
1645 Error **errp)
1647 const char *path = qemu_opt_get(legacy_opts, "path");
1648 const char *host = qemu_opt_get(legacy_opts, "host");
1649 const char *port = qemu_opt_get(legacy_opts, "port");
1650 const QDictEntry *e;
1652 if (!path && !host && !port) {
1653 return true;
1656 for (e = qdict_first(output_options); e; e = qdict_next(output_options, e))
1658 if (strstart(e->key, "server.", NULL)) {
1659 error_setg(errp, "Cannot use 'server' and path/host/port at the "
1660 "same time");
1661 return false;
1665 if (path && host) {
1666 error_setg(errp, "path and host may not be used at the same time");
1667 return false;
1668 } else if (path) {
1669 if (port) {
1670 error_setg(errp, "port may not be used without host");
1671 return false;
1674 qdict_put_str(output_options, "server.type", "unix");
1675 qdict_put_str(output_options, "server.path", path);
1676 } else if (host) {
1677 qdict_put_str(output_options, "server.type", "inet");
1678 qdict_put_str(output_options, "server.host", host);
1679 qdict_put_str(output_options, "server.port",
1680 port ?: stringify(NBD_DEFAULT_PORT));
1683 return true;
1686 static SocketAddress *nbd_config(BDRVNBDState *s, QDict *options,
1687 Error **errp)
1689 SocketAddress *saddr = NULL;
1690 QDict *addr = NULL;
1691 Visitor *iv = NULL;
1693 qdict_extract_subqdict(options, &addr, "server.");
1694 if (!qdict_size(addr)) {
1695 error_setg(errp, "NBD server address missing");
1696 goto done;
1699 iv = qobject_input_visitor_new_flat_confused(addr, errp);
1700 if (!iv) {
1701 goto done;
1704 if (!visit_type_SocketAddress(iv, NULL, &saddr, errp)) {
1705 goto done;
1708 if (socket_address_parse_named_fd(saddr, errp) < 0) {
1709 qapi_free_SocketAddress(saddr);
1710 saddr = NULL;
1711 goto done;
1714 done:
1715 qobject_unref(addr);
1716 visit_free(iv);
1717 return saddr;
1720 static QCryptoTLSCreds *nbd_get_tls_creds(const char *id, Error **errp)
1722 Object *obj;
1723 QCryptoTLSCreds *creds;
1725 obj = object_resolve_path_component(
1726 object_get_objects_root(), id);
1727 if (!obj) {
1728 error_setg(errp, "No TLS credentials with id '%s'",
1729 id);
1730 return NULL;
1732 creds = (QCryptoTLSCreds *)
1733 object_dynamic_cast(obj, TYPE_QCRYPTO_TLS_CREDS);
1734 if (!creds) {
1735 error_setg(errp, "Object with id '%s' is not TLS credentials",
1736 id);
1737 return NULL;
1740 if (!qcrypto_tls_creds_check_endpoint(creds,
1741 QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT,
1742 errp)) {
1743 return NULL;
1745 object_ref(obj);
1746 return creds;
1750 static QemuOptsList nbd_runtime_opts = {
1751 .name = "nbd",
1752 .head = QTAILQ_HEAD_INITIALIZER(nbd_runtime_opts.head),
1753 .desc = {
1755 .name = "host",
1756 .type = QEMU_OPT_STRING,
1757 .help = "TCP host to connect to",
1760 .name = "port",
1761 .type = QEMU_OPT_STRING,
1762 .help = "TCP port to connect to",
1765 .name = "path",
1766 .type = QEMU_OPT_STRING,
1767 .help = "Unix socket path to connect to",
1770 .name = "export",
1771 .type = QEMU_OPT_STRING,
1772 .help = "Name of the NBD export to open",
1775 .name = "tls-creds",
1776 .type = QEMU_OPT_STRING,
1777 .help = "ID of the TLS credentials to use",
1780 .name = "tls-hostname",
1781 .type = QEMU_OPT_STRING,
1782 .help = "Override hostname for validating TLS x509 certificate",
1785 .name = "x-dirty-bitmap",
1786 .type = QEMU_OPT_STRING,
1787 .help = "experimental: expose named dirty bitmap in place of "
1788 "block status",
1791 .name = "reconnect-delay",
1792 .type = QEMU_OPT_NUMBER,
1793 .help = "On an unexpected disconnect, the nbd client tries to "
1794 "connect again until succeeding or encountering a serious "
1795 "error. During the first @reconnect-delay seconds, all "
1796 "requests are paused and will be rerun on a successful "
1797 "reconnect. After that time, any delayed requests and all "
1798 "future requests before a successful reconnect will "
1799 "immediately fail. Default 0",
1802 .name = "open-timeout",
1803 .type = QEMU_OPT_NUMBER,
1804 .help = "In seconds. If zero, the nbd driver tries the connection "
1805 "only once, and fails to open if the connection fails. "
1806 "If non-zero, the nbd driver will repeat connection "
1807 "attempts until successful or until @open-timeout seconds "
1808 "have elapsed. Default 0",
1810 { /* end of list */ }
1814 static int nbd_process_options(BlockDriverState *bs, QDict *options,
1815 Error **errp)
1817 BDRVNBDState *s = bs->opaque;
1818 QemuOpts *opts;
1819 int ret = -EINVAL;
1821 opts = qemu_opts_create(&nbd_runtime_opts, NULL, 0, &error_abort);
1822 if (!qemu_opts_absorb_qdict(opts, options, errp)) {
1823 goto error;
1826 /* Translate @host, @port, and @path to a SocketAddress */
1827 if (!nbd_process_legacy_socket_options(options, opts, errp)) {
1828 goto error;
1831 /* Pop the config into our state object. Exit if invalid. */
1832 s->saddr = nbd_config(s, options, errp);
1833 if (!s->saddr) {
1834 goto error;
1837 s->export = g_strdup(qemu_opt_get(opts, "export"));
1838 if (s->export && strlen(s->export) > NBD_MAX_STRING_SIZE) {
1839 error_setg(errp, "export name too long to send to server");
1840 goto error;
1843 s->tlscredsid = g_strdup(qemu_opt_get(opts, "tls-creds"));
1844 if (s->tlscredsid) {
1845 s->tlscreds = nbd_get_tls_creds(s->tlscredsid, errp);
1846 if (!s->tlscreds) {
1847 goto error;
1850 s->tlshostname = g_strdup(qemu_opt_get(opts, "tls-hostname"));
1851 if (!s->tlshostname &&
1852 s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
1853 s->tlshostname = g_strdup(s->saddr->u.inet.host);
1857 s->x_dirty_bitmap = g_strdup(qemu_opt_get(opts, "x-dirty-bitmap"));
1858 if (s->x_dirty_bitmap && strlen(s->x_dirty_bitmap) > NBD_MAX_STRING_SIZE) {
1859 error_setg(errp, "x-dirty-bitmap query too long to send to server");
1860 goto error;
1863 s->reconnect_delay = qemu_opt_get_number(opts, "reconnect-delay", 0);
1864 s->open_timeout = qemu_opt_get_number(opts, "open-timeout", 0);
1866 ret = 0;
1868 error:
1869 qemu_opts_del(opts);
1870 return ret;
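/*
 * Editorial example (not in the original source): the options consumed above
 * correspond to a -blockdev/blockdev-add description along the lines of
 *
 *     -blockdev '{"driver": "nbd", "node-name": "nbd0",
 *                 "server": {"type": "inet",
 *                            "host": "localhost", "port": "10809"},
 *                 "export": "my-export",
 *                 "reconnect-delay": 10}'
 *
 * (the exact QAPI member names are defined in qapi/block-core.json, not here).
 */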
1873 static int nbd_open(BlockDriverState *bs, QDict *options, int flags,
1874 Error **errp)
1876 int ret;
1877 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1879 s->bs = bs;
1880 qemu_mutex_init(&s->requests_lock);
1881 qemu_co_queue_init(&s->free_sema);
1882 qemu_co_mutex_init(&s->send_mutex);
1883 qemu_co_mutex_init(&s->receive_mutex);
1885 if (!yank_register_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name), errp)) {
1886 return -EEXIST;
1889 ret = nbd_process_options(bs, options, errp);
1890 if (ret < 0) {
1891 goto fail;
1894 s->conn = nbd_client_connection_new(s->saddr, true, s->export,
1895 s->x_dirty_bitmap, s->tlscreds,
1896 s->tlshostname);
1898 if (s->open_timeout) {
1899 nbd_client_connection_enable_retry(s->conn);
1900 open_timer_init(s, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) +
1901 s->open_timeout * NANOSECONDS_PER_SECOND);
1904 s->state = NBD_CLIENT_CONNECTING_WAIT;
1905 ret = nbd_do_establish_connection(bs, true, errp);
1906 if (ret < 0) {
1907 goto fail;
1911 * The connect attempt is done, so we no longer need this timer.
1912 * Delete it, because we do not want it to be around when this node
1913 * is drained or closed.
1915 open_timer_del(s);
1917 nbd_client_connection_enable_retry(s->conn);
1919 return 0;
1921 fail:
1922 open_timer_del(s);
1923 nbd_clear_bdrvstate(bs);
1924 return ret;
1927 static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
1929 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
1930 uint32_t min = s->info.min_block;
1931 uint32_t max = MIN_NON_ZERO(NBD_MAX_BUFFER_SIZE, s->info.max_block);
1934 * If the server did not advertise an alignment:
1935 * - a size that is not sector-aligned implies that an alignment
1936 * of 1 can be used to access those tail bytes
1937 * - advertisement of block status requires an alignment of 1, so
1938 * that we don't violate block layer constraints that block
1939 * status is always aligned (as we can't control whether the
1940 * server will report sub-sector extents, such as a hole at EOF
1941 * on an unaligned POSIX file)
1942 * - otherwise, assume the server is so old that we are safer avoiding
1943 * sub-sector requests
1945 if (!min) {
1946 min = (!QEMU_IS_ALIGNED(s->info.size, BDRV_SECTOR_SIZE) ||
1947 s->info.base_allocation) ? 1 : BDRV_SECTOR_SIZE;
1950 bs->bl.request_alignment = min;
1951 bs->bl.max_pdiscard = QEMU_ALIGN_DOWN(INT_MAX, min);
1952 bs->bl.max_pwrite_zeroes = max;
1953 bs->bl.max_transfer = max;
1955 if (s->info.opt_block &&
1956 s->info.opt_block > bs->bl.opt_transfer) {
1957 bs->bl.opt_transfer = s->info.opt_block;
1961 static void nbd_close(BlockDriverState *bs)
1963 nbd_client_close(bs);
1964 nbd_clear_bdrvstate(bs);
1968 * NBD cannot truncate, but if the caller asks to truncate to the same size, or
1969 * to a smaller size with exact=false, there is no reason to fail the
1970 * operation.
1972 * Preallocation mode is ignored since it does not seems useful to fail when
1973 * we never change anything.
1975 static int coroutine_fn nbd_co_truncate(BlockDriverState *bs, int64_t offset,
1976 bool exact, PreallocMode prealloc,
1977 BdrvRequestFlags flags, Error **errp)
1979 BDRVNBDState *s = bs->opaque;
1981 if (offset != s->info.size && exact) {
1982 error_setg(errp, "Cannot resize NBD nodes");
1983 return -ENOTSUP;
1986 if (offset > s->info.size) {
1987 error_setg(errp, "Cannot grow NBD nodes");
1988 return -EINVAL;
1991 return 0;
1994 static int64_t coroutine_fn nbd_co_getlength(BlockDriverState *bs)
1996 BDRVNBDState *s = bs->opaque;
1998 return s->info.size;
2001 static void nbd_refresh_filename(BlockDriverState *bs)
2003 BDRVNBDState *s = bs->opaque;
2004 const char *host = NULL, *port = NULL, *path = NULL;
2005 size_t len = 0;
2007 if (s->saddr->type == SOCKET_ADDRESS_TYPE_INET) {
2008 const InetSocketAddress *inet = &s->saddr->u.inet;
2009 if (!inet->has_ipv4 && !inet->has_ipv6 && !inet->has_to) {
2010 host = inet->host;
2011 port = inet->port;
2013 } else if (s->saddr->type == SOCKET_ADDRESS_TYPE_UNIX) {
2014 path = s->saddr->u.q_unix.path;
2015 } /* else can't represent as pseudo-filename */
2017 if (path && s->export) {
2018 len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
2019 "nbd+unix:///%s?socket=%s", s->export, path);
2020 } else if (path && !s->export) {
2021 len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
2022 "nbd+unix://?socket=%s", path);
2023 } else if (host && s->export) {
2024 len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
2025 "nbd://%s:%s/%s", host, port, s->export);
2026 } else if (host && !s->export) {
2027 len = snprintf(bs->exact_filename, sizeof(bs->exact_filename),
2028 "nbd://%s:%s", host, port);
2030 if (len >= sizeof(bs->exact_filename)) {
2031 /* Name is too long to represent exactly, so leave it empty. */
2032 bs->exact_filename[0] = '\0';
2036 static char *nbd_dirname(BlockDriverState *bs, Error **errp)
2038 /* The generic bdrv_dirname() implementation is able to work out some
2039 * directory name for NBD nodes, but that would be wrong. So far there is no
2040 * specification for how "export paths" would work, so NBD does not have
2041 * directory names. */
2042 error_setg(errp, "Cannot generate a base directory for NBD nodes");
2043 return NULL;
2046 static const char *const nbd_strong_runtime_opts[] = {
2047 "path",
2048 "host",
2049 "port",
2050 "export",
2051 "tls-creds",
2052 "tls-hostname",
2053 "server.",
2055 NULL
2058 static void nbd_cancel_in_flight(BlockDriverState *bs)
2060 BDRVNBDState *s = (BDRVNBDState *)bs->opaque;
2062 reconnect_delay_timer_del(s);
2064 qemu_mutex_lock(&s->requests_lock);
2065 if (s->state == NBD_CLIENT_CONNECTING_WAIT) {
2066 s->state = NBD_CLIENT_CONNECTING_NOWAIT;
2068 qemu_mutex_unlock(&s->requests_lock);
2070 nbd_co_establish_connection_cancel(s->conn);
2073 static void nbd_attach_aio_context(BlockDriverState *bs,
2074 AioContext *new_context)
2076 BDRVNBDState *s = bs->opaque;
2078 /* The open_timer is used only during nbd_open() */
2079 assert(!s->open_timer);
2082 * The reconnect_delay_timer is scheduled in I/O paths when the
2083 * connection is lost, to cancel the reconnection attempt after a
2084 * given time. Once this attempt is done (successfully or not),
2085 * nbd_reconnect_attempt() ensures the timer is deleted before the
2086 * respective I/O request is resumed.
2087 * Since the AioContext can only be changed when a node is drained,
2088 * the reconnect_delay_timer cannot be active here.
2090 assert(!s->reconnect_delay_timer);
2093 static void nbd_detach_aio_context(BlockDriverState *bs)
2095 BDRVNBDState *s = bs->opaque;
2097 assert(!s->open_timer);
2098 assert(!s->reconnect_delay_timer);
2101 static BlockDriver bdrv_nbd = {
2102 .format_name = "nbd",
2103 .protocol_name = "nbd",
2104 .instance_size = sizeof(BDRVNBDState),
2105 .bdrv_parse_filename = nbd_parse_filename,
2106 .bdrv_co_create_opts = bdrv_co_create_opts_simple,
2107 .create_opts = &bdrv_create_opts_simple,
2108 .bdrv_file_open = nbd_open,
2109 .bdrv_reopen_prepare = nbd_client_reopen_prepare,
2110 .bdrv_co_preadv = nbd_client_co_preadv,
2111 .bdrv_co_pwritev = nbd_client_co_pwritev,
2112 .bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes,
2113 .bdrv_close = nbd_close,
2114 .bdrv_co_flush_to_os = nbd_client_co_flush,
2115 .bdrv_co_pdiscard = nbd_client_co_pdiscard,
2116 .bdrv_refresh_limits = nbd_refresh_limits,
2117 .bdrv_co_truncate = nbd_co_truncate,
2118 .bdrv_co_getlength = nbd_co_getlength,
2119 .bdrv_refresh_filename = nbd_refresh_filename,
2120 .bdrv_co_block_status = nbd_client_co_block_status,
2121 .bdrv_dirname = nbd_dirname,
2122 .strong_runtime_opts = nbd_strong_runtime_opts,
2123 .bdrv_cancel_in_flight = nbd_cancel_in_flight,
2125 .bdrv_attach_aio_context = nbd_attach_aio_context,
2126 .bdrv_detach_aio_context = nbd_detach_aio_context,
2129 static BlockDriver bdrv_nbd_tcp = {
2130 .format_name = "nbd",
2131 .protocol_name = "nbd+tcp",
2132 .instance_size = sizeof(BDRVNBDState),
2133 .bdrv_parse_filename = nbd_parse_filename,
2134 .bdrv_co_create_opts = bdrv_co_create_opts_simple,
2135 .create_opts = &bdrv_create_opts_simple,
2136 .bdrv_file_open = nbd_open,
2137 .bdrv_reopen_prepare = nbd_client_reopen_prepare,
2138 .bdrv_co_preadv = nbd_client_co_preadv,
2139 .bdrv_co_pwritev = nbd_client_co_pwritev,
2140 .bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes,
2141 .bdrv_close = nbd_close,
2142 .bdrv_co_flush_to_os = nbd_client_co_flush,
2143 .bdrv_co_pdiscard = nbd_client_co_pdiscard,
2144 .bdrv_refresh_limits = nbd_refresh_limits,
2145 .bdrv_co_truncate = nbd_co_truncate,
2146 .bdrv_co_getlength = nbd_co_getlength,
2147 .bdrv_refresh_filename = nbd_refresh_filename,
2148 .bdrv_co_block_status = nbd_client_co_block_status,
2149 .bdrv_dirname = nbd_dirname,
2150 .strong_runtime_opts = nbd_strong_runtime_opts,
2151 .bdrv_cancel_in_flight = nbd_cancel_in_flight,
2153 .bdrv_attach_aio_context = nbd_attach_aio_context,
2154 .bdrv_detach_aio_context = nbd_detach_aio_context,
2157 static BlockDriver bdrv_nbd_unix = {
2158 .format_name = "nbd",
2159 .protocol_name = "nbd+unix",
2160 .instance_size = sizeof(BDRVNBDState),
2161 .bdrv_parse_filename = nbd_parse_filename,
2162 .bdrv_co_create_opts = bdrv_co_create_opts_simple,
2163 .create_opts = &bdrv_create_opts_simple,
2164 .bdrv_file_open = nbd_open,
2165 .bdrv_reopen_prepare = nbd_client_reopen_prepare,
2166 .bdrv_co_preadv = nbd_client_co_preadv,
2167 .bdrv_co_pwritev = nbd_client_co_pwritev,
2168 .bdrv_co_pwrite_zeroes = nbd_client_co_pwrite_zeroes,
2169 .bdrv_close = nbd_close,
2170 .bdrv_co_flush_to_os = nbd_client_co_flush,
2171 .bdrv_co_pdiscard = nbd_client_co_pdiscard,
2172 .bdrv_refresh_limits = nbd_refresh_limits,
2173 .bdrv_co_truncate = nbd_co_truncate,
2174 .bdrv_co_getlength = nbd_co_getlength,
2175 .bdrv_refresh_filename = nbd_refresh_filename,
2176 .bdrv_co_block_status = nbd_client_co_block_status,
2177 .bdrv_dirname = nbd_dirname,
2178 .strong_runtime_opts = nbd_strong_runtime_opts,
2179 .bdrv_cancel_in_flight = nbd_cancel_in_flight,
2181 .bdrv_attach_aio_context = nbd_attach_aio_context,
2182 .bdrv_detach_aio_context = nbd_detach_aio_context,
2185 static void bdrv_nbd_init(void)
2187 bdrv_register(&bdrv_nbd);
2188 bdrv_register(&bdrv_nbd_tcp);
2189 bdrv_register(&bdrv_nbd_unix);
2192 block_init(bdrv_nbd_init);