/*
 * QEMU Block driver for NBD
 *
 * Copyright (C) 2016 Red Hat, Inc.
 * Copyright (C) 2008 Bull S.A.S.
 *     Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * Some parts:
 *    Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "nbd-client.h"

#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
#define INDEX_TO_HANDLE(bs, index)  ((index)  ^ (uint64_t)(intptr_t)(bs))
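
/*
 * The two macros above are inverses of each other: XOR-ing twice with the
 * same key ((uint64_t)(intptr_t)(bs)) is a no-op, so for any request slot
 * index i:
 *
 *     HANDLE_TO_INDEX(bs, INDEX_TO_HANDLE(bs, i)) == i
 *
 * A wire handle thus encodes both the request slot and the owning
 * BlockDriverState; a handle that this client did not generate decodes to
 * an out-of-range index and is rejected in nbd_read_reply_entry().
 */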

static void nbd_recv_coroutines_wake_all(NBDClientSession *s)
{
    int i;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        NBDClientRequest *req = &s->requests[i];

        if (req->coroutine && req->receiving) {
            aio_co_wake(req->coroutine);
        }
    }
}
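
/*
 * Tear down the connection.  Shutting the channel down in both directions
 * makes the blocking read in nbd_read_reply_entry() fail, and
 * BDRV_POLL_WHILE() then waits for that coroutine to finish before the
 * channel objects are released.
 */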

static void nbd_teardown_connection(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);

    if (!client->ioc) { /* Already closed */
        return;
    }

    /* finish any pending coroutines */
    qio_channel_shutdown(client->ioc,
                         QIO_CHANNEL_SHUTDOWN_BOTH,
                         NULL);
    BDRV_POLL_WHILE(bs, client->read_reply_co);

    nbd_client_detach_aio_context(bs);
    object_unref(OBJECT(client->sioc));
    client->sioc = NULL;
    object_unref(OBJECT(client->ioc));
    client->ioc = NULL;
}

static coroutine_fn void nbd_read_reply_entry(void *opaque)
{
    NBDClientSession *s = opaque;
    uint64_t i;
    int ret = 0;
    Error *local_err = NULL;

    while (!s->quit) {
        assert(s->reply.handle == 0);
        ret = nbd_receive_reply(s->ioc, &s->reply, &local_err);
        if (local_err) {
            error_report_err(local_err);
        }
        if (ret <= 0) {
            break;
        }

        /* There's no need for a mutex on the receive side, because the
         * handler acts as a synchronization point and ensures that only
         * one coroutine is called until the reply finishes.
         */
        i = HANDLE_TO_INDEX(s, s->reply.handle);
        if (i >= MAX_NBD_REQUESTS ||
            !s->requests[i].coroutine ||
            !s->requests[i].receiving ||
            (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply))
        {
            break;
        }

        /* We're woken up again by the request itself.  Note that there
         * is no race between yielding and reentering read_reply_co.  This
         * is because:
         *
         * - if the request runs on the same AioContext, it is only
         *   entered after we yield
         *
         * - if the request runs on a different AioContext, reentering
         *   read_reply_co happens through a bottom half, which can only
         *   run after we yield.
         */
        aio_co_wake(s->requests[i].coroutine);
        qemu_coroutine_yield();
    }

    s->quit = true;
    nbd_recv_coroutines_wake_all(s);
    s->read_reply_co = NULL;
}
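
/*
 * The flow between a request coroutine and the single reply coroutine,
 * sketched:
 *
 *     request coroutine                   read_reply_co
 *     -----------------                   -------------
 *     nbd_co_send_request()
 *     receiving = true; yield  -------->  nbd_receive_reply()
 *                              <--------  aio_co_wake(requests[i].coroutine)
 *     read payload from s->ioc            (yielded; s->reply stays valid)
 *     aio_co_wake(read_reply_co) ------>  next iteration
 */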

static int nbd_co_send_request(BlockDriverState *bs,
                               NBDRequest *request,
                               QEMUIOVector *qiov)
{
    NBDClientSession *s = nbd_get_client_session(bs);
    int rc, i;

    qemu_co_mutex_lock(&s->send_mutex);
    while (s->in_flight == MAX_NBD_REQUESTS) {
        qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
    }
    s->in_flight++;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->requests[i].coroutine == NULL) {
            break;
        }
    }

    g_assert(qemu_in_coroutine());
    assert(i < MAX_NBD_REQUESTS);

    s->requests[i].coroutine = qemu_coroutine_self();
    s->requests[i].offset = request->from;
    s->requests[i].receiving = false;

    request->handle = INDEX_TO_HANDLE(s, i);

    if (s->quit) {
        rc = -EIO;
        goto err;
    }
    if (!s->ioc) {
        rc = -EPIPE;
        goto err;
    }

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0 && !s->quit) {
            if (qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
                                       NULL) < 0) {
                rc = -EIO;
            }
        } else if (rc >= 0) {
            rc = -EIO;
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }

err:
    if (rc < 0) {
        s->quit = true;
        s->requests[i].coroutine = NULL;
        s->in_flight--;
        qemu_co_queue_next(&s->free_sema);
    }
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}
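
/*
 * On the corking above: a write is sent as two calls, the fixed 28-byte
 * request header followed by the payload.  Corking the channel around the
 * pair lets the transport coalesce them into fewer segments (TCP_CORK on
 * a TCP socket) instead of flushing a tiny segment for the header alone.
 */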

static inline uint16_t payload_advance16(uint8_t **payload)
{
    *payload += 2;
    return lduw_be_p(*payload - 2);
}

static inline uint32_t payload_advance32(uint8_t **payload)
{
    *payload += 4;
    return ldl_be_p(*payload - 4);
}

static inline uint64_t payload_advance64(uint8_t **payload)
{
    *payload += 8;
    return ldq_be_p(*payload - 8);
}
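
/*
 * Each helper consumes one big-endian field and leaves *payload pointing
 * at the next field, so a fixed-layout chunk parses as a plain sequence of
 * calls.  For example, NBD_REPLY_TYPE_OFFSET_HOLE carries a 64-bit offset
 * followed by a 32-bit hole size:
 *
 *     offset    = payload_advance64(&payload);
 *     hole_size = payload_advance32(&payload);
 */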

static int nbd_parse_offset_hole_payload(NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_offset,
                                         QEMUIOVector *qiov, Error **errp)
{
    uint64_t offset;
    uint32_t hole_size;

    if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_HOLE");
        return -EINVAL;
    }

    offset = payload_advance64(&payload);
    hole_size = payload_advance32(&payload);

    if (!hole_size || offset < orig_offset || hole_size > qiov->size ||
        offset > orig_offset + qiov->size - hole_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }

    qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);

    return 0;
}

/* nbd_parse_error_payload
 * Parse an error chunk and store the corresponding negative errno in
 * @request_ret; @errp is set only for protocol errors.
 */
static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
                                   uint8_t *payload, int *request_ret,
                                   Error **errp)
{
    uint32_t error;
    uint16_t message_size;

    /* All error chunk types have bit 15 set */
    assert(chunk->type & (1 << 15));

    if (chunk->length < sizeof(error) + sizeof(message_size)) {
        error_setg(errp,
                   "Protocol error: invalid payload for structured error");
        return -EINVAL;
    }

    error = nbd_errno_to_system_errno(payload_advance32(&payload));
    if (error == 0) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with error = 0");
        return -EINVAL;
    }

    *request_ret = -error;
    message_size = payload_advance16(&payload);

    if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with incorrect message size");
        return -EINVAL;
    }

    /* TODO: Add a trace point to mention the server complaint */

    /* TODO handle ERROR_OFFSET */

    return 0;
}

static int nbd_co_receive_offset_data_payload(NBDClientSession *s,
                                              uint64_t orig_offset,
                                              QEMUIOVector *qiov, Error **errp)
{
    QEMUIOVector sub_qiov;
    uint64_t offset;
    size_t data_size;
    int ret;
    NBDStructuredReplyChunk *chunk = &s->reply.structured;

    assert(nbd_reply_is_structured(&s->reply));

    /* The NBD spec requires at least one byte of payload */
    if (chunk->length <= sizeof(offset)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_DATA");
        return -EINVAL;
    }

    if (nbd_read(s->ioc, &offset, sizeof(offset), errp) < 0) {
        return -EIO;
    }
    be64_to_cpus(&offset);

    data_size = chunk->length - sizeof(offset);
    assert(data_size);
    if (offset < orig_offset || data_size > qiov->size ||
        offset > orig_offset + qiov->size - data_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }

    qemu_iovec_init(&sub_qiov, qiov->niov);
    qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
    ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
    qemu_iovec_destroy(&sub_qiov);

    return ret < 0 ? -EIO : 0;
}

#define NBD_MAX_MALLOC_PAYLOAD 1000

/* nbd_co_receive_structured_payload
 * Read the payload of the current structured reply chunk into a freshly
 * allocated buffer, stored into *@payload.
 */
static coroutine_fn int nbd_co_receive_structured_payload(
        NBDClientSession *s, void **payload, Error **errp)
{
    int ret;
    uint32_t len;

    assert(nbd_reply_is_structured(&s->reply));

    len = s->reply.structured.length;

    if (len == 0) {
        return 0;
    }

    if (payload == NULL) {
        error_setg(errp, "Unexpected structured payload");
        return -EINVAL;
    }

    if (len > NBD_MAX_MALLOC_PAYLOAD) {
        error_setg(errp, "Payload too large");
        return -EINVAL;
    }

    *payload = g_new(char, len);
    ret = nbd_read(s->ioc, *payload, len, errp);
    if (ret < 0) {
        g_free(*payload);
        *payload = NULL;
        return ret;
    }

    return 0;
}

/* nbd_co_do_receive_one_chunk
 * for simple reply:
 *   set @request_ret to the received reply error
 *   if @qiov is not NULL: read the payload to @qiov
 * for structured reply chunk:
 *   if error chunk: read payload, set @request_ret, do not set @payload
 *   else if offset_data chunk: read payload data to @qiov, do not set @payload
 *   else: read payload to @payload
 *
 * If the function fails, @errp contains the corresponding error message, and
 * the connection with the server is suspect.  If it returns 0, then the
 * transaction succeeded (although @request_ret may be a negative errno
 * corresponding to the server's error reply), and @errp is unchanged.
 */
static coroutine_fn int nbd_co_do_receive_one_chunk(
        NBDClientSession *s, uint64_t handle, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
{
    int ret;
    int i = HANDLE_TO_INDEX(s, handle);
    void *local_payload = NULL;
    NBDStructuredReplyChunk *chunk;

    if (payload) {
        *payload = NULL;
    }
    *request_ret = 0;

    /* Wait until we're woken up by nbd_read_reply_entry.  */
    s->requests[i].receiving = true;
    qemu_coroutine_yield();
    s->requests[i].receiving = false;
    if (!s->ioc || s->quit) {
        error_setg(errp, "Connection closed");
        return -EIO;
    }

    assert(s->reply.handle == handle);

    if (nbd_reply_is_simple(&s->reply)) {
        if (only_structured) {
            error_setg(errp, "Protocol error: simple reply when structured "
                             "reply chunk was expected");
            return -EINVAL;
        }

        *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
        if (*request_ret < 0 || !qiov) {
            return 0;
        }

        return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
                                     errp) < 0 ? -EIO : 0;
    }

    /* handle structured reply chunk */
    assert(s->info.structured_reply);
    chunk = &s->reply.structured;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without"
                       " NBD_REPLY_FLAG_DONE flag set");
            return -EINVAL;
        }
        if (chunk->length) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk with"
                       " nonzero length");
            return -EINVAL;
        }
        return 0;
    }

    if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
        if (!qiov) {
            error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk");
            return -EINVAL;
        }

        return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
                                                  qiov, errp);
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        payload = &local_payload;
    }

    ret = nbd_co_receive_structured_payload(s, payload, errp);
    if (ret < 0) {
        return ret;
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp);
        g_free(local_payload);
        return ret;
    }

    return 0;
}

/* nbd_co_receive_one_chunk
 * Read a reply chunk, wake up read_reply_co and set s->quit if needed.
 * The return value is a fatal error code or a normal nbd reply error code.
 */
static coroutine_fn int nbd_co_receive_one_chunk(
        NBDClientSession *s, uint64_t handle, bool only_structured,
        QEMUIOVector *qiov, NBDReply *reply, void **payload, Error **errp)
{
    int request_ret;
    int ret = nbd_co_do_receive_one_chunk(s, handle, only_structured,
                                          &request_ret, qiov, payload, errp);

    if (ret < 0) {
        s->quit = true;
    } else {
        /* For assert at loop start in nbd_read_reply_entry */
        if (reply) {
            *reply = s->reply;
        }
        s->reply.handle = 0;
        ret = request_ret;
    }

    if (s->read_reply_co) {
        aio_co_wake(s->read_reply_co);
    }

    return ret;
}

typedef struct NBDReplyChunkIter {
    int ret;
    Error *err;
    bool done, only_structured;
} NBDReplyChunkIter;

static void nbd_iter_error(NBDReplyChunkIter *iter, bool fatal,
                           int ret, Error **local_err)
{
    assert(ret < 0);

    /* Record the first error; a fatal error overrides whatever was
     * recorded before. */
    if (fatal || iter->ret == 0) {
        if (iter->ret != 0) {
            error_free(iter->err);
            iter->err = NULL;
        }
        iter->ret = ret;
        error_propagate(&iter->err, *local_err);
    } else {
        error_free(*local_err);
    }

    *local_err = NULL;
}

/* NBD_FOREACH_REPLY_CHUNK
 * Run the loop body once per reply chunk received for @handle; the
 * iterator collects errors and stops after the final chunk.
 */
#define NBD_FOREACH_REPLY_CHUNK(s, iter, handle, structured, \
                                qiov, reply, payload) \
    for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
         nbd_reply_chunk_iter_receive(s, &iter, handle, qiov, reply, payload);)
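
/*
 * Typical use, as in nbd_co_receive_return_code() below: drain every chunk
 * for @handle and let the iterator accumulate the result.
 *
 *     NBDReplyChunkIter iter;
 *
 *     NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) {
 *         // nbd_reply_chunk_iter_receive does all the work
 *     }
 *     error_propagate(errp, iter.err);
 *     return iter.ret;
 */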

/* nbd_reply_chunk_iter_receive
 * The pump behind NBD_FOREACH_REPLY_CHUNK: receive one chunk and decide
 * whether the loop body should run for it.
 */
static bool nbd_reply_chunk_iter_receive(NBDClientSession *s,
                                         NBDReplyChunkIter *iter,
                                         uint64_t handle,
                                         QEMUIOVector *qiov, NBDReply *reply,
                                         void **payload)
{
    int ret;
    NBDReply local_reply;
    NBDStructuredReplyChunk *chunk;
    Error *local_err = NULL;

    if (s->quit) {
        error_setg(&local_err, "Connection closed");
        nbd_iter_error(iter, true, -EIO, &local_err);
        goto break_loop;
    }

    if (iter->done) {
        /* Previous iteration was last. */
        goto break_loop;
    }

    if (reply == NULL) {
        reply = &local_reply;
    }

    ret = nbd_co_receive_one_chunk(s, handle, iter->only_structured,
                                   qiov, reply, payload, &local_err);
    if (ret < 0) {
        /* If it is a fatal error, s->quit is set by nbd_co_receive_one_chunk */
        nbd_iter_error(iter, s->quit, ret, &local_err);
    }

    /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
    if (nbd_reply_is_simple(&s->reply) || s->quit) {
        goto break_loop;
    }

    chunk = &reply->structured;
    iter->only_structured = true;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        /* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */
        assert(chunk->flags & NBD_REPLY_FLAG_DONE);
        goto break_loop;
    }

    if (chunk->flags & NBD_REPLY_FLAG_DONE) {
        /* This iteration is last. */
        iter->done = true;
    }

    /* Execute the loop body */
    return true;

break_loop:
    s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL;

    qemu_co_mutex_lock(&s->send_mutex);
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_co_mutex_unlock(&s->send_mutex);

    return false;
}
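
/*
 * Note that break_loop above is the only place a request slot is retired:
 * clearing requests[].coroutine and decrementing in_flight here, rather
 * than in each caller, guarantees the slot is freed exactly once however
 * the chunk loop terminates (final chunk, simple reply, or fatal error).
 */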

static int nbd_co_receive_return_code(NBDClientSession *s, uint64_t handle,
                                      Error **errp)
{
    NBDReplyChunkIter iter;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) {
        /* nbd_reply_chunk_iter_receive does all the work */
    }

    error_propagate(errp, iter.err);
    return iter.ret;
}

static int nbd_co_receive_cmdread_reply(NBDClientSession *s, uint64_t handle,
                                        uint64_t offset, QEMUIOVector *qiov,
                                        Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, s->info.structured_reply,
                            qiov, &reply, &payload)
    {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_OFFSET_DATA:
            /* special cased in nbd_co_receive_one_chunk, data is already
             * in qiov */
            break;
        case NBD_REPLY_TYPE_OFFSET_HOLE:
            ret = nbd_parse_offset_hole_payload(&reply.structured, payload,
                                                offset, qiov, &local_err);
            if (ret < 0) {
                s->quit = true;
                nbd_iter_error(&iter, true, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                /* not allowed reply type */
                s->quit = true;
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) for CMD_READ",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_error(&iter, true, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    error_propagate(errp, iter.err);
    return iter.ret;
}

static int nbd_co_request(BlockDriverState *bs, NBDRequest *request,
                          QEMUIOVector *write_qiov)
{
    int ret;
    Error *local_err = NULL;
    NBDClientSession *client = nbd_get_client_session(bs);

    assert(request->type != NBD_CMD_READ);
    if (write_qiov) {
        assert(request->type == NBD_CMD_WRITE);
        assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
    } else {
        assert(request->type != NBD_CMD_WRITE);
    }
    ret = nbd_co_send_request(bs, request, write_qiov);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_return_code(client, request->handle, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
    return ret;
}

int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
                         uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    int ret;
    Error *local_err = NULL;
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_READ,
        .from = offset,
        .len = bytes,
    };

    assert(bytes <= NBD_MAX_BUFFER_SIZE);
    assert(!flags);

    ret = nbd_co_send_request(bs, &request, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_cmdread_reply(client, request.handle, offset, qiov,
                                       &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
    return ret;
}

int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_WRITE,
        .from = offset,
        .len = bytes,
    };

    assert(!(client->info.flags & NBD_FLAG_READ_ONLY));
    if (flags & BDRV_REQ_FUA) {
        assert(client->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }

    assert(bytes <= NBD_MAX_BUFFER_SIZE);

    return nbd_co_request(bs, &request, qiov);
}
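
/*
 * The asserts above encode a contract with the block layer: nbd_client_init()
 * only advertises BDRV_REQ_FUA in bs->supported_write_flags when the server
 * negotiated NBD_FLAG_SEND_FUA, so a BDRV_REQ_FUA request reaching this
 * function implies the corresponding NBD flag is available.
 */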

int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
                                int bytes, BdrvRequestFlags flags)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_WRITE_ZEROES,
        .from = offset,
        .len = bytes,
    };

    assert(!(client->info.flags & NBD_FLAG_READ_ONLY));
    if (!(client->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
        return -ENOTSUP;
    }

    if (flags & BDRV_REQ_FUA) {
        assert(client->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }
    if (!(flags & BDRV_REQ_MAY_UNMAP)) {
        request.flags |= NBD_CMD_FLAG_NO_HOLE;
    }

    return nbd_co_request(bs, &request, NULL);
}

int nbd_client_co_flush(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = { .type = NBD_CMD_FLUSH };

    if (!(client->info.flags & NBD_FLAG_SEND_FLUSH)) {
        return 0;
    }

    request.from = 0;
    request.len = 0;

    return nbd_co_request(bs, &request, NULL);
}

int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_TRIM,
        .from = offset,
        .len = bytes,
    };

    assert(!(client->info.flags & NBD_FLAG_READ_ONLY));
    if (!(client->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
        return 0;
    }

    return nbd_co_request(bs, &request, NULL);
}

void nbd_client_detach_aio_context(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    qio_channel_detach_aio_context(QIO_CHANNEL(client->ioc));
}

void nbd_client_attach_aio_context(BlockDriverState *bs,
                                   AioContext *new_context)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    qio_channel_attach_aio_context(QIO_CHANNEL(client->ioc), new_context);
    aio_co_schedule(new_context, client->read_reply_co);
}

void nbd_client_close(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = { .type = NBD_CMD_DISC };

    if (client->ioc == NULL) {
        return;
    }

    nbd_send_request(client->ioc, &request);

    nbd_teardown_connection(bs);
}

int nbd_client_init(BlockDriverState *bs,
                    QIOChannelSocket *sioc,
                    const char *export,
                    QCryptoTLSCreds *tlscreds,
                    const char *hostname,
                    Error **errp)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    int ret;

    /* NBD handshake */
    logout("session init %s\n", export);
    qio_channel_set_blocking(QIO_CHANNEL(sioc), true, NULL);

    client->info.request_sizes = true;
    client->info.structured_reply = true;
    ret = nbd_receive_negotiate(QIO_CHANNEL(sioc), export,
                                tlscreds, hostname,
                                &client->ioc, &client->info, errp);
    if (ret < 0) {
        logout("Failed to negotiate with the NBD server\n");
        return ret;
    }
    if (client->info.flags & NBD_FLAG_READ_ONLY &&
        !bdrv_is_read_only(bs)) {
        error_setg(errp,
                   "request for write access conflicts with read-only export");
        return -EACCES;
    }
    if (client->info.flags & NBD_FLAG_SEND_FUA) {
        bs->supported_write_flags = BDRV_REQ_FUA;
        bs->supported_zero_flags |= BDRV_REQ_FUA;
    }
    if (client->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
        bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
    }
    if (client->info.min_block > bs->bl.request_alignment) {
        bs->bl.request_alignment = client->info.min_block;
    }

    qemu_co_mutex_init(&client->send_mutex);
    qemu_co_queue_init(&client->free_sema);
    client->sioc = sioc;
    object_ref(OBJECT(client->sioc));

    if (!client->ioc) {
        client->ioc = QIO_CHANNEL(sioc);
        object_ref(OBJECT(client->ioc));
    }

    /* Now that we're connected, set the socket to be non-blocking and
     * kick the reply mechanism.  */
    qio_channel_set_blocking(QIO_CHANNEL(sioc), false, NULL);
    client->read_reply_co = qemu_coroutine_create(nbd_read_reply_entry, client);
    nbd_client_attach_aio_context(bs, bdrv_get_aio_context(bs));

    logout("Established connection with NBD server\n");

    return 0;
}