/*
 * QEMU Block driver for NBD
 *
 * Copyright (C) 2016 Red Hat, Inc.
 * Copyright (C) 2008 Bull S.A.S.
 *     Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * Some parts:
 *    Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qapi/error.h"
#include "nbd-client.h"

#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
#define INDEX_TO_HANDLE(bs, index)  ((index)  ^ (uint64_t)(intptr_t)(bs))

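/*
 * Worked example of the handle scheme: the handle sent on the wire is the
 * small request-slot index XORed with the BlockDriverState pointer, so
 * slot 3 of a BDS at 0x7f0123456000 yields handle 0x7f0123456003.  XOR is
 * its own inverse, so HANDLE_TO_INDEX(bs, INDEX_TO_HANDLE(bs, i)) == i,
 * and a reply handle that was not minted by this BDS decodes to an
 * out-of-range index, which the reply loop below rejects.
 */
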
static void nbd_recv_coroutines_wake_all(NBDClientSession *s)
{
    int i;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        NBDClientRequest *req = &s->requests[i];

        if (req->coroutine && req->receiving) {
            aio_co_wake(req->coroutine);
        }
    }
}

static void nbd_teardown_connection(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);

    if (!client->ioc) { /* Already closed */
        return;
    }

    /* finish any pending coroutines */
    qio_channel_shutdown(client->ioc,
                         QIO_CHANNEL_SHUTDOWN_BOTH,
                         NULL);
    BDRV_POLL_WHILE(bs, client->read_reply_co);

    nbd_client_detach_aio_context(bs);
    object_unref(OBJECT(client->sioc));
    client->sioc = NULL;
    object_unref(OBJECT(client->ioc));
    client->ioc = NULL;
}

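/*
 * A note on the teardown ordering above: shutting down the channel first
 * forces any read blocked inside nbd_receive_reply() to return, then
 * BDRV_POLL_WHILE() waits for read_reply_co to run to completion and
 * clear itself; only afterwards is it safe to drop the channel references.
 */
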
static coroutine_fn void nbd_read_reply_entry(void *opaque)
{
    NBDClientSession *s = opaque;
    uint64_t i;
    int ret = 0;
    Error *local_err = NULL;

    while (!s->quit) {
        assert(s->reply.handle == 0);
        ret = nbd_receive_reply(s->ioc, &s->reply, &local_err);
        if (local_err) {
            trace_nbd_read_reply_entry_fail(ret, error_get_pretty(local_err));
            error_free(local_err);
        }
        if (ret <= 0) {
            break;
        }

        /* There's no need for a mutex on the receive side, because the
         * handler acts as a synchronization point and ensures that only
         * one coroutine is called until the reply finishes.
         */
        i = HANDLE_TO_INDEX(s, s->reply.handle);
        if (i >= MAX_NBD_REQUESTS ||
            !s->requests[i].coroutine ||
            !s->requests[i].receiving ||
            (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply))
        {
            break;
        }

        /* We're woken up again by the request itself.  Note that there
         * is no race between yielding and reentering read_reply_co.  This
         * is because:
         *
         * - if the request runs on the same AioContext, it is only
         *   entered after we yield
         *
         * - if the request runs on a different AioContext, reentering
         *   read_reply_co happens through a bottom half, which can only
         *   run after we yield.
         */
        aio_co_wake(s->requests[i].coroutine);
        qemu_coroutine_yield();
    }

    s->quit = true;
    nbd_recv_coroutines_wake_all(s);
    s->read_reply_co = NULL;
}

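/*
 * To summarize the receive design: read_reply_co is the only coroutine
 * that reads from the socket.  It parses just the reply header, uses the
 * handle to find the requesting coroutine, wakes it so that it can read
 * its own payload, and yields until that coroutine hands control back.
 * One reader is thus multiplexed across up to MAX_NBD_REQUESTS in-flight
 * requests without any per-reply locking.
 */
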
static int nbd_co_send_request(BlockDriverState *bs,
                               NBDRequest *request,
                               QEMUIOVector *qiov)
{
    NBDClientSession *s = nbd_get_client_session(bs);
    int rc, i;

    qemu_co_mutex_lock(&s->send_mutex);
    while (s->in_flight == MAX_NBD_REQUESTS) {
        qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
    }
    s->in_flight++;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->requests[i].coroutine == NULL) {
            break;
        }
    }

    g_assert(qemu_in_coroutine());
    assert(i < MAX_NBD_REQUESTS);

    s->requests[i].coroutine = qemu_coroutine_self();
    s->requests[i].offset = request->from;
    s->requests[i].receiving = false;

    request->handle = INDEX_TO_HANDLE(s, i);

    if (s->quit) {
        rc = -EIO;
        goto err;
    }
    if (!s->ioc) {
        rc = -EPIPE;
        goto err;
    }

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0 && !s->quit) {
            if (qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
                                       NULL) < 0) {
                rc = -EIO;
            }
        } else if (rc >= 0) {
            rc = -EIO;
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }

err:
    if (rc < 0) {
        s->quit = true;
        s->requests[i].coroutine = NULL;
        s->in_flight--;
        qemu_co_queue_next(&s->free_sema);
    }
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}

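/*
 * The cork around the header + payload writes above is presumably a
 * latency/throughput optimization: nbd_send_request() writes the fixed
 * request header, and corking lets it share transport segments with the
 * qiov payload instead of the header being flushed on its own (the exact
 * behavior depends on the transport under the QIOChannel).
 */
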
static inline uint16_t payload_advance16(uint8_t **payload)
{
    *payload += 2;
    return lduw_be_p(*payload - 2);
}

static inline uint32_t payload_advance32(uint8_t **payload)
{
    *payload += 4;
    return ldl_be_p(*payload - 4);
}

static inline uint64_t payload_advance64(uint8_t **payload)
{
    *payload += 8;
    return ldq_be_p(*payload - 8);
}

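/*
 * The payload_advance* helpers implement cursor-style parsing of the
 * big-endian fields of a chunk payload: each call consumes its field and
 * moves the cursor.  For example, for NBD_REPLY_TYPE_OFFSET_HOLE below:
 *
 *     offset    = payload_advance64(&payload);   // bytes 0-7
 *     hole_size = payload_advance32(&payload);   // bytes 8-11
 */
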
static int nbd_parse_offset_hole_payload(NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_offset,
                                         QEMUIOVector *qiov, Error **errp)
{
    uint64_t offset;
    uint32_t hole_size;

    if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_HOLE");
        return -EINVAL;
    }

    offset = payload_advance64(&payload);
    hole_size = payload_advance32(&payload);

    if (!hole_size || offset < orig_offset || hole_size > qiov->size ||
        offset > orig_offset + qiov->size - hole_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                   " region");
        return -EINVAL;
    }

    qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);

    return 0;
}

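/*
 * The bounds check above is phrased to avoid unsigned wraparound: testing
 * "offset + hole_size > orig_offset + qiov->size" directly could overflow
 * for a malicious offset near UINT64_MAX, whereas
 * "offset > orig_offset + qiov->size - hole_size" is safe once hole_size
 * is known to be nonzero and no larger than qiov->size.
 */
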
/* nbd_parse_blockstatus_payload
 * Support only a single extent in the reply, and only for the
 * base:allocation context.
 */
static int nbd_parse_blockstatus_payload(NBDClientSession *client,
                                         NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_length,
                                         NBDExtent *extent, Error **errp)
{
    uint32_t context_id;

    if (chunk->length != sizeof(context_id) + sizeof(*extent)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS");
        return -EINVAL;
    }

    context_id = payload_advance32(&payload);
    if (client->info.context_id != context_id) {
        error_setg(errp, "Protocol error: unexpected context id %d for "
                         "NBD_REPLY_TYPE_BLOCK_STATUS, when negotiated context "
                         "id is %d", context_id,
                         client->info.context_id);
        return -EINVAL;
    }

    extent->length = payload_advance32(&payload);
    extent->flags = payload_advance32(&payload);

    if (extent->length == 0 ||
        (client->info.min_block && !QEMU_IS_ALIGNED(extent->length,
                                                    client->info.min_block))) {
        error_setg(errp, "Protocol error: server sent status chunk with "
                   "invalid length");
        return -EINVAL;
    }

    /* The server is allowed to send us extra information on the final
     * extent; just clamp it to the length we requested. */
    if (extent->length > orig_length) {
        extent->length = orig_length;
    }

    return 0;
}

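/*
 * For reference, the payload parsed above is laid out on the wire as
 * (all fields big-endian):
 *
 *     uint32_t context_id;   // must match the negotiated meta context
 *     uint32_t length;       // extent length in bytes
 *     uint32_t flags;        // NBD_STATE_HOLE / NBD_STATE_ZERO bits
 *
 * With NBD_CMD_FLAG_REQ_ONE this client expects exactly one such extent.
 */
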
/* nbd_parse_error_payload
 * On success, @errp contains a message describing the NBD error reply.
 */
static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
                                   uint8_t *payload, int *request_ret,
                                   Error **errp)
{
    uint32_t error;
    uint16_t message_size;

    assert(chunk->type & (1 << 15));

    if (chunk->length < sizeof(error) + sizeof(message_size)) {
        error_setg(errp,
                   "Protocol error: invalid payload for structured error");
        return -EINVAL;
    }

    error = nbd_errno_to_system_errno(payload_advance32(&payload));
    if (error == 0) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with error = 0");
        return -EINVAL;
    }

    *request_ret = -error;
    message_size = payload_advance16(&payload);

    if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with incorrect message size");
        return -EINVAL;
    }

    /* TODO: Add a trace point to mention the server complaint */

    /* TODO handle ERROR_OFFSET */

    return 0;
}

static int nbd_co_receive_offset_data_payload(NBDClientSession *s,
                                              uint64_t orig_offset,
                                              QEMUIOVector *qiov, Error **errp)
{
    QEMUIOVector sub_qiov;
    uint64_t offset;
    size_t data_size;
    int ret;
    NBDStructuredReplyChunk *chunk = &s->reply.structured;

    assert(nbd_reply_is_structured(&s->reply));

    /* The NBD spec requires at least one byte of payload */
    if (chunk->length <= sizeof(offset)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_DATA");
        return -EINVAL;
    }

    if (nbd_read(s->ioc, &offset, sizeof(offset), errp) < 0) {
        return -EIO;
    }
    be64_to_cpus(&offset);

    data_size = chunk->length - sizeof(offset);
    assert(data_size);
    if (offset < orig_offset || data_size > qiov->size ||
        offset > orig_offset + qiov->size - data_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                   " region");
        return -EINVAL;
    }

    qemu_iovec_init(&sub_qiov, qiov->niov);
    qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
    ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
    qemu_iovec_destroy(&sub_qiov);

    return ret < 0 ? -EIO : 0;
}

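/*
 * Using a temporary sub_qiov window into @qiov (rather than reading into
 * a bounce buffer and copying) lets qio_channel_readv_all() place the
 * chunk's data directly at the right offset within the caller's buffers;
 * only the iovec array is allocated and destroyed, never the data itself.
 */
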
#define NBD_MAX_MALLOC_PAYLOAD 1000
/* nbd_co_receive_structured_payload
 * Read the payload of the current structured reply chunk into a freshly
 * allocated buffer stored at *payload.
 */
static coroutine_fn int nbd_co_receive_structured_payload(
        NBDClientSession *s, void **payload, Error **errp)
{
    int ret;
    uint32_t len;

    assert(nbd_reply_is_structured(&s->reply));

    len = s->reply.structured.length;

    if (len == 0) {
        return 0;
    }

    if (payload == NULL) {
        error_setg(errp, "Unexpected structured payload");
        return -EINVAL;
    }

    if (len > NBD_MAX_MALLOC_PAYLOAD) {
        error_setg(errp, "Payload too large");
        return -EINVAL;
    }

    *payload = g_new(char, len);
    ret = nbd_read(s->ioc, *payload, len, errp);
    if (ret < 0) {
        g_free(*payload);
        *payload = NULL;
        return ret;
    }

    return 0;
}

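/*
 * NBD_MAX_MALLOC_PAYLOAD bounds how much memory a server can make this
 * client allocate for a single non-data chunk (errors, block status),
 * since chunk->length arrives from the network.  Bulk data never takes
 * this path: OFFSET_DATA payloads are read straight into the request's
 * qiov by nbd_co_receive_offset_data_payload().
 */
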
/* nbd_co_do_receive_one_chunk
 * for simple reply:
 *   set request_ret to received reply error
 *   if qiov is not NULL: read payload to @qiov
 * for structured reply chunk:
 *   if error chunk: read payload, set @request_ret, do not set @payload
 *   else if offset_data chunk: read payload data to @qiov, do not set @payload
 *   else: read payload to @payload
 *
 * If function fails, @errp contains corresponding error message, and the
 * connection with the server is suspect.  If it returns 0, then the
 * transaction succeeded (although @request_ret may be a negative errno
 * corresponding to the server's error reply), and errp is unchanged.
 */
static coroutine_fn int nbd_co_do_receive_one_chunk(
        NBDClientSession *s, uint64_t handle, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
{
    int ret;
    int i = HANDLE_TO_INDEX(s, handle);
    void *local_payload = NULL;
    NBDStructuredReplyChunk *chunk;

    if (payload) {
        *payload = NULL;
    }
    *request_ret = 0;

    /* Wait until we're woken up by nbd_read_reply_entry.  */
    s->requests[i].receiving = true;
    qemu_coroutine_yield();
    s->requests[i].receiving = false;
    if (!s->ioc || s->quit) {
        error_setg(errp, "Connection closed");
        return -EIO;
    }

    assert(s->reply.handle == handle);

    if (nbd_reply_is_simple(&s->reply)) {
        if (only_structured) {
            error_setg(errp, "Protocol error: simple reply when structured "
                             "reply chunk was expected");
            return -EINVAL;
        }

        *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
        if (*request_ret < 0 || !qiov) {
            return 0;
        }

        return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
                                     errp) < 0 ? -EIO : 0;
    }

    /* handle structured reply chunk */
    assert(s->info.structured_reply);
    chunk = &s->reply.structured;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without"
                       " NBD_REPLY_FLAG_DONE flag set");
            return -EINVAL;
        }
        if (chunk->length) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk with"
                       " nonzero length");
            return -EINVAL;
        }
        return 0;
    }

    if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
        if (!qiov) {
            error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk");
            return -EINVAL;
        }

        return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
                                                  qiov, errp);
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        payload = &local_payload;
    }

    ret = nbd_co_receive_structured_payload(s, payload, errp);
    if (ret < 0) {
        return ret;
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp);
        g_free(local_payload);
        return ret;
    }

    return 0;
}

/* nbd_co_receive_one_chunk
 * Read reply, wake up read_reply_co and set s->quit if needed.
 * Return value is a fatal error code or normal nbd reply error code
 */
static coroutine_fn int nbd_co_receive_one_chunk(
        NBDClientSession *s, uint64_t handle, bool only_structured,
        QEMUIOVector *qiov, NBDReply *reply, void **payload, Error **errp)
{
    int request_ret;
    int ret = nbd_co_do_receive_one_chunk(s, handle, only_structured,
                                          &request_ret, qiov, payload, errp);

    if (ret < 0) {
        s->quit = true;
    } else {
        /* For assert at loop start in nbd_read_reply_entry */
        if (reply) {
            *reply = s->reply;
        }
        s->reply.handle = 0;
        ret = request_ret;
    }

    if (s->read_reply_co) {
        aio_co_wake(s->read_reply_co);
    }

    return ret;
}

typedef struct NBDReplyChunkIter {
    int ret;
    bool fatal;
    Error *err;
    bool done, only_structured;
} NBDReplyChunkIter;

static void nbd_iter_error(NBDReplyChunkIter *iter, bool fatal,
                           int ret, Error **local_err)
{
    assert(ret < 0);

    if ((fatal && !iter->fatal) || iter->ret == 0) {
        if (iter->ret != 0) {
            error_free(iter->err);
            iter->err = NULL;
        }
        iter->fatal = fatal;
        iter->ret = ret;
        error_propagate(&iter->err, *local_err);
    } else {
        error_free(*local_err);
    }

    *local_err = NULL;
}

/* NBD_FOREACH_REPLY_CHUNK
 * Iterate over the chunks of a single reply: the loop body runs once per
 * structured reply chunk, and zero times for a simple reply.
 */
#define NBD_FOREACH_REPLY_CHUNK(s, iter, handle, structured, \
                                qiov, reply, payload) \
    for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
         nbd_reply_chunk_iter_receive(s, &iter, handle, qiov, reply, payload);)

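/*
 * Typical use (a minimal sketch; compare the real callers below):
 *
 *     NBDReplyChunkIter iter;
 *
 *     NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, &reply, &payload) {
 *         ... inspect reply.structured and payload for this chunk ...
 *     }
 *     error_propagate(errp, iter.err);
 *     return iter.ret;
 *
 * Slot bookkeeping and error accumulation live entirely in the iterator,
 * so callers only supply the per-chunk handling.
 */
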
/* nbd_reply_chunk_iter_receive
 * The loop condition of NBD_FOREACH_REPLY_CHUNK: receive the next chunk,
 * record errors in @iter, and return false once the reply is complete.
 */
static bool nbd_reply_chunk_iter_receive(NBDClientSession *s,
                                         NBDReplyChunkIter *iter,
                                         uint64_t handle,
                                         QEMUIOVector *qiov, NBDReply *reply,
                                         void **payload)
{
    int ret;
    NBDReply local_reply;
    NBDStructuredReplyChunk *chunk;
    Error *local_err = NULL;
    if (s->quit) {
        error_setg(&local_err, "Connection closed");
        nbd_iter_error(iter, true, -EIO, &local_err);
        goto break_loop;
    }

    if (iter->done) {
        /* Previous iteration was last. */
        goto break_loop;
    }

    if (reply == NULL) {
        reply = &local_reply;
    }

    ret = nbd_co_receive_one_chunk(s, handle, iter->only_structured,
                                   qiov, reply, payload, &local_err);
    if (ret < 0) {
        /* If it is a fatal error s->quit is set by nbd_co_receive_one_chunk */
        nbd_iter_error(iter, s->quit, ret, &local_err);
    }

    /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
    if (nbd_reply_is_simple(&s->reply) || s->quit) {
        goto break_loop;
    }

    chunk = &reply->structured;
    iter->only_structured = true;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        /* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */
        assert(chunk->flags & NBD_REPLY_FLAG_DONE);
        goto break_loop;
    }

    if (chunk->flags & NBD_REPLY_FLAG_DONE) {
        /* This iteration is last. */
        iter->done = true;
    }

    /* Execute the loop body */
    return true;

break_loop:
    s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL;

    qemu_co_mutex_lock(&s->send_mutex);
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_co_mutex_unlock(&s->send_mutex);

    return false;
}

static int nbd_co_receive_return_code(NBDClientSession *s, uint64_t handle,
                                      Error **errp)
{
    NBDReplyChunkIter iter;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) {
        /* nbd_reply_chunk_iter_receive does all the work */
    }

    error_propagate(errp, iter.err);
    return iter.ret;
}

static int nbd_co_receive_cmdread_reply(NBDClientSession *s, uint64_t handle,
                                        uint64_t offset, QEMUIOVector *qiov,
                                        Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, s->info.structured_reply,
                            qiov, &reply, &payload)
    {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_OFFSET_DATA:
            /* special cased in nbd_co_receive_one_chunk, data is already
             * in qiov */
            break;
        case NBD_REPLY_TYPE_OFFSET_HOLE:
            ret = nbd_parse_offset_hole_payload(&reply.structured, payload,
                                                offset, qiov, &local_err);
            if (ret < 0) {
                s->quit = true;
                nbd_iter_error(&iter, true, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                /* not allowed reply type */
                s->quit = true;
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) for CMD_READ",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_error(&iter, true, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    error_propagate(errp, iter.err);
    return iter.ret;
}

static int nbd_co_receive_blockstatus_reply(NBDClientSession *s,
                                            uint64_t handle, uint64_t length,
                                            NBDExtent *extent, Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;
    bool received = false;

    assert(!extent->length);
    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, s->info.structured_reply,
                            NULL, &reply, &payload)
    {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_BLOCK_STATUS:
            if (received) {
                s->quit = true;
                error_setg(&local_err, "Several BLOCK_STATUS chunks in reply");
                nbd_iter_error(&iter, true, -EINVAL, &local_err);
            }
            received = true;

            ret = nbd_parse_blockstatus_payload(s, &reply.structured,
                                                payload, length, extent,
                                                &local_err);
            if (ret < 0) {
                s->quit = true;
                nbd_iter_error(&iter, true, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                s->quit = true;
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) "
                           "for CMD_BLOCK_STATUS",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_error(&iter, true, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    if (!extent->length && !iter.err) {
        error_setg(&iter.err,
                   "Server did not reply with any status extents");
        if (!iter.ret) {
            iter.ret = -EIO;
        }
    }
    error_propagate(errp, iter.err);
    return iter.ret;
}

static int nbd_co_request(BlockDriverState *bs, NBDRequest *request,
                          QEMUIOVector *write_qiov)
{
    int ret;
    Error *local_err = NULL;
    NBDClientSession *client = nbd_get_client_session(bs);

    assert(request->type != NBD_CMD_READ);
    if (write_qiov) {
        assert(request->type == NBD_CMD_WRITE);
        assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
    } else {
        assert(request->type != NBD_CMD_WRITE);
    }
    ret = nbd_co_send_request(bs, request, write_qiov);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_return_code(client, request->handle, &local_err);
    if (local_err) {
        trace_nbd_co_request_fail(request->from, request->len, request->handle,
                                  request->flags, request->type,
                                  nbd_cmd_lookup(request->type),
                                  ret, error_get_pretty(local_err));
        error_free(local_err);
    }
    return ret;
}

int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
                         uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    int ret;
    Error *local_err = NULL;
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_READ,
        .from = offset,
        .len = bytes,
    };

    assert(bytes <= NBD_MAX_BUFFER_SIZE);
    assert(!flags);

    if (!bytes) {
        return 0;
    }
    ret = nbd_co_send_request(bs, &request, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_cmdread_reply(client, request.handle, offset, qiov,
                                       &local_err);
    if (local_err) {
        trace_nbd_co_request_fail(request.from, request.len, request.handle,
                                  request.flags, request.type,
                                  nbd_cmd_lookup(request.type),
                                  ret, error_get_pretty(local_err));
        error_free(local_err);
    }
    return ret;
}

int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_WRITE,
        .from = offset,
        .len = bytes,
    };

    assert(!(client->info.flags & NBD_FLAG_READ_ONLY));
    if (flags & BDRV_REQ_FUA) {
        assert(client->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }

    assert(bytes <= NBD_MAX_BUFFER_SIZE);

    if (!bytes) {
        return 0;
    }
    return nbd_co_request(bs, &request, qiov);
}

int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
                                int bytes, BdrvRequestFlags flags)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_WRITE_ZEROES,
        .from = offset,
        .len = bytes,
    };

    assert(!(client->info.flags & NBD_FLAG_READ_ONLY));
    if (!(client->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
        return -ENOTSUP;
    }

    if (flags & BDRV_REQ_FUA) {
        assert(client->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }
    if (!(flags & BDRV_REQ_MAY_UNMAP)) {
        request.flags |= NBD_CMD_FLAG_NO_HOLE;
    }

    if (!bytes) {
        return 0;
    }
    return nbd_co_request(bs, &request, NULL);
}

int nbd_client_co_flush(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = { .type = NBD_CMD_FLUSH };

    if (!(client->info.flags & NBD_FLAG_SEND_FLUSH)) {
        return 0;
    }

    request.from = 0;
    request.len = 0;

    return nbd_co_request(bs, &request, NULL);
}

int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_TRIM,
        .from = offset,
        .len = bytes,
    };

    assert(!(client->info.flags & NBD_FLAG_READ_ONLY));
    if (!(client->info.flags & NBD_FLAG_SEND_TRIM) || !bytes) {
        return 0;
    }

    return nbd_co_request(bs, &request, NULL);
}

int coroutine_fn nbd_client_co_block_status(BlockDriverState *bs,
                                            bool want_zero,
                                            int64_t offset, int64_t bytes,
                                            int64_t *pnum, int64_t *map,
                                            BlockDriverState **file)
{
    int64_t ret;
    NBDExtent extent = { 0 };
    NBDClientSession *client = nbd_get_client_session(bs);
    Error *local_err = NULL;

    NBDRequest request = {
        .type = NBD_CMD_BLOCK_STATUS,
        .from = offset,
        .len = MIN(MIN_NON_ZERO(QEMU_ALIGN_DOWN(INT_MAX,
                                                bs->bl.request_alignment),
                                client->info.max_block), bytes),
        .flags = NBD_CMD_FLAG_REQ_ONE,
    };

    if (!client->info.base_allocation) {
        *pnum = bytes;
        return BDRV_BLOCK_DATA;
    }

    ret = nbd_co_send_request(bs, &request, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_blockstatus_reply(client, request.handle, bytes,
                                           &extent, &local_err);
    if (local_err) {
        trace_nbd_co_request_fail(request.from, request.len, request.handle,
                                  request.flags, request.type,
                                  nbd_cmd_lookup(request.type),
                                  ret, error_get_pretty(local_err));
        error_free(local_err);
    }
    if (ret < 0) {
        return ret;
    }

    assert(extent.length);
    *pnum = extent.length;
    return (extent.flags & NBD_STATE_HOLE ? 0 : BDRV_BLOCK_DATA) |
           (extent.flags & NBD_STATE_ZERO ? BDRV_BLOCK_ZERO : 0);
}

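/*
 * Mapping from NBD extent flags to block-layer status bits, as implemented
 * above:
 *
 *     NBD_STATE_HOLE clear -> BDRV_BLOCK_DATA (allocated data)
 *     NBD_STATE_ZERO set   -> BDRV_BLOCK_ZERO (reads as zeroes)
 *
 * so an extent that is both a hole and zero yields BDRV_BLOCK_ZERO alone.
 */
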
void nbd_client_detach_aio_context(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    qio_channel_detach_aio_context(QIO_CHANNEL(client->ioc));
}

void nbd_client_attach_aio_context(BlockDriverState *bs,
                                   AioContext *new_context)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    qio_channel_attach_aio_context(QIO_CHANNEL(client->ioc), new_context);
    aio_co_schedule(new_context, client->read_reply_co);
}

void nbd_client_close(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = { .type = NBD_CMD_DISC };

    if (client->ioc == NULL) {
        return;
    }

    nbd_send_request(client->ioc, &request);

    nbd_teardown_connection(bs);
}

int nbd_client_init(BlockDriverState *bs,
                    QIOChannelSocket *sioc,
                    const char *export,
                    QCryptoTLSCreds *tlscreds,
                    const char *hostname,
                    const char *x_dirty_bitmap,
                    Error **errp)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    int ret;

    /* NBD handshake */
    logout("session init %s\n", export);
    qio_channel_set_blocking(QIO_CHANNEL(sioc), true, NULL);

    client->info.request_sizes = true;
    client->info.structured_reply = true;
    client->info.base_allocation = true;
    client->info.x_dirty_bitmap = g_strdup(x_dirty_bitmap);
    client->info.name = g_strdup(export ?: "");
    ret = nbd_receive_negotiate(QIO_CHANNEL(sioc), tlscreds, hostname,
                                &client->ioc, &client->info, errp);
    g_free(client->info.x_dirty_bitmap);
    g_free(client->info.name);
    if (ret < 0) {
        logout("Failed to negotiate with the NBD server\n");
        return ret;
    }
    if (x_dirty_bitmap && !client->info.base_allocation) {
        error_setg(errp, "requested x-dirty-bitmap %s not found",
                   x_dirty_bitmap);
        ret = -EINVAL;
        goto fail;
    }
    if (client->info.flags & NBD_FLAG_READ_ONLY) {
        ret = bdrv_apply_auto_read_only(bs, "NBD export is read-only", errp);
        if (ret < 0) {
            goto fail;
        }
    }
    if (client->info.flags & NBD_FLAG_SEND_FUA) {
        bs->supported_write_flags = BDRV_REQ_FUA;
        bs->supported_zero_flags |= BDRV_REQ_FUA;
    }
    if (client->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
        bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
    }

    qemu_co_mutex_init(&client->send_mutex);
    qemu_co_queue_init(&client->free_sema);
    client->sioc = sioc;
    object_ref(OBJECT(client->sioc));

    if (!client->ioc) {
        client->ioc = QIO_CHANNEL(sioc);
        object_ref(OBJECT(client->ioc));
    }

    /* Now that we're connected, set the socket to be non-blocking and
     * kick the reply mechanism.  */
    qio_channel_set_blocking(QIO_CHANNEL(sioc), false, NULL);
    client->read_reply_co = qemu_coroutine_create(nbd_read_reply_entry, client);
    nbd_client_attach_aio_context(bs, bdrv_get_aio_context(bs));

    logout("Established connection with NBD server\n");

    return 0;

 fail:
    /*
     * We have connected, but must fail for other reasons. The
     * connection is still blocking; send NBD_CMD_DISC as a courtesy
     * to the server.
     */
    {
        NBDRequest request = { .type = NBD_CMD_DISC };

        nbd_send_request(client->ioc ?: QIO_CHANNEL(sioc), &request);

        nbd_teardown_connection(bs);

        return ret;
    }
}