/*
 * QEMU Block driver for NBD
 *
 * Copyright (C) 2016 Red Hat, Inc.
 * Copyright (C) 2008 Bull S.A.S.
 *     Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * Some parts:
 *    Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "nbd-client.h"
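
/* Editor's note on the mapping below: a request handle is the slot index in
 * s->requests[] XOR'd with the BlockDriverState pointer.  A plausible reading
 * is that this keeps handles unique per device, so a reply carrying a stale
 * or foreign handle almost always decodes to an out-of-range index that the
 * bounds check in nbd_read_reply_entry() rejects.
 */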
#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
#define INDEX_TO_HANDLE(bs, index)  ((index)  ^ (uint64_t)(intptr_t)(bs))

static void nbd_recv_coroutines_wake_all(NBDClientSession *s)
{
    int i;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        NBDClientRequest *req = &s->requests[i];

        if (req->coroutine && req->receiving) {
            aio_co_wake(req->coroutine);
        }
    }
}

static void nbd_teardown_connection(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);

    if (!client->ioc) { /* Already closed */
        return;
    }

    /* finish any pending coroutines */
    qio_channel_shutdown(client->ioc,
                         QIO_CHANNEL_SHUTDOWN_BOTH,
                         NULL);
    BDRV_POLL_WHILE(bs, client->read_reply_co);

    nbd_client_detach_aio_context(bs);
    object_unref(OBJECT(client->sioc));
    client->sioc = NULL;
    object_unref(OBJECT(client->ioc));
    client->ioc = NULL;
}
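
/* nbd_read_reply_entry
 * Dedicated coroutine that reads reply headers off the socket and wakes the
 * coroutine that issued the matching request.  On any read or protocol error
 * it exits the loop, sets s->quit, and wakes every waiting receiver.
 */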
static coroutine_fn void nbd_read_reply_entry(void *opaque)
{
    NBDClientSession *s = opaque;
    uint64_t i;
    int ret = 0;
    Error *local_err = NULL;

    while (!s->quit) {
        assert(s->reply.handle == 0);
        ret = nbd_receive_reply(s->ioc, &s->reply, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
        }
        if (ret <= 0) {
            break;
        }

        /* There's no need for a mutex on the receive side, because the
         * handler acts as a synchronization point and ensures that only
         * one coroutine is called until the reply finishes.
         */
        i = HANDLE_TO_INDEX(s, s->reply.handle);
        if (i >= MAX_NBD_REQUESTS ||
            !s->requests[i].coroutine ||
            !s->requests[i].receiving ||
            (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply))
        {
            break;
        }

        /* We're woken up again by the request itself.  Note that there
         * is no race between yielding and reentering read_reply_co.  This
         * is because:
         *
         * - if the request runs on the same AioContext, it is only
         *   entered after we yield
         *
         * - if the request runs on a different AioContext, reentering
         *   read_reply_co happens through a bottom half, which can only
         *   run after we yield.
         */
        aio_co_wake(s->requests[i].coroutine);
        qemu_coroutine_yield();
    }

    s->quit = true;
    nbd_recv_coroutines_wake_all(s);
    s->read_reply_co = NULL;
}
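
/* nbd_co_send_request
 * Claim a free request slot (waiting on free_sema while all MAX_NBD_REQUESTS
 * are in flight) and send the request header, plus the payload for writes,
 * under send_mutex.  Returns 0 on success or a negative errno; on failure the
 * slot is released and s->quit is set.
 */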
static int nbd_co_send_request(BlockDriverState *bs,
                               NBDRequest *request,
                               QEMUIOVector *qiov)
{
    NBDClientSession *s = nbd_get_client_session(bs);
    int rc, i;

    qemu_co_mutex_lock(&s->send_mutex);
    while (s->in_flight == MAX_NBD_REQUESTS) {
        qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
    }
    s->in_flight++;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->requests[i].coroutine == NULL) {
            break;
        }
    }

    g_assert(qemu_in_coroutine());
    assert(i < MAX_NBD_REQUESTS);

    s->requests[i].coroutine = qemu_coroutine_self();
    s->requests[i].offset = request->from;
    s->requests[i].receiving = false;

    request->handle = INDEX_TO_HANDLE(s, i);

    if (s->quit) {
        rc = -EIO;
        goto err;
    }
    if (!s->ioc) {
        rc = -EPIPE;
        goto err;
    }

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0 && !s->quit) {
            if (qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
                                       NULL) < 0) {
                rc = -EIO;
            }
        } else if (rc >= 0) {
            rc = -EIO;
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }

err:
    if (rc < 0) {
        s->quit = true;
        s->requests[i].coroutine = NULL;
        s->in_flight--;
        qemu_co_queue_next(&s->free_sema);
    }
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}
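
/* Helpers for walking a structured reply payload: each one returns the next
 * big-endian field and advances *payload past it.
 */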
static inline uint16_t payload_advance16(uint8_t **payload)
{
    *payload += 2;
    return lduw_be_p(*payload - 2);
}

static inline uint32_t payload_advance32(uint8_t **payload)
{
    *payload += 4;
    return ldl_be_p(*payload - 4);
}

static inline uint64_t payload_advance64(uint8_t **payload)
{
    *payload += 8;
    return ldq_be_p(*payload - 8);
}
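
/* nbd_parse_offset_hole_payload
 * Parse an NBD_REPLY_TYPE_OFFSET_HOLE payload (a 64-bit offset followed by a
 * 32-bit hole size, both big-endian) and zero the matching bytes of @qiov,
 * after checking that the hole lies within the originally requested region.
 */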
static int nbd_parse_offset_hole_payload(NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_offset,
                                         QEMUIOVector *qiov, Error **errp)
{
    uint64_t offset;
    uint32_t hole_size;

    if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_HOLE");
        return -EINVAL;
    }

    offset = payload_advance64(&payload);
    hole_size = payload_advance32(&payload);

    if (offset < orig_offset || hole_size > qiov->size ||
        offset > orig_offset + qiov->size - hole_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }

    qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);

    return 0;
}

/* nbd_parse_error_payload
 * on success @errp contains message describing nbd error reply
 */
static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
                                   uint8_t *payload, int *request_ret,
                                   Error **errp)
{
    uint32_t error;
    uint16_t message_size;

    assert(chunk->type & (1 << 15));

    if (chunk->length < sizeof(error) + sizeof(message_size)) {
        error_setg(errp,
                   "Protocol error: invalid payload for structured error");
        return -EINVAL;
    }

    error = nbd_errno_to_system_errno(payload_advance32(&payload));
    if (error == 0) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with error = 0");
        return -EINVAL;
    }

    *request_ret = -error;
    message_size = payload_advance16(&payload);

    if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with incorrect message size");
        return -EINVAL;
    }

    /* TODO: Add a trace point to mention the server complaint */

    /* TODO handle ERROR_OFFSET */

    return 0;
}

static int nbd_co_receive_offset_data_payload(NBDClientSession *s,
                                              uint64_t orig_offset,
                                              QEMUIOVector *qiov, Error **errp)
{
    QEMUIOVector sub_qiov;
    uint64_t offset;
    size_t data_size;
    int ret;
    NBDStructuredReplyChunk *chunk = &s->reply.structured;

    assert(nbd_reply_is_structured(&s->reply));

    if (chunk->length < sizeof(offset)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_DATA");
        return -EINVAL;
    }

    if (nbd_read(s->ioc, &offset, sizeof(offset), errp) < 0) {
        return -EIO;
    }
    be64_to_cpus(&offset);

    data_size = chunk->length - sizeof(offset);
    if (offset < orig_offset || data_size > qiov->size ||
        offset > orig_offset + qiov->size - data_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }

    qemu_iovec_init(&sub_qiov, qiov->niov);
    qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
    ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
    qemu_iovec_destroy(&sub_qiov);

    return ret < 0 ? -EIO : 0;
}
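
/* Editor's note: this is an upper bound on a structured reply payload that
 * the client is willing to buffer in a malloc'd chunk; anything larger is
 * rejected as a protocol error, presumably so a broken or malicious server
 * cannot force an arbitrarily large allocation.
 */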
#define NBD_MAX_MALLOC_PAYLOAD 1000
/* nbd_co_receive_structured_payload
 */
static coroutine_fn int nbd_co_receive_structured_payload(
        NBDClientSession *s, void **payload, Error **errp)
{
    int ret;
    uint32_t len;

    assert(nbd_reply_is_structured(&s->reply));

    len = s->reply.structured.length;

    if (len == 0) {
        return 0;
    }

    if (payload == NULL) {
        error_setg(errp, "Unexpected structured payload");
        return -EINVAL;
    }

    if (len > NBD_MAX_MALLOC_PAYLOAD) {
        error_setg(errp, "Payload too large");
        return -EINVAL;
    }

    *payload = g_new(char, len);
    ret = nbd_read(s->ioc, *payload, len, errp);
    if (ret < 0) {
        g_free(*payload);
        *payload = NULL;
        return ret;
    }

    return 0;
}

/* nbd_co_do_receive_one_chunk
 * for simple reply:
 *   set request_ret to received reply error
 *   if qiov is not NULL: read payload to @qiov
 * for structured reply chunk:
 *   if error chunk: read payload, set @request_ret, do not set @payload
 *   else if offset_data chunk: read payload data to @qiov, do not set @payload
 *   else: read payload to @payload
 *
 * If function fails, @errp contains corresponding error message, and the
 * connection with the server is suspect.  If it returns 0, then the
 * transaction succeeded (although @request_ret may be a negative errno
 * corresponding to the server's error reply), and errp is unchanged.
 */
static coroutine_fn int nbd_co_do_receive_one_chunk(
        NBDClientSession *s, uint64_t handle, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
{
    int ret;
    int i = HANDLE_TO_INDEX(s, handle);
    void *local_payload = NULL;
    NBDStructuredReplyChunk *chunk;

    if (payload) {
        *payload = NULL;
    }
    *request_ret = 0;

    /* Wait until we're woken up by nbd_read_reply_entry.  */
    s->requests[i].receiving = true;
    qemu_coroutine_yield();
    s->requests[i].receiving = false;
    if (!s->ioc || s->quit) {
        error_setg(errp, "Connection closed");
        return -EIO;
    }

    assert(s->reply.handle == handle);

    if (nbd_reply_is_simple(&s->reply)) {
        if (only_structured) {
            error_setg(errp, "Protocol error: simple reply when structured "
                             "reply chunk was expected");
            return -EINVAL;
        }

        *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
        if (*request_ret < 0 || !qiov) {
            return 0;
        }

        return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
                                     errp) < 0 ? -EIO : 0;
    }

    /* handle structured reply chunk */
    assert(s->info.structured_reply);
    chunk = &s->reply.structured;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without"
                             " NBD_REPLY_FLAG_DONE flag set");
            return -EINVAL;
        }
        return 0;
    }

    if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
        if (!qiov) {
            error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk");
            return -EINVAL;
        }

        return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
                                                  qiov, errp);
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        payload = &local_payload;
    }

    ret = nbd_co_receive_structured_payload(s, payload, errp);
    if (ret < 0) {
        return ret;
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp);
        g_free(local_payload);
        return ret;
    }

    return 0;
}

/* nbd_co_receive_one_chunk
 * Read reply, wake up read_reply_co and set s->quit if needed.
 * Return value is a fatal error code or normal nbd reply error code
 */
static coroutine_fn int nbd_co_receive_one_chunk(
        NBDClientSession *s, uint64_t handle, bool only_structured,
        QEMUIOVector *qiov, NBDReply *reply, void **payload, Error **errp)
{
    int request_ret;
    int ret = nbd_co_do_receive_one_chunk(s, handle, only_structured,
                                          &request_ret, qiov, payload, errp);

    if (ret < 0) {
        s->quit = true;
    } else {
        /* For assert at loop start in nbd_read_reply_entry */
        if (reply) {
            *reply = s->reply;
        }
        s->reply.handle = 0;
        ret = request_ret;
    }

    if (s->read_reply_co) {
        aio_co_wake(s->read_reply_co);
    }

    return ret;
}
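
/* Iterator state for NBD_FOREACH_REPLY_CHUNK below: @ret and @err keep the
 * first (or first fatal) error seen, @done is set once the final chunk of a
 * reply has been consumed, and @only_structured records whether subsequent
 * chunks are required to be structured.
 */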
typedef struct NBDReplyChunkIter {
    int ret;
    Error *err;
    bool done, only_structured;
} NBDReplyChunkIter;

static void nbd_iter_error(NBDReplyChunkIter *iter, bool fatal,
                           int ret, Error **local_err)
{
    assert(ret < 0);

    if (fatal || iter->ret == 0) {
        if (iter->ret != 0) {
            error_free(iter->err);
            iter->err = NULL;
        }
        iter->ret = ret;
        error_propagate(&iter->err, *local_err);
    } else {
        error_free(*local_err);
    }

    *local_err = NULL;
}

/* NBD_FOREACH_REPLY_CHUNK
 * Loop over the chunks of one reply; the iterator ends the loop once the
 * final chunk has been consumed or a fatal error has occurred.
 */
#define NBD_FOREACH_REPLY_CHUNK(s, iter, handle, structured, \
                                qiov, reply, payload) \
    for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
         nbd_reply_chunk_iter_receive(s, &iter, handle, qiov, reply, payload);)

/* nbd_reply_chunk_iter_receive
 * The workhorse behind NBD_FOREACH_REPLY_CHUNK: receive the next chunk for
 * @handle and return true while the loop body should still run.
 */
static bool nbd_reply_chunk_iter_receive(NBDClientSession *s,
                                         NBDReplyChunkIter *iter,
                                         uint64_t handle,
                                         QEMUIOVector *qiov, NBDReply *reply,
                                         void **payload)
{
    int ret;
    NBDReply local_reply;
    NBDStructuredReplyChunk *chunk;
    Error *local_err = NULL;
    if (s->quit) {
        error_setg(&local_err, "Connection closed");
        nbd_iter_error(iter, true, -EIO, &local_err);
        goto break_loop;
    }

    if (iter->done) {
        /* Previous iteration was last. */
        goto break_loop;
    }

    if (reply == NULL) {
        reply = &local_reply;
    }

    ret = nbd_co_receive_one_chunk(s, handle, iter->only_structured,
                                   qiov, reply, payload, &local_err);
    if (ret < 0) {
        /* If it is a fatal error s->quit is set by nbd_co_receive_one_chunk */
        nbd_iter_error(iter, s->quit, ret, &local_err);
    }

    /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
    if (nbd_reply_is_simple(&s->reply) || s->quit) {
        goto break_loop;
    }

    chunk = &reply->structured;
    iter->only_structured = true;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        /* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */
        assert(chunk->flags & NBD_REPLY_FLAG_DONE);
        goto break_loop;
    }

    if (chunk->flags & NBD_REPLY_FLAG_DONE) {
        /* This iteration is last. */
        iter->done = true;
    }

    /* Execute the loop body */
    return true;

break_loop:
    s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL;

    qemu_co_mutex_lock(&s->send_mutex);
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_co_mutex_unlock(&s->send_mutex);

    return false;
}
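
/* nbd_co_receive_return_code
 * Drain all reply chunks for @handle, keeping only the resulting error code;
 * used for commands that carry no payload back to the caller.
 */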
static int nbd_co_receive_return_code(NBDClientSession *s, uint64_t handle,
                                      Error **errp)
{
    NBDReplyChunkIter iter;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) {
        /* nbd_reply_chunk_iter_receive does all the work */
    }

    error_propagate(errp, iter.err);
    return iter.ret;
}

static int nbd_co_receive_cmdread_reply(NBDClientSession *s, uint64_t handle,
                                        uint64_t offset, QEMUIOVector *qiov,
                                        Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, s->info.structured_reply,
                            qiov, &reply, &payload)
    {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_OFFSET_DATA:
            /* special cased in nbd_co_receive_one_chunk, data is already
             * in qiov */
            break;
        case NBD_REPLY_TYPE_OFFSET_HOLE:
            ret = nbd_parse_offset_hole_payload(&reply.structured, payload,
                                                offset, qiov, &local_err);
            if (ret < 0) {
                s->quit = true;
                nbd_iter_error(&iter, true, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                /* not allowed reply type */
                s->quit = true;
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) for CMD_READ",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_error(&iter, true, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    error_propagate(errp, iter.err);
    return iter.ret;
}
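
/* nbd_co_request
 * Common send-then-wait path for everything except NBD_CMD_READ: send the
 * request (with @write_qiov as the payload for writes), then collect the
 * server's return code.
 */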
static int nbd_co_request(BlockDriverState *bs, NBDRequest *request,
                          QEMUIOVector *write_qiov)
{
    int ret;
    Error *local_err = NULL;
    NBDClientSession *client = nbd_get_client_session(bs);

    assert(request->type != NBD_CMD_READ);
    if (write_qiov) {
        assert(request->type == NBD_CMD_WRITE);
        assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
    } else {
        assert(request->type != NBD_CMD_WRITE);
    }

    ret = nbd_co_send_request(bs, request, write_qiov);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_return_code(client, request->handle, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
    return ret;
}

int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
                         uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    int ret;
    Error *local_err = NULL;
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_READ,
        .from = offset,
        .len = bytes,
    };

    assert(bytes <= NBD_MAX_BUFFER_SIZE);
    assert(!flags);

    ret = nbd_co_send_request(bs, &request, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_cmdread_reply(client, request.handle, offset, qiov,
                                       &local_err);
    if (ret < 0) {
        error_report_err(local_err);
    }
    return ret;
}

int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_WRITE,
        .from = offset,
        .len = bytes,
    };

    if (flags & BDRV_REQ_FUA) {
        assert(client->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }

    assert(bytes <= NBD_MAX_BUFFER_SIZE);

    return nbd_co_request(bs, &request, qiov);
}

int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
                                int bytes, BdrvRequestFlags flags)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_WRITE_ZEROES,
        .from = offset,
        .len = bytes,
    };

    if (!(client->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
        return -ENOTSUP;
    }

    if (flags & BDRV_REQ_FUA) {
        assert(client->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }
    if (!(flags & BDRV_REQ_MAY_UNMAP)) {
        request.flags |= NBD_CMD_FLAG_NO_HOLE;
    }

    return nbd_co_request(bs, &request, NULL);
}

int nbd_client_co_flush(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = { .type = NBD_CMD_FLUSH };

    if (!(client->info.flags & NBD_FLAG_SEND_FLUSH)) {
        return 0;
    }

    request.from = 0;
    request.len = 0;

    return nbd_co_request(bs, &request, NULL);
}

int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_TRIM,
        .from = offset,
        .len = bytes,
    };

    if (!(client->info.flags & NBD_FLAG_SEND_TRIM)) {
        return 0;
    }

    return nbd_co_request(bs, &request, NULL);
}
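
/* The reply coroutine follows the block device between AioContexts: it is
 * detached before a context switch or teardown, and re-scheduled into the
 * new context by nbd_client_attach_aio_context().
 */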
void nbd_client_detach_aio_context(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    qio_channel_detach_aio_context(QIO_CHANNEL(client->ioc));
}

void nbd_client_attach_aio_context(BlockDriverState *bs,
                                   AioContext *new_context)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    qio_channel_attach_aio_context(QIO_CHANNEL(client->ioc), new_context);
    aio_co_schedule(new_context, client->read_reply_co);
}

void nbd_client_close(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = { .type = NBD_CMD_DISC };

    if (client->ioc == NULL) {
        return;
    }

    nbd_send_request(client->ioc, &request);

    nbd_teardown_connection(bs);
}
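
/* nbd_client_init
 * Perform the NBD handshake on @sioc (blocking), advertising support for
 * structured replies, translate the negotiated flags into block-layer
 * capabilities, and start the reply-reading coroutine.
 */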
int nbd_client_init(BlockDriverState *bs,
                    QIOChannelSocket *sioc,
                    const char *export,
                    QCryptoTLSCreds *tlscreds,
                    const char *hostname,
                    Error **errp)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    int ret;

    /* NBD handshake */
    logout("session init %s\n", export);
    qio_channel_set_blocking(QIO_CHANNEL(sioc), true, NULL);

    client->info.request_sizes = true;
    client->info.structured_reply = true;
    ret = nbd_receive_negotiate(QIO_CHANNEL(sioc), export,
                                tlscreds, hostname,
                                &client->ioc, &client->info, errp);
    if (ret < 0) {
        logout("Failed to negotiate with the NBD server\n");
        return ret;
    }
    if (client->info.flags & NBD_FLAG_SEND_FUA) {
        bs->supported_write_flags = BDRV_REQ_FUA;
        bs->supported_zero_flags |= BDRV_REQ_FUA;
    }
    if (client->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
        bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
    }
    if (client->info.min_block > bs->bl.request_alignment) {
        bs->bl.request_alignment = client->info.min_block;
    }

    qemu_co_mutex_init(&client->send_mutex);
    qemu_co_queue_init(&client->free_sema);
    client->sioc = sioc;
    object_ref(OBJECT(client->sioc));

    if (!client->ioc) {
        client->ioc = QIO_CHANNEL(sioc);
        object_ref(OBJECT(client->ioc));
    }

    /* Now that we're connected, set the socket to be non-blocking and
     * kick the reply mechanism.  */
    qio_channel_set_blocking(QIO_CHANNEL(sioc), false, NULL);
    client->read_reply_co = qemu_coroutine_create(nbd_read_reply_entry, client);
    nbd_client_attach_aio_context(bs, bdrv_get_aio_context(bs));

    logout("Established connection with NBD server\n");
    return 0;
}