/*
 * Copyright Red Hat
 * Copyright (C) 2005  Anthony Liguori <anthony@codemonkey.ws>
 *
 * Network Block Device Client Side
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/queue.h"
#include "trace.h"
#include "nbd-internal.h"
#include "qemu/cutils.h"

/* Definitions for opaque data types */

static QTAILQ_HEAD(, NBDExport) exports = QTAILQ_HEAD_INITIALIZER(exports);

/* That's all folks */

/* Basic flow for negotiation

   Server         Client
   Negotiate

   or

   Server         Client
   Negotiate #1
                  Option
   Negotiate #2

   ----

   followed by

   Server         Client
                  Request
   Response
                  Request
   Response
   ...
   ...
                  Request (type == 2)

*/

/* Send an option request.
 *
 * The request is for option @opt, with @data containing @len bytes of
 * additional payload for the request (@len may be -1 to treat @data as
 * a C string; and @data may be NULL if @len is 0).
 * Return 0 if successful, -1 with errp set if it is impossible to
 * continue. */
static int nbd_send_option_request(QIOChannel *ioc, uint32_t opt,
                                   uint32_t len, const char *data,
                                   Error **errp)
{
    ERRP_GUARD();
    NBDOption req;
    QEMU_BUILD_BUG_ON(sizeof(req) != 16);

    if (len == -1) {
        req.length = len = strlen(data);
    }
    trace_nbd_send_option_request(opt, nbd_opt_lookup(opt), len);
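
    /*
     * The 16-byte option header written below (all fields big-endian):
     * 8-byte NBD_OPTS_MAGIC, 4-byte option number, 4-byte payload
     * length, followed by @len bytes of payload, if any.
     */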
    stq_be_p(&req.magic, NBD_OPTS_MAGIC);
    stl_be_p(&req.option, opt);
    stl_be_p(&req.length, len);

    if (nbd_write(ioc, &req, sizeof(req), errp) < 0) {
        error_prepend(errp, "Failed to send option request header: ");
        return -1;
    }

    if (len && nbd_write(ioc, (char *) data, len, errp) < 0) {
        error_prepend(errp, "Failed to send option request data: ");
        return -1;
    }

    return 0;
}

/* Send NBD_OPT_ABORT as a courtesy to let the server know that we are
 * not going to attempt further negotiation. */
static void nbd_send_opt_abort(QIOChannel *ioc)
{
    /* Technically, a compliant server is supposed to reply to us; but
     * older servers disconnected instead. At any rate, we're allowed
     * to disconnect without waiting for the server reply, so we don't
     * even care if the request makes it to the server, let alone
     * waiting around for whether the server replies. */
    nbd_send_option_request(ioc, NBD_OPT_ABORT, 0, NULL, NULL);
}

/* Receive the header of an option reply, which should match the given
 * opt.  Read through the length field, but NOT the length bytes of
 * payload.  Return 0 if successful, -1 with errp set if it is
 * impossible to continue. */
static int nbd_receive_option_reply(QIOChannel *ioc, uint32_t opt,
                                    NBDOptionReply *reply, Error **errp)
{
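    /*
     * The fixed-size option reply header read below is 20 bytes:
     * 8-byte reply magic, 4-byte option number, 4-byte reply type,
     * and the 4-byte length of any payload that follows (the payload
     * itself is not read here).
     */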
    QEMU_BUILD_BUG_ON(sizeof(*reply) != 20);
    if (nbd_read(ioc, reply, sizeof(*reply), "option reply", errp) < 0) {
        nbd_send_opt_abort(ioc);
        return -1;
    }
    reply->magic = be64_to_cpu(reply->magic);
    reply->option = be32_to_cpu(reply->option);
    reply->type = be32_to_cpu(reply->type);
    reply->length = be32_to_cpu(reply->length);

    trace_nbd_receive_option_reply(reply->option, nbd_opt_lookup(reply->option),
                                   reply->type, nbd_rep_lookup(reply->type),
                                   reply->length);

    if (reply->magic != NBD_REP_MAGIC) {
        error_setg(errp, "Unexpected option reply magic");
        nbd_send_opt_abort(ioc);
        return -1;
    }
    if (reply->option != opt) {
        error_setg(errp, "Unexpected option type %u (%s), expected %u (%s)",
                   reply->option, nbd_opt_lookup(reply->option),
                   opt, nbd_opt_lookup(opt));
        nbd_send_opt_abort(ioc);
        return -1;
    }
    return 0;
}

/*
 * If reply represents success, return 1 without further action.  If
 * reply represents an error, consume the optional payload of the
 * packet on ioc.  Then return 0 for unsupported (so the client can
 * fall back to other approaches), where @strict determines if only
 * ERR_UNSUP or all errors fit that category, or -1 with errp set for
 * other errors.
 */
static int nbd_handle_reply_err(QIOChannel *ioc, NBDOptionReply *reply,
                                bool strict, Error **errp)
{
    ERRP_GUARD();
    g_autofree char *msg = NULL;

    if (!(reply->type & (1 << 31))) {
        return 1;
    }

    if (reply->length) {
        if (reply->length > NBD_MAX_BUFFER_SIZE) {
            error_setg(errp, "server error %" PRIu32
                       " (%s) message is too long",
                       reply->type, nbd_rep_lookup(reply->type));
            goto err;
        }
        msg = g_malloc(reply->length + 1);
        if (nbd_read(ioc, msg, reply->length, NULL, errp) < 0) {
            error_prepend(errp, "Failed to read option error %" PRIu32
                          " (%s) message: ",
                          reply->type, nbd_rep_lookup(reply->type));
            goto err;
        }
        msg[reply->length] = '\0';
        trace_nbd_server_error_msg(reply->type,
                                   nbd_reply_type_lookup(reply->type), msg);
    }

    if (reply->type == NBD_REP_ERR_UNSUP || !strict) {
        trace_nbd_reply_err_ignored(reply->option,
                                    nbd_opt_lookup(reply->option),
                                    reply->type, nbd_rep_lookup(reply->type));
        return 0;
    }

    switch (reply->type) {
    case NBD_REP_ERR_POLICY:
        error_setg(errp, "Denied by server for option %" PRIu32 " (%s)",
                   reply->option, nbd_opt_lookup(reply->option));
        break;

    case NBD_REP_ERR_INVALID:
        error_setg(errp, "Invalid parameters for option %" PRIu32 " (%s)",
                   reply->option, nbd_opt_lookup(reply->option));
        break;

    case NBD_REP_ERR_PLATFORM:
        error_setg(errp, "Server lacks support for option %" PRIu32 " (%s)",
                   reply->option, nbd_opt_lookup(reply->option));
        break;

    case NBD_REP_ERR_TLS_REQD:
        error_setg(errp, "TLS negotiation required before option %" PRIu32
                   " (%s)", reply->option, nbd_opt_lookup(reply->option));
        error_append_hint(errp, "Did you forget a valid tls-creds?\n");
        break;

    case NBD_REP_ERR_UNKNOWN:
        error_setg(errp, "Requested export not available");
        break;

    case NBD_REP_ERR_SHUTDOWN:
        error_setg(errp, "Server shutting down before option %" PRIu32 " (%s)",
                   reply->option, nbd_opt_lookup(reply->option));
        break;

    case NBD_REP_ERR_BLOCK_SIZE_REQD:
        error_setg(errp, "Server requires INFO_BLOCK_SIZE for option %" PRIu32
                   " (%s)", reply->option, nbd_opt_lookup(reply->option));
        break;

    default:
        error_setg(errp, "Unknown error code when asking for option %" PRIu32
                   " (%s)", reply->option, nbd_opt_lookup(reply->option));
        break;
    }

    if (msg) {
        error_append_hint(errp, "server reported: %s\n", msg);
    }

 err:
    nbd_send_opt_abort(ioc);
    return -1;
}

/* nbd_receive_list:
 * Process another portion of the NBD_OPT_LIST reply, populating any
 * name received into *@name. If @description is non-NULL, and the
 * server provided a description, that is also populated. The caller
 * must eventually call g_free() on success.
 * Returns 1 if name and description were set and iteration must continue,
 *         0 if iteration is complete (including if OPT_LIST unsupported),
 *         -1 with @errp set if an unrecoverable error occurred.
 */
static int nbd_receive_list(QIOChannel *ioc, char **name, char **description,
                            Error **errp)
{
    NBDOptionReply reply;
    uint32_t len;
    uint32_t namelen;
    g_autofree char *local_name = NULL;
    g_autofree char *local_desc = NULL;
    int error;

    if (nbd_receive_option_reply(ioc, NBD_OPT_LIST, &reply, errp) < 0) {
        return -1;
    }
    error = nbd_handle_reply_err(ioc, &reply, true, errp);
    if (error <= 0) {
        return error;
    }
    len = reply.length;

    if (reply.type == NBD_REP_ACK) {
        if (len != 0) {
            error_setg(errp, "length too long for option end");
            nbd_send_opt_abort(ioc);
            return -1;
        }
        return 0;
    } else if (reply.type != NBD_REP_SERVER) {
        error_setg(errp, "Unexpected reply type %u (%s), expected %u (%s)",
                   reply.type, nbd_rep_lookup(reply.type),
                   NBD_REP_SERVER, nbd_rep_lookup(NBD_REP_SERVER));
        nbd_send_opt_abort(ioc);
        return -1;
    }

    if (len < sizeof(namelen) || len > NBD_MAX_BUFFER_SIZE) {
        error_setg(errp, "incorrect option length %" PRIu32, len);
        nbd_send_opt_abort(ioc);
        return -1;
    }
    if (nbd_read32(ioc, &namelen, "option name length", errp) < 0) {
        nbd_send_opt_abort(ioc);
        return -1;
    }
    len -= sizeof(namelen);
    if (len < namelen || namelen > NBD_MAX_STRING_SIZE) {
        error_setg(errp, "incorrect name length in server's list response");
        nbd_send_opt_abort(ioc);
        return -1;
    }

    local_name = g_malloc(namelen + 1);
    if (nbd_read(ioc, local_name, namelen, "export name", errp) < 0) {
        nbd_send_opt_abort(ioc);
        return -1;
    }
    local_name[namelen] = '\0';
    len -= namelen;
    if (len) {
        if (len > NBD_MAX_STRING_SIZE) {
            error_setg(errp, "incorrect description length in server's "
                       "list response");
            nbd_send_opt_abort(ioc);
            return -1;
        }
        local_desc = g_malloc(len + 1);
        if (nbd_read(ioc, local_desc, len, "export description", errp) < 0) {
            nbd_send_opt_abort(ioc);
            return -1;
        }
        local_desc[len] = '\0';
    }

    trace_nbd_receive_list(local_name, local_desc ?: "");
    *name = g_steal_pointer(&local_name);
    if (description) {
        *description = g_steal_pointer(&local_desc);
    }
    return 1;
}

/*
 * nbd_opt_info_or_go:
 * Send option for NBD_OPT_INFO or NBD_OPT_GO and parse the reply.
 * Returns -1 if the option proves the export @info->name cannot be
 * used, 0 if the option is unsupported (fall back to NBD_OPT_LIST and
 * NBD_OPT_EXPORT_NAME in that case), and > 0 if the export is good to
 * go (with the rest of @info populated).
 */
static int nbd_opt_info_or_go(QIOChannel *ioc, uint32_t opt,
                              NBDExportInfo *info, Error **errp)
{
    ERRP_GUARD();
    NBDOptionReply reply;
    uint32_t len = strlen(info->name);
    uint16_t type;
    int error;
    char *buf;

    /* The protocol requires that the server send NBD_INFO_EXPORT with
     * a non-zero flags (at least NBD_FLAG_HAS_FLAGS must be set); so
     * flags still 0 is a witness of a broken server. */
    info->flags = 0;

    assert(opt == NBD_OPT_GO || opt == NBD_OPT_INFO);
    trace_nbd_opt_info_go_start(nbd_opt_lookup(opt), info->name);
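    /*
     * Option payload, as assembled below: 4-byte export name length,
     * the export name itself, a 2-byte count of information requests
     * (0 or 1 here), and, when sizes were requested, one 2-byte
     * NBD_INFO_BLOCK_SIZE request type.
     */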
    buf = g_malloc(4 + len + 2 + 2 * info->request_sizes + 1);
    stl_be_p(buf, len);
    memcpy(buf + 4, info->name, len);
    /* At most one request, everything else up to server */
    stw_be_p(buf + 4 + len, info->request_sizes);
    if (info->request_sizes) {
        stw_be_p(buf + 4 + len + 2, NBD_INFO_BLOCK_SIZE);
    }
    error = nbd_send_option_request(ioc, opt,
                                    4 + len + 2 + 2 * info->request_sizes,
                                    buf, errp);
    g_free(buf);
    if (error < 0) {
        return -1;
    }

    while (1) {
        if (nbd_receive_option_reply(ioc, opt, &reply, errp) < 0) {
            return -1;
        }
        error = nbd_handle_reply_err(ioc, &reply, true, errp);
        if (error <= 0) {
            return error;
        }
        len = reply.length;

        if (reply.type == NBD_REP_ACK) {
            /*
             * Server is done sending info, and moved into transmission
             * phase for NBD_OPT_GO, but make sure it sent flags
             */
            if (len) {
                error_setg(errp, "server sent invalid NBD_REP_ACK");
                return -1;
            }
            if (!info->flags) {
                error_setg(errp, "broken server omitted NBD_INFO_EXPORT");
                return -1;
            }
            trace_nbd_opt_info_go_success(nbd_opt_lookup(opt));
            return 1;
        }
        if (reply.type != NBD_REP_INFO) {
            error_setg(errp, "unexpected reply type %u (%s), expected %u (%s)",
                       reply.type, nbd_rep_lookup(reply.type),
                       NBD_REP_INFO, nbd_rep_lookup(NBD_REP_INFO));
            nbd_send_opt_abort(ioc);
            return -1;
        }
        if (len < sizeof(type)) {
            error_setg(errp, "NBD_REP_INFO length %" PRIu32 " is too short",
                       len);
            nbd_send_opt_abort(ioc);
            return -1;
        }
        if (nbd_read16(ioc, &type, "info type", errp) < 0) {
            nbd_send_opt_abort(ioc);
            return -1;
        }
        len -= sizeof(type);
        switch (type) {
        case NBD_INFO_EXPORT:
            if (len != sizeof(info->size) + sizeof(info->flags)) {
                error_setg(errp, "remaining export info len %" PRIu32
                           " is unexpected size", len);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (nbd_read64(ioc, &info->size, "info size", errp) < 0) {
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (nbd_read16(ioc, &info->flags, "info flags", errp) < 0) {
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (info->min_block &&
                !QEMU_IS_ALIGNED(info->size, info->min_block)) {
                error_setg(errp, "export size %" PRIu64 " is not multiple of "
                           "minimum block size %" PRIu32, info->size,
                           info->min_block);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            trace_nbd_receive_negotiate_size_flags(info->size, info->flags);
            break;

        case NBD_INFO_BLOCK_SIZE:
            if (len != sizeof(info->min_block) * 3) {
                error_setg(errp, "remaining export info len %" PRIu32
                           " is unexpected size", len);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (nbd_read32(ioc, &info->min_block, "info minimum block size",
                           errp) < 0) {
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (!is_power_of_2(info->min_block)) {
                error_setg(errp, "server minimum block size %" PRIu32
                           " is not a power of two", info->min_block);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (nbd_read32(ioc, &info->opt_block, "info preferred block size",
                           errp) < 0) {
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (!is_power_of_2(info->opt_block) ||
                info->opt_block < info->min_block) {
                error_setg(errp, "server preferred block size %" PRIu32
                           " is not valid", info->opt_block);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (nbd_read32(ioc, &info->max_block, "info maximum block size",
                           errp) < 0) {
                nbd_send_opt_abort(ioc);
                return -1;
            }
            if (info->max_block < info->min_block) {
                error_setg(errp, "server maximum block size %" PRIu32
                           " is not valid", info->max_block);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            trace_nbd_opt_info_block_size(info->min_block, info->opt_block,
                                          info->max_block);
            break;

        default:
            /*
             * Not worth the bother to check if NBD_INFO_NAME or
             * NBD_INFO_DESCRIPTION exceed NBD_MAX_STRING_SIZE.
             */
            trace_nbd_opt_info_unknown(type, nbd_info_lookup(type));
            if (nbd_drop(ioc, len, errp) < 0) {
                error_prepend(errp, "Failed to read info payload: ");
                nbd_send_opt_abort(ioc);
                return -1;
            }
            break;
        }
    }
}

/* Return -1 on failure, 0 if wantname is an available export. */
static int nbd_receive_query_exports(QIOChannel *ioc,
                                     const char *wantname,
                                     Error **errp)
{
    bool list_empty = true;
    bool found_export = false;

    trace_nbd_receive_query_exports_start(wantname);
    if (nbd_send_option_request(ioc, NBD_OPT_LIST, 0, NULL, errp) < 0) {
        return -1;
    }

    while (1) {
        char *name;
        int ret = nbd_receive_list(ioc, &name, NULL, errp);

        if (ret < 0) {
            /* Server gave unexpected reply */
            return -1;
        } else if (ret == 0) {
            /* Done iterating. */
            if (list_empty) {
                /*
                 * We don't have enough context to tell a server that
                 * sent an empty list apart from a server that does
                 * not support the list command; but as this function
                 * is just used to trigger a nicer error message
                 * before trying NBD_OPT_EXPORT_NAME, assume the
                 * export is available.
                 */
                return 0;
            } else if (!found_export) {
                error_setg(errp, "No export with name '%s' available",
                           wantname);
                nbd_send_opt_abort(ioc);
                return -1;
            }
            trace_nbd_receive_query_exports_success(wantname);
            return 0;
        }
        list_empty = false;
        if (!strcmp(name, wantname)) {
            found_export = true;
        }
        g_free(name);
    }
}

/*
 * nbd_request_simple_option: Send an option request, and parse the reply.
 * @strict controls whether ERR_UNSUP or all errors produce 0 status.
 * return 1 for successful negotiation,
 *        0 if operation is unsupported,
 *        -1 with errp set for any other error
 */
static int nbd_request_simple_option(QIOChannel *ioc, int opt, bool strict,
                                     Error **errp)
{
    NBDOptionReply reply;
    int error;

    if (nbd_send_option_request(ioc, opt, 0, NULL, errp) < 0) {
        return -1;
    }

    if (nbd_receive_option_reply(ioc, opt, &reply, errp) < 0) {
        return -1;
    }
    error = nbd_handle_reply_err(ioc, &reply, strict, errp);
    if (error <= 0) {
        return error;
    }

    if (reply.type != NBD_REP_ACK) {
        error_setg(errp, "Server answered option %d (%s) with unexpected "
                   "reply %" PRIu32 " (%s)", opt, nbd_opt_lookup(opt),
                   reply.type, nbd_rep_lookup(reply.type));
        nbd_send_opt_abort(ioc);
        return -1;
    }

    if (reply.length != 0) {
        error_setg(errp, "Option %d ('%s') response length is %" PRIu32
                   " (it should be zero)", opt, nbd_opt_lookup(opt),
                   reply.length);
        nbd_send_opt_abort(ioc);
        return -1;
    }

    return 1;
}

static QIOChannel *nbd_receive_starttls(QIOChannel *ioc,
                                        QCryptoTLSCreds *tlscreds,
                                        const char *hostname, Error **errp)
{
    int ret;
    QIOChannelTLS *tioc;
    struct NBDTLSHandshakeData data = { 0 };

    ret = nbd_request_simple_option(ioc, NBD_OPT_STARTTLS, true, errp);
    if (ret <= 0) {
        if (ret == 0) {
            error_setg(errp, "Server doesn't support STARTTLS option");
            nbd_send_opt_abort(ioc);
        }
        return NULL;
    }

    trace_nbd_receive_starttls_new_client();
    tioc = qio_channel_tls_new_client(ioc, tlscreds, hostname, errp);
    if (!tioc) {
        return NULL;
    }
    qio_channel_set_name(QIO_CHANNEL(tioc), "nbd-client-tls");
    data.loop = g_main_loop_new(g_main_context_default(), FALSE);
    trace_nbd_receive_starttls_tls_handshake();
    qio_channel_tls_handshake(tioc,
                              nbd_tls_handshake,
                              &data,
                              NULL,
                              NULL);

    if (!data.complete) {
        g_main_loop_run(data.loop);
    }
    g_main_loop_unref(data.loop);
    if (data.error) {
        error_propagate(errp, data.error);
        object_unref(OBJECT(tioc));
        return NULL;
    }

    return QIO_CHANNEL(tioc);
}

/*
 * nbd_send_meta_query:
 * Send 0 or 1 set/list meta context queries.
 * Return 0 on success, -1 with errp set for any error
 */
static int nbd_send_meta_query(QIOChannel *ioc, uint32_t opt,
                               const char *export, const char *query,
                               Error **errp)
{
    int ret;
    uint32_t export_len;
    uint32_t queries = !!query;
    uint32_t query_len = 0;
    uint32_t data_len;
    char *data;
    char *p;

    assert(strnlen(export, NBD_MAX_STRING_SIZE + 1) <= NBD_MAX_STRING_SIZE);
    export_len = strlen(export);
    data_len = sizeof(export_len) + export_len + sizeof(queries);
    if (query) {
        assert(strnlen(query, NBD_MAX_STRING_SIZE + 1) <= NBD_MAX_STRING_SIZE);
        query_len = strlen(query);
        data_len += sizeof(query_len) + query_len;
    } else {
        assert(opt == NBD_OPT_LIST_META_CONTEXT);
    }
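
    /*
     * Meta context query payload, as laid out below: 4-byte export
     * name length, the export name, a 4-byte query count (0 or 1),
     * and, if a query was given, its 4-byte length followed by the
     * query string.
     */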
    p = data = g_malloc(data_len);

    trace_nbd_opt_meta_request(nbd_opt_lookup(opt), query ?: "(all)", export);
    stl_be_p(p, export_len);
    memcpy(p += sizeof(export_len), export, export_len);
    stl_be_p(p += export_len, queries);
    if (query) {
        stl_be_p(p += sizeof(queries), query_len);
        memcpy(p += sizeof(query_len), query, query_len);
    }

    ret = nbd_send_option_request(ioc, opt, data_len, data, errp);
    g_free(data);
    return ret;
}

/*
 * nbd_receive_one_meta_context:
 * Called in a loop to receive and trace one set/list meta context reply.
 * Pass non-NULL @name or @id to collect results back to the caller, which
 * must eventually call g_free().
 * return 1 if name is set and iteration must continue,
 *        0 if iteration is complete (including if option is unsupported),
 *        -1 with errp set for any error
 */
static int nbd_receive_one_meta_context(QIOChannel *ioc,
                                        uint32_t opt,
                                        char **name,
                                        uint32_t *id,
                                        Error **errp)
{
    int ret;
    NBDOptionReply reply;
    char *local_name = NULL;
    uint32_t local_id;

    if (nbd_receive_option_reply(ioc, opt, &reply, errp) < 0) {
        return -1;
    }

    ret = nbd_handle_reply_err(ioc, &reply, false, errp);
    if (ret <= 0) {
        return ret;
    }

    if (reply.type == NBD_REP_ACK) {
        if (reply.length != 0) {
            error_setg(errp, "Unexpected length to ACK response");
            nbd_send_opt_abort(ioc);
            return -1;
        }
        return 0;
    } else if (reply.type != NBD_REP_META_CONTEXT) {
        error_setg(errp, "Unexpected reply type %u (%s), expected %u (%s)",
                   reply.type, nbd_rep_lookup(reply.type),
                   NBD_REP_META_CONTEXT, nbd_rep_lookup(NBD_REP_META_CONTEXT));
        nbd_send_opt_abort(ioc);
        return -1;
    }

    if (reply.length <= sizeof(local_id) ||
        reply.length > NBD_MAX_BUFFER_SIZE) {
        error_setg(errp, "Failed to negotiate meta context, server "
                   "answered with unexpected length %" PRIu32,
                   reply.length);
        nbd_send_opt_abort(ioc);
        return -1;
    }

    if (nbd_read32(ioc, &local_id, "context id", errp) < 0) {
        return -1;
    }

    reply.length -= sizeof(local_id);
    local_name = g_malloc(reply.length + 1);
    if (nbd_read(ioc, local_name, reply.length, "context name", errp) < 0) {
        g_free(local_name);
        return -1;
    }
    local_name[reply.length] = '\0';
    trace_nbd_opt_meta_reply(nbd_opt_lookup(opt), local_name, local_id);

    if (name) {
        *name = local_name;
    } else {
        g_free(local_name);
    }
    if (id) {
        *id = local_id;
    }
    return 1;
}

/*
 * nbd_negotiate_simple_meta_context:
 * Request the server to set the meta context for export @info->name
 * using @info->x_dirty_bitmap with a fallback to "base:allocation",
 * setting @info->context_id to the resulting id. Fail if the server
 * responds with more than one context or with a context different
 * than the query.
 * return 1 for successful negotiation,
 *        0 if operation is unsupported,
 *        -1 with errp set for any other error
 */
static int nbd_negotiate_simple_meta_context(QIOChannel *ioc,
                                             NBDExportInfo *info,
                                             Error **errp)
{
    /*
     * TODO: Removing the x_dirty_bitmap hack will mean refactoring
     * this function to request and store ids for multiple contexts
     * (both base:allocation and a dirty bitmap), at which point this
     * function should lose the term _simple.
     */
    int ret;
    const char *context = info->x_dirty_bitmap ?: "base:allocation";
    bool received = false;
    char *name = NULL;

    if (nbd_send_meta_query(ioc, NBD_OPT_SET_META_CONTEXT,
                            info->name, context, errp) < 0) {
        return -1;
    }

    ret = nbd_receive_one_meta_context(ioc, NBD_OPT_SET_META_CONTEXT,
                                       &name, &info->context_id, errp);
    if (ret < 0) {
        return -1;
    }
    if (ret == 1) {
        if (strcmp(context, name)) {
            error_setg(errp, "Failed to negotiate meta context '%s', server "
                       "answered with different context '%s'", context,
                       name);
            g_free(name);
            nbd_send_opt_abort(ioc);
            return -1;
        }
        g_free(name);
        received = true;

        ret = nbd_receive_one_meta_context(ioc, NBD_OPT_SET_META_CONTEXT,
                                           NULL, NULL, errp);
        if (ret < 0) {
            return -1;
        }
    }
    if (ret != 0) {
        error_setg(errp, "Server answered with more than one context");
        nbd_send_opt_abort(ioc);
        return -1;
    }
    return received;
}

/*
 * nbd_list_meta_contexts:
 * Request the server to list all meta contexts for export @info->name.
 * return 0 if list is complete (even if empty),
 *        -1 with errp set for any error
 */
static int nbd_list_meta_contexts(QIOChannel *ioc,
                                  NBDExportInfo *info,
                                  Error **errp)
{
    int ret;
    int seen_any = false;
    int seen_qemu = false;

    if (nbd_send_meta_query(ioc, NBD_OPT_LIST_META_CONTEXT,
                            info->name, NULL, errp) < 0) {
        return -1;
    }

    while (1) {
        char *context;

        ret = nbd_receive_one_meta_context(ioc, NBD_OPT_LIST_META_CONTEXT,
                                           &context, NULL, errp);
        if (ret == 0 && seen_any && !seen_qemu) {
            /*
             * Work around qemu 3.0 bug: the server forgot to send
             * "qemu:" replies to 0 queries. If we saw at least one
             * reply (probably base:allocation), but none of them were
             * qemu:, then run a more specific query to make sure.
             */
            seen_qemu = true;
            if (nbd_send_meta_query(ioc, NBD_OPT_LIST_META_CONTEXT,
                                    info->name, "qemu:", errp) < 0) {
                return -1;
            }
            continue;
        }
        if (ret <= 0) {
            return ret;
        }
        seen_any = true;
        seen_qemu |= strstart(context, "qemu:", NULL);
        info->contexts = g_renew(char *, info->contexts, ++info->n_contexts);
        info->contexts[info->n_contexts - 1] = context;
    }
}

/*
 * nbd_start_negotiate:
 * Start the handshake to the server.  After a positive return, the server
 * is ready to accept additional NBD_OPT requests.
 * Returns: negative errno: failure talking to server
 *          non-negative: enum NBDMode describing server abilities
 */
static int nbd_start_negotiate(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
                               const char *hostname, QIOChannel **outioc,
                               NBDMode max_mode, bool *zeroes,
                               Error **errp)
{
    ERRP_GUARD();
    uint64_t magic;

    trace_nbd_start_negotiate(tlscreds, hostname ? hostname : "<null>");

    if (zeroes) {
        *zeroes = true;
    }
    if (outioc) {
        *outioc = NULL;
    }
    if (tlscreds && !outioc) {
        error_setg(errp, "Output I/O channel required for TLS");
        return -EINVAL;
    }

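    /*
     * As read below, the server greets us with two 8-byte magic
     * numbers: NBD_INIT_MAGIC first, then either NBD_OPTS_MAGIC
     * (newstyle, followed by 16 bits of global flags) or
     * NBD_CLIENT_MAGIC (oldstyle).
     */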
    if (nbd_read64(ioc, &magic, "initial magic", errp) < 0) {
        return -EINVAL;
    }
    trace_nbd_receive_negotiate_magic(magic);

    if (magic != NBD_INIT_MAGIC) {
        error_setg(errp, "Bad initial magic received: 0x%" PRIx64, magic);
        return -EINVAL;
    }

    if (nbd_read64(ioc, &magic, "server magic", errp) < 0) {
        return -EINVAL;
    }
    trace_nbd_receive_negotiate_magic(magic);

    if (magic == NBD_OPTS_MAGIC) {
        uint32_t clientflags = 0;
        uint16_t globalflags;
        bool fixedNewStyle = false;

        if (nbd_read16(ioc, &globalflags, "server flags", errp) < 0) {
            return -EINVAL;
        }
        trace_nbd_receive_negotiate_server_flags(globalflags);
        if (globalflags & NBD_FLAG_FIXED_NEWSTYLE) {
            fixedNewStyle = true;
            clientflags |= NBD_FLAG_C_FIXED_NEWSTYLE;
        }
        if (globalflags & NBD_FLAG_NO_ZEROES) {
            if (zeroes) {
                *zeroes = false;
            }
            clientflags |= NBD_FLAG_C_NO_ZEROES;
        }
        /* client requested flags */
        clientflags = cpu_to_be32(clientflags);
        if (nbd_write(ioc, &clientflags, sizeof(clientflags), errp) < 0) {
            error_prepend(errp, "Failed to send clientflags field: ");
            return -EINVAL;
        }
        if (tlscreds) {
            if (fixedNewStyle) {
                *outioc = nbd_receive_starttls(ioc, tlscreds, hostname, errp);
                if (!*outioc) {
                    return -EINVAL;
                }
                ioc = *outioc;
            } else {
                error_setg(errp, "Server does not support STARTTLS");
                return -EINVAL;
            }
        }
        if (fixedNewStyle) {
            int result = 0;

            if (max_mode >= NBD_MODE_EXTENDED) {
                result = nbd_request_simple_option(ioc,
                                                   NBD_OPT_EXTENDED_HEADERS,
                                                   false, errp);
                if (result) {
                    return result < 0 ? -EINVAL : NBD_MODE_EXTENDED;
                }
            }
            if (max_mode >= NBD_MODE_STRUCTURED) {
                result = nbd_request_simple_option(ioc,
                                                   NBD_OPT_STRUCTURED_REPLY,
                                                   false, errp);
                if (result) {
                    return result < 0 ? -EINVAL : NBD_MODE_STRUCTURED;
                }
            }
            return NBD_MODE_SIMPLE;
        } else {
            return NBD_MODE_EXPORT_NAME;
        }
    } else if (magic == NBD_CLIENT_MAGIC) {
        if (tlscreds) {
            error_setg(errp, "Server does not support STARTTLS");
            return -EINVAL;
        }
        return NBD_MODE_OLDSTYLE;
    } else {
        error_setg(errp, "Bad server magic received: 0x%" PRIx64, magic);
        return -EINVAL;
    }
}

/*
 * nbd_negotiate_finish_oldstyle:
 * Populate @info with the size and export flags from an oldstyle server,
 * but do not consume the 124 bytes of reserved zero padding.
 * Returns 0 on success, negative errno with @errp set on failure
 */
static int nbd_negotiate_finish_oldstyle(QIOChannel *ioc, NBDExportInfo *info,
                                         Error **errp)
{
    uint32_t oldflags;

    if (nbd_read64(ioc, &info->size, "export length", errp) < 0) {
        return -EINVAL;
    }

    if (nbd_read32(ioc, &oldflags, "export flags", errp) < 0) {
        return -EINVAL;
    }
    if (oldflags & ~0xffff) {
        error_setg(errp, "Unexpected export flags 0x%" PRIx32, oldflags);
        return -EINVAL;
    }
    info->flags = oldflags;
    return 0;
}

/*
 * nbd_receive_negotiate:
 * Connect to server, complete negotiation, and move into transmission phase.
 * Returns: negative errno: failure talking to server
 *          0: server is connected
 */
int nbd_receive_negotiate(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
                          const char *hostname, QIOChannel **outioc,
                          NBDExportInfo *info, Error **errp)
{
    ERRP_GUARD();
    int result;
    bool zeroes;
    bool base_allocation = info->base_allocation;

    assert(info->name && strlen(info->name) <= NBD_MAX_STRING_SIZE);
    trace_nbd_receive_negotiate_name(info->name);

    result = nbd_start_negotiate(ioc, tlscreds, hostname, outioc,
                                 info->mode, &zeroes, errp);
    if (result < 0) {
        return result;
    }

    info->mode = result;
    info->base_allocation = false;
    if (tlscreds && *outioc) {
        ioc = *outioc;
    }

    switch (info->mode) {
    case NBD_MODE_EXTENDED:
    case NBD_MODE_STRUCTURED:
        if (base_allocation) {
            result = nbd_negotiate_simple_meta_context(ioc, info, errp);
            if (result < 0) {
                return -EINVAL;
            }
            info->base_allocation = result == 1;
        }
        /* fall through */
    case NBD_MODE_SIMPLE:
        /* Try NBD_OPT_GO first - if it works, we are done (it
         * also gives us a good message if the server requires
         * TLS).  If it is not available, fall back to
         * NBD_OPT_LIST for nicer error messages about a missing
         * export, then use NBD_OPT_EXPORT_NAME. */
        result = nbd_opt_info_or_go(ioc, NBD_OPT_GO, info, errp);
        if (result < 0) {
            return -EINVAL;
        }
        if (result > 0) {
            return 0;
        }
        /* Check our desired export is present in the
         * server export list. Since NBD_OPT_EXPORT_NAME
         * cannot return an error message, running this
         * query gives us better error reporting if the
         * export name is not available.
         */
        if (nbd_receive_query_exports(ioc, info->name, errp) < 0) {
            return -EINVAL;
        }
        /* fall through */
    case NBD_MODE_EXPORT_NAME:
        /* write the export name request */
        if (nbd_send_option_request(ioc, NBD_OPT_EXPORT_NAME, -1, info->name,
                                    errp) < 0) {
            return -EINVAL;
        }

        /* Read the response */
        if (nbd_read64(ioc, &info->size, "export length", errp) < 0) {
            return -EINVAL;
        }

        if (nbd_read16(ioc, &info->flags, "export flags", errp) < 0) {
            return -EINVAL;
        }
        break;
    case NBD_MODE_OLDSTYLE:
        if (*info->name) {
            error_setg(errp, "Server does not support non-empty export names");
            return -EINVAL;
        }
        if (nbd_negotiate_finish_oldstyle(ioc, info, errp) < 0) {
            return -EINVAL;
        }
        break;
    default:
        g_assert_not_reached();
    }

    trace_nbd_receive_negotiate_size_flags(info->size, info->flags);
    if (zeroes && nbd_drop(ioc, 124, errp) < 0) {
        error_prepend(errp, "Failed to read reserved block: ");
        return -EINVAL;
    }
    return 0;
}

/* Clean up result of nbd_receive_export_list */
void nbd_free_export_list(NBDExportInfo *info, int count)
{
    int i, j;

    if (!info) {
        return;
    }

    for (i = 0; i < count; i++) {
        g_free(info[i].name);
        g_free(info[i].description);
        for (j = 0; j < info[i].n_contexts; j++) {
            g_free(info[i].contexts[j]);
        }
        g_free(info[i].contexts);
    }
    g_free(info);
}

/*
 * nbd_receive_export_list:
 * Query details about a server's exports, then disconnect without
 * going into transmission phase. Return a count of the exports listed
 * in @info by the server, or -1 on error. Caller must free @info using
 * nbd_free_export_list().
 */
int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
                            const char *hostname, NBDExportInfo **info,
                            Error **errp)
{
    int result;
    int count = 0;
    int i;
    int rc;
    int ret = -1;
    NBDExportInfo *array = NULL;
    QIOChannel *sioc = NULL;

    *info = NULL;
    result = nbd_start_negotiate(ioc, tlscreds, hostname, &sioc,
                                 NBD_MODE_EXTENDED, NULL, errp);
    if (tlscreds && sioc) {
        ioc = sioc;
    }
    if (result < 0) {
        goto out;
    }

    switch ((NBDMode)result) {
    case NBD_MODE_SIMPLE:
    case NBD_MODE_STRUCTURED:
    case NBD_MODE_EXTENDED:
        /* newstyle - use NBD_OPT_LIST to populate array, then try
         * NBD_OPT_INFO on each array member. If structured replies
         * are enabled, also try NBD_OPT_LIST_META_CONTEXT. */
        if (nbd_send_option_request(ioc, NBD_OPT_LIST, 0, NULL, errp) < 0) {
            goto out;
        }
        while (1) {
            char *name;
            char *desc;

            rc = nbd_receive_list(ioc, &name, &desc, errp);
            if (rc < 0) {
                goto out;
            } else if (rc == 0) {
                break;
            }
            array = g_renew(NBDExportInfo, array, ++count);
            memset(&array[count - 1], 0, sizeof(*array));
            array[count - 1].name = name;
            array[count - 1].description = desc;
            array[count - 1].mode = result;
        }

        for (i = 0; i < count; i++) {
            array[i].request_sizes = true;
            rc = nbd_opt_info_or_go(ioc, NBD_OPT_INFO, &array[i], errp);
            if (rc < 0) {
                goto out;
            } else if (rc == 0) {
                /*
                 * Pointless to try rest of loop. If OPT_INFO doesn't work,
                 * it's unlikely that meta contexts work either
                 */
                break;
            }

            if (result >= NBD_MODE_STRUCTURED &&
                nbd_list_meta_contexts(ioc, &array[i], errp) < 0) {
                goto out;
            }
        }

        /* Send NBD_OPT_ABORT as a courtesy before hanging up */
        nbd_send_opt_abort(ioc);
        break;
    case NBD_MODE_EXPORT_NAME:
        error_setg(errp, "Server does not support export lists");
        /* We can't even send NBD_OPT_ABORT, so merely hang up */
        goto out;
    case NBD_MODE_OLDSTYLE:
        /* Lone export name is implied, but we can parse length and flags */
        array = g_new0(NBDExportInfo, 1);
        array->name = g_strdup("");
        array->mode = NBD_MODE_OLDSTYLE;
        count = 1;

        if (nbd_negotiate_finish_oldstyle(ioc, array, errp) < 0) {
            goto out;
        }

        /* Send NBD_CMD_DISC as a courtesy to the server, but ignore all
         * errors now that we have the information we wanted. */
        if (nbd_drop(ioc, 124, NULL) == 0) {
            NBDRequest request = { .type = NBD_CMD_DISC, .mode = result };

            nbd_send_request(ioc, &request);
        }
        break;
    default:
        g_assert_not_reached();
    }

    *info = array;
    array = NULL;
    ret = count;

 out:
    qio_channel_shutdown(ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
    qio_channel_close(ioc, NULL);
    object_unref(OBJECT(sioc));
    nbd_free_export_list(array, count);
    return ret;
}

#ifdef __linux__
int nbd_init(int fd, QIOChannelSocket *sioc, NBDExportInfo *info,
             Error **errp)
{
    unsigned long sector_size = MAX(BDRV_SECTOR_SIZE, info->min_block);
    unsigned long sectors = info->size / sector_size;

    /* FIXME: Once the kernel module is patched to honor block sizes,
     * and to advertise that fact to user space, we should update the
     * hand-off to the kernel to use any block sizes we learned. */
    assert(!info->request_sizes);
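    /*
     * The kernel interface is sector-based.  'sectors' is an unsigned
     * long, so on a 32-bit kernel the 64-bit quotient may have been
     * truncated when it was stored; the check below detects that.
     */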
    if (info->size / sector_size != sectors) {
        error_setg(errp, "Export size %" PRIu64 " too large for 32-bit kernel",
                   info->size);
        return -E2BIG;
    }

    trace_nbd_init_set_socket();

    if (ioctl(fd, NBD_SET_SOCK, (unsigned long) sioc->fd) < 0) {
        int serrno = errno;
        error_setg(errp, "Failed to set NBD socket");
        return -serrno;
    }

    trace_nbd_init_set_block_size(sector_size);

    if (ioctl(fd, NBD_SET_BLKSIZE, sector_size) < 0) {
        int serrno = errno;
        error_setg(errp, "Failed setting NBD block size");
        return -serrno;
    }

    trace_nbd_init_set_size(sectors);
    if (info->size % sector_size) {
        trace_nbd_init_trailing_bytes(info->size % sector_size);
    }

    if (ioctl(fd, NBD_SET_SIZE_BLOCKS, sectors) < 0) {
        int serrno = errno;
        error_setg(errp, "Failed setting size (in blocks)");
        return -serrno;
    }

    if (ioctl(fd, NBD_SET_FLAGS, (unsigned long) info->flags) < 0) {
        if (errno == ENOTTY) {
            int read_only = (info->flags & NBD_FLAG_READ_ONLY) != 0;
            trace_nbd_init_set_readonly();

            if (ioctl(fd, BLKROSET, (unsigned long) &read_only) < 0) {
                int serrno = errno;
                error_setg(errp, "Failed setting read-only attribute");
                return -serrno;
            }
        } else {
            int serrno = errno;
            error_setg(errp, "Failed setting flags");
            return -serrno;
        }
    }

    trace_nbd_init_finish();

    return 0;
}

int nbd_client(int fd)
{
    int ret;
    int serrno;

    trace_nbd_client_loop();

    ret = ioctl(fd, NBD_DO_IT);
    if (ret < 0 && errno == EPIPE) {
        /* NBD_DO_IT normally returns EPIPE when someone has disconnected
         * the socket via NBD_DISCONNECT.  We do not want to return 1 in
         * that case.
         */
        ret = 0;
    }
    serrno = errno;

    trace_nbd_client_loop_ret(ret, strerror(serrno));

    trace_nbd_client_clear_queue();
    ioctl(fd, NBD_CLEAR_QUE);

    trace_nbd_client_clear_socket();
    ioctl(fd, NBD_CLEAR_SOCK);

    errno = serrno;
    return ret;
}

int nbd_disconnect(int fd)
{
    ioctl(fd, NBD_CLEAR_QUE);
    ioctl(fd, NBD_DISCONNECT);
    ioctl(fd, NBD_CLEAR_SOCK);
    return 0;
}

#endif /* __linux__ */

int nbd_send_request(QIOChannel *ioc, NBDRequest *request)
{
    uint8_t buf[NBD_EXTENDED_REQUEST_SIZE];
    size_t len;

    trace_nbd_send_request(request->from, request->len, request->cookie,
                           request->flags, request->type,
                           nbd_cmd_lookup(request->type));
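
    /*
     * Request header layout used by the stores below: bytes 0-3 magic,
     * 4-5 flags, 6-7 type, 8-15 cookie, 16-23 offset, then either a
     * 64-bit length (extended headers) or a 32-bit length.
     */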
    stw_be_p(buf + 4, request->flags);
    stw_be_p(buf + 6, request->type);
    stq_be_p(buf + 8, request->cookie);
    stq_be_p(buf + 16, request->from);
    if (request->mode >= NBD_MODE_EXTENDED) {
        stl_be_p(buf, NBD_EXTENDED_REQUEST_MAGIC);
        stq_be_p(buf + 24, request->len);
        len = NBD_EXTENDED_REQUEST_SIZE;
    } else {
        assert(request->len <= UINT32_MAX);
        stl_be_p(buf, NBD_REQUEST_MAGIC);
        stl_be_p(buf + 24, request->len);
        len = NBD_REQUEST_SIZE;
    }

    return nbd_write(ioc, buf, len, NULL);
}

/* nbd_receive_simple_reply
 * Read a simple reply, except for the magic field (which should already
 * have been read).  The payload is not read (a payload is possible for
 * CMD_READ, but at this point we don't yet know whether one is present).
 */
static int nbd_receive_simple_reply(QIOChannel *ioc, NBDSimpleReply *reply,
                                    Error **errp)
{
    int ret;

    assert(reply->magic == NBD_SIMPLE_REPLY_MAGIC);

    ret = nbd_read(ioc, (uint8_t *)reply + sizeof(reply->magic),
                   sizeof(*reply) - sizeof(reply->magic), "reply", errp);
    if (ret < 0) {
        return ret;
    }

    reply->error = be32_to_cpu(reply->error);
    reply->cookie = be64_to_cpu(reply->cookie);

    return 0;
}

/* nbd_receive_reply_chunk_header
 * Read a structured reply chunk header, except for the magic field (which
 * should already have been read).  Normalize into the compact form.
 * The payload is not read.
 */
static int nbd_receive_reply_chunk_header(QIOChannel *ioc, NBDReply *chunk,
                                          Error **errp)
{
    int ret;
    size_t len;
    uint64_t payload_len;

    if (chunk->magic == NBD_STRUCTURED_REPLY_MAGIC) {
        len = sizeof(chunk->structured);
    } else {
        assert(chunk->magic == NBD_EXTENDED_REPLY_MAGIC);
        len = sizeof(chunk->extended);
    }

    ret = nbd_read(ioc, (uint8_t *)chunk + sizeof(chunk->magic),
                   len - sizeof(chunk->magic), "structured chunk",
                   errp);
    if (ret < 0) {
        return ret;
    }

    /* flags, type, and cookie occupy same space between forms */
    chunk->structured.flags = be16_to_cpu(chunk->structured.flags);
    chunk->structured.type = be16_to_cpu(chunk->structured.type);
    chunk->structured.cookie = be64_to_cpu(chunk->structured.cookie);

    /*
     * Because we use BLOCK_STATUS with REQ_ONE, and cap READ requests
     * at 32M, no valid server should send us payload larger than
     * this.  Even if we stopped using REQ_ONE, sane servers will cap
     * the number of extents they return for block status.
     */
    if (chunk->magic == NBD_STRUCTURED_REPLY_MAGIC) {
        payload_len = be32_to_cpu(chunk->structured.length);
    } else {
        /* For now, we are ignoring the extended header offset. */
        payload_len = be64_to_cpu(chunk->extended.length);
        chunk->magic = NBD_STRUCTURED_REPLY_MAGIC;
    }
    if (payload_len > NBD_MAX_BUFFER_SIZE + sizeof(NBDStructuredReadData)) {
        error_setg(errp, "server chunk %" PRIu32 " (%s) payload is too long",
                   chunk->structured.type,
                   nbd_rep_lookup(chunk->structured.type));
        return -EINVAL;
    }
    chunk->structured.length = payload_len;

    return 0;
}

/* nbd_read_eof
 * Tries to read @size bytes from @ioc.
 * Returns 1 on success
 *         0 on eof, when no data was read (errp is not set)
 *         negative errno on failure (errp is set)
 */
static inline int coroutine_fn
nbd_read_eof(BlockDriverState *bs, QIOChannel *ioc, void *buffer, size_t size,
             Error **errp)
{
    bool partial = false;

    assert(size);
    while (size > 0) {
        struct iovec iov = { .iov_base = buffer, .iov_len = size };
        ssize_t len;

        len = qio_channel_readv(ioc, &iov, 1, errp);
        if (len == QIO_CHANNEL_ERR_BLOCK) {
            qio_channel_yield(ioc, G_IO_IN);
            continue;
        } else if (len < 0) {
            return -EIO;
        } else if (len == 0) {
            if (partial) {
                error_setg(errp,
                           "Unexpected end-of-file before all bytes were read");
                return -EIO;
            } else {
                return 0;
            }
        }

        partial = true;
        size -= len;
        buffer = (uint8_t*) buffer + len;
    }
    return 1;
}

/* nbd_receive_reply
 *
 * Wait for a new reply. If this yields, the coroutine must be able to be
 * safely reentered for nbd_client_attach_aio_context().  @mode determines
 * which reply magic we are expecting, although this normalizes the result
 * so that the caller only has to work with compact headers.
 *
 * Returns 1 on success
 *         0 on eof, when no data was read
 *         negative errno on failure
 */
int coroutine_fn nbd_receive_reply(BlockDriverState *bs, QIOChannel *ioc,
                                   NBDReply *reply, NBDMode mode, Error **errp)
{
    int ret;
    const char *type;
    uint32_t expected;

    ret = nbd_read_eof(bs, ioc, &reply->magic, sizeof(reply->magic), errp);
    if (ret <= 0) {
        return ret;
    }

    reply->magic = be32_to_cpu(reply->magic);

    /* Diagnose but accept wrong-width header */
    switch (reply->magic) {
    case NBD_SIMPLE_REPLY_MAGIC:
        if (mode >= NBD_MODE_EXTENDED) {
            trace_nbd_receive_wrong_header(reply->magic,
                                           nbd_mode_lookup(mode));
        }
        ret = nbd_receive_simple_reply(ioc, &reply->simple, errp);
        if (ret < 0) {
            return ret;
        }
        trace_nbd_receive_simple_reply(reply->simple.error,
                                       nbd_err_lookup(reply->simple.error),
                                       reply->cookie);
        break;
    case NBD_STRUCTURED_REPLY_MAGIC:
    case NBD_EXTENDED_REPLY_MAGIC:
        expected = mode >= NBD_MODE_EXTENDED ? NBD_EXTENDED_REPLY_MAGIC
                                             : NBD_STRUCTURED_REPLY_MAGIC;
        if (reply->magic != expected) {
            trace_nbd_receive_wrong_header(reply->magic,
                                           nbd_mode_lookup(mode));
        }
        ret = nbd_receive_reply_chunk_header(ioc, reply, errp);
        if (ret < 0) {
            return ret;
        }
        type = nbd_reply_type_lookup(reply->structured.type);
        trace_nbd_receive_reply_chunk_header(reply->structured.flags,
                                             reply->structured.type, type,
                                             reply->structured.cookie,
                                             reply->structured.length);
        break;
    default:
        trace_nbd_receive_wrong_header(reply->magic, nbd_mode_lookup(mode));
        error_setg(errp, "invalid magic (got 0x%" PRIx32 ")", reply->magic);
        return -EINVAL;
    }

    return 1;
}